Dataset schema (column name, type, observed range or length):

repo_name         stringclasses            6 values
pr_number         int64                    512 to 78.9k
pr_title          stringlengths            3 to 144
pr_description    stringlengths            0 to 30.3k
author            stringlengths            2 to 21
date_created      timestamp[ns, tz=UTC]
date_merged       timestamp[ns, tz=UTC]
previous_commit   stringlengths            40 to 40
pr_commit         stringlengths            40 to 40
query             stringlengths            17 to 30.4k
filepath          stringlengths            9 to 210
before_content    stringlengths            0 to 112M
after_content     stringlengths            0 to 112M
label             int64                    -1 to 1
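The rows that follow are flat records matching this schema: PR metadata, a file path, the file's content before and after the PR commit, and an integer label. As a minimal sketch of how such a dump could be loaded and sanity-checked, the snippet below uses pandas; the file name "pr_file_rows.parquet", the Parquet storage format, and the check relating identical before/after content to the label are assumptions drawn only from the sample rows shown here, not facts about the dataset.

```python
# Minimal inspection sketch for a dump with the schema above.
# Assumptions (not given by this excerpt): rows are stored locally as
# Parquet at "pr_file_rows.parquet" and column names match the schema.
import pandas as pd

df = pd.read_parquet("pr_file_rows.parquet")  # hypothetical path

# Confirm the expected columns are present.
expected = [
    "repo_name", "pr_number", "pr_title", "pr_description", "author",
    "date_created", "date_merged", "previous_commit", "pr_commit",
    "query", "filepath", "before_content", "after_content", "label",
]
missing = [c for c in expected if c not in df.columns]
print("missing columns:", missing)

# Label distribution (the schema gives int64 values in the range -1 to 1).
print(df["label"].value_counts(dropna=False))

# The sample rows shown here pair label -1 with identical before/after
# content; count how many rows carry content that did not change.
unchanged = df["before_content"] == df["after_content"]
print("rows with unchanged content:", int(unchanged.sum()))
```

If the dump were stored as JSON Lines instead, `pd.read_json(path, lines=True)` would read it; nothing else in the sketch depends on the storage format.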
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd.Arm64/MultiplyExtendedScalarBySelectedScalar.Vector64.Single.Vector64.Single.1.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void MultiplyExtendedScalarBySelectedScalar_Vector64_Single_Vector64_Single_1() { var test = new ImmBinaryOpTest__MultiplyExtendedScalarBySelectedScalar_Vector64_Single_Vector64_Single_1(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class ImmBinaryOpTest__MultiplyExtendedScalarBySelectedScalar_Vector64_Single_Vector64_Single_1 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Single[] inArray1, Single[] inArray2, Single[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Single>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Single>(); int sizeOfoutArray = outArray.Length * 
Unsafe.SizeOf<Single>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Single, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Single, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<Single> _fld1; public Vector64<Single> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref testStruct._fld1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Single>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref testStruct._fld2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Single>>()); return testStruct; } public void RunStructFldScenario(ImmBinaryOpTest__MultiplyExtendedScalarBySelectedScalar_Vector64_Single_Vector64_Single_1 testClass) { var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar(_fld1, _fld2, 1); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(ImmBinaryOpTest__MultiplyExtendedScalarBySelectedScalar_Vector64_Single_Vector64_Single_1 testClass) { fixed (Vector64<Single>* pFld1 = &_fld1) fixed (Vector64<Single>* pFld2 = &_fld2) { var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar( AdvSimd.LoadVector64((Single*)(pFld1)), AdvSimd.LoadVector64((Single*)(pFld2)), 1 ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Single>>() / sizeof(Single); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<Single>>() / sizeof(Single); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Single>>() / sizeof(Single); private static readonly byte Imm = 1; private static Single[] _data1 = new Single[Op1ElementCount]; private static Single[] _data2 = new Single[Op2ElementCount]; private static Vector64<Single> _clsVar1; private static 
Vector64<Single> _clsVar2; private Vector64<Single> _fld1; private Vector64<Single> _fld2; private DataTable _dataTable; static ImmBinaryOpTest__MultiplyExtendedScalarBySelectedScalar_Vector64_Single_Vector64_Single_1() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref _clsVar1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Single>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref _clsVar2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Single>>()); } public ImmBinaryOpTest__MultiplyExtendedScalarBySelectedScalar_Vector64_Single_Vector64_Single_1() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref _fld1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Single>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref _fld2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Single>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } _dataTable = new DataTable(_data1, _data2, new Single[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.Arm64.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar( Unsafe.Read<Vector64<Single>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Single>>(_dataTable.inArray2Ptr), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar( AdvSimd.LoadVector64((Single*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((Single*)(_dataTable.inArray2Ptr)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar), new Type[] { typeof(Vector64<Single>), typeof(Vector64<Single>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<Single>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Single>>(_dataTable.inArray2Ptr), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Single>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar), new Type[] { typeof(Vector64<Single>), typeof(Vector64<Single>), typeof(byte) }) 
.Invoke(null, new object[] { AdvSimd.LoadVector64((Single*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((Single*)(_dataTable.inArray2Ptr)), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Single>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar( _clsVar1, _clsVar2, 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector64<Single>* pClsVar1 = &_clsVar1) fixed (Vector64<Single>* pClsVar2 = &_clsVar2) { var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar( AdvSimd.LoadVector64((Single*)(pClsVar1)), AdvSimd.LoadVector64((Single*)(pClsVar2)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<Single>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector64<Single>>(_dataTable.inArray2Ptr); var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar(op1, op2, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector64((Single*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector64((Single*)(_dataTable.inArray2Ptr)); var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar(op1, op2, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new ImmBinaryOpTest__MultiplyExtendedScalarBySelectedScalar_Vector64_Single_Vector64_Single_1(); var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar(test._fld1, test._fld2, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new ImmBinaryOpTest__MultiplyExtendedScalarBySelectedScalar_Vector64_Single_Vector64_Single_1(); fixed (Vector64<Single>* pFld1 = &test._fld1) fixed (Vector64<Single>* pFld2 = &test._fld2) { var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar( AdvSimd.LoadVector64((Single*)(pFld1)), AdvSimd.LoadVector64((Single*)(pFld2)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar(_fld1, _fld2, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<Single>* pFld1 = &_fld1) fixed (Vector64<Single>* pFld2 = &_fld2) { var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar( 
AdvSimd.LoadVector64((Single*)(pFld1)), AdvSimd.LoadVector64((Single*)(pFld2)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar(test._fld1, test._fld2, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar( AdvSimd.LoadVector64((Single*)(&test._fld1)), AdvSimd.LoadVector64((Single*)(&test._fld2)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<Single> firstOp, Vector64<Single> secondOp, void* result, [CallerMemberName] string method = "") { Single[] inArray1 = new Single[Op1ElementCount]; Single[] inArray2 = new Single[Op2ElementCount]; Single[] outArray = new Single[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Single, byte>(ref inArray1[0]), firstOp); Unsafe.WriteUnaligned(ref Unsafe.As<Single, byte>(ref inArray2[0]), secondOp); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Single>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* firstOp, void* secondOp, void* result, [CallerMemberName] string method = "") { Single[] inArray1 = new Single[Op1ElementCount]; Single[] inArray2 = new Single[Op2ElementCount]; Single[] outArray = new Single[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<Vector64<Single>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(secondOp), (uint)Unsafe.SizeOf<Vector64<Single>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Single>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Single[] firstOp, Single[] secondOp, Single[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (BitConverter.SingleToInt32Bits(Helpers.MultiplyExtended(firstOp[0], secondOp[Imm])) != BitConverter.SingleToInt32Bits(result[0])) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (BitConverter.SingleToInt32Bits(result[i]) != 0) { succeeded = false; break; } } } if (!succeeded) { 
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd.Arm64)}.{nameof(AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar)}<Single>(Vector64<Single>, Vector64<Single>, 1): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" secondOp: ({string.Join(", ", secondOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void MultiplyExtendedScalarBySelectedScalar_Vector64_Single_Vector64_Single_1() { var test = new ImmBinaryOpTest__MultiplyExtendedScalarBySelectedScalar_Vector64_Single_Vector64_Single_1(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class ImmBinaryOpTest__MultiplyExtendedScalarBySelectedScalar_Vector64_Single_Vector64_Single_1 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Single[] inArray1, Single[] inArray2, Single[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Single>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Single>(); int sizeOfoutArray = outArray.Length * 
Unsafe.SizeOf<Single>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Single, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Single, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<Single> _fld1; public Vector64<Single> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref testStruct._fld1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Single>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref testStruct._fld2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Single>>()); return testStruct; } public void RunStructFldScenario(ImmBinaryOpTest__MultiplyExtendedScalarBySelectedScalar_Vector64_Single_Vector64_Single_1 testClass) { var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar(_fld1, _fld2, 1); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(ImmBinaryOpTest__MultiplyExtendedScalarBySelectedScalar_Vector64_Single_Vector64_Single_1 testClass) { fixed (Vector64<Single>* pFld1 = &_fld1) fixed (Vector64<Single>* pFld2 = &_fld2) { var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar( AdvSimd.LoadVector64((Single*)(pFld1)), AdvSimd.LoadVector64((Single*)(pFld2)), 1 ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Single>>() / sizeof(Single); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<Single>>() / sizeof(Single); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Single>>() / sizeof(Single); private static readonly byte Imm = 1; private static Single[] _data1 = new Single[Op1ElementCount]; private static Single[] _data2 = new Single[Op2ElementCount]; private static Vector64<Single> _clsVar1; private static 
Vector64<Single> _clsVar2; private Vector64<Single> _fld1; private Vector64<Single> _fld2; private DataTable _dataTable; static ImmBinaryOpTest__MultiplyExtendedScalarBySelectedScalar_Vector64_Single_Vector64_Single_1() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref _clsVar1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Single>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref _clsVar2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Single>>()); } public ImmBinaryOpTest__MultiplyExtendedScalarBySelectedScalar_Vector64_Single_Vector64_Single_1() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref _fld1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Single>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref _fld2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Single>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } _dataTable = new DataTable(_data1, _data2, new Single[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.Arm64.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar( Unsafe.Read<Vector64<Single>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Single>>(_dataTable.inArray2Ptr), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar( AdvSimd.LoadVector64((Single*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((Single*)(_dataTable.inArray2Ptr)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar), new Type[] { typeof(Vector64<Single>), typeof(Vector64<Single>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<Single>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Single>>(_dataTable.inArray2Ptr), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Single>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar), new Type[] { typeof(Vector64<Single>), typeof(Vector64<Single>), typeof(byte) }) 
.Invoke(null, new object[] { AdvSimd.LoadVector64((Single*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((Single*)(_dataTable.inArray2Ptr)), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Single>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar( _clsVar1, _clsVar2, 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector64<Single>* pClsVar1 = &_clsVar1) fixed (Vector64<Single>* pClsVar2 = &_clsVar2) { var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar( AdvSimd.LoadVector64((Single*)(pClsVar1)), AdvSimd.LoadVector64((Single*)(pClsVar2)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<Single>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector64<Single>>(_dataTable.inArray2Ptr); var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar(op1, op2, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector64((Single*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector64((Single*)(_dataTable.inArray2Ptr)); var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar(op1, op2, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new ImmBinaryOpTest__MultiplyExtendedScalarBySelectedScalar_Vector64_Single_Vector64_Single_1(); var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar(test._fld1, test._fld2, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new ImmBinaryOpTest__MultiplyExtendedScalarBySelectedScalar_Vector64_Single_Vector64_Single_1(); fixed (Vector64<Single>* pFld1 = &test._fld1) fixed (Vector64<Single>* pFld2 = &test._fld2) { var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar( AdvSimd.LoadVector64((Single*)(pFld1)), AdvSimd.LoadVector64((Single*)(pFld2)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar(_fld1, _fld2, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<Single>* pFld1 = &_fld1) fixed (Vector64<Single>* pFld2 = &_fld2) { var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar( 
AdvSimd.LoadVector64((Single*)(pFld1)), AdvSimd.LoadVector64((Single*)(pFld2)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar(test._fld1, test._fld2, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar( AdvSimd.LoadVector64((Single*)(&test._fld1)), AdvSimd.LoadVector64((Single*)(&test._fld2)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<Single> firstOp, Vector64<Single> secondOp, void* result, [CallerMemberName] string method = "") { Single[] inArray1 = new Single[Op1ElementCount]; Single[] inArray2 = new Single[Op2ElementCount]; Single[] outArray = new Single[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Single, byte>(ref inArray1[0]), firstOp); Unsafe.WriteUnaligned(ref Unsafe.As<Single, byte>(ref inArray2[0]), secondOp); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Single>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* firstOp, void* secondOp, void* result, [CallerMemberName] string method = "") { Single[] inArray1 = new Single[Op1ElementCount]; Single[] inArray2 = new Single[Op2ElementCount]; Single[] outArray = new Single[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<Vector64<Single>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(secondOp), (uint)Unsafe.SizeOf<Vector64<Single>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Single>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Single[] firstOp, Single[] secondOp, Single[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (BitConverter.SingleToInt32Bits(Helpers.MultiplyExtended(firstOp[0], secondOp[Imm])) != BitConverter.SingleToInt32Bits(result[0])) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (BitConverter.SingleToInt32Bits(result[i]) != 0) { succeeded = false; break; } } } if (!succeeded) { 
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd.Arm64)}.{nameof(AdvSimd.Arm64.MultiplyExtendedScalarBySelectedScalar)}<Single>(Vector64<Single>, Vector64<Single>, 1): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" secondOp: ({string.Join(", ", secondOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/JIT/CodeGenBringUpTests/FPSub_d.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>Full</DebugType> <Optimize>False</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="FPSub.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>Full</DebugType> <Optimize>False</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="FPSub.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/libraries/System.Reflection.Emit.Lightweight/tests/DynamicMethodCreateDelegate.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using Xunit; namespace System.Reflection.Emit.Tests { public class DynamicMethodCreateDelegateTests { private const string FieldName = "_id"; public static IEnumerable<object[]> Targets_TestData() { yield return new object[] { new IDClass() }; yield return new object[] { new IDSubClass() }; } [Theory] [MemberData(nameof(Targets_TestData))] public void CreateDelegate_Target_Type(IDClass target) { int newId = 0; FieldInfo field = typeof(IDClass).GetField(FieldName, BindingFlags.NonPublic | BindingFlags.Instance); DynamicMethod method = new DynamicMethod("Method", typeof(int), new Type[] { typeof(IDClass), typeof(int) }, typeof(IDClass)); ILGenerator ilGenerator = method.GetILGenerator(); Helpers.EmitMethodBody(ilGenerator, field); IntDelegate instanceCallBack = (IntDelegate)method.CreateDelegate(typeof(IntDelegate), target); Assert.Equal(instanceCallBack(newId), target.ID); Assert.Equal(newId, target.ID); } [Theory] [MemberData(nameof(Targets_TestData))] public void CreateDelegate_Target_Module(IDClass target) { Module module = typeof(TestClass).GetTypeInfo().Module; int newId = 0; FieldInfo field = typeof(IDClass).GetField(FieldName, BindingFlags.NonPublic | BindingFlags.Instance); DynamicMethod method = new DynamicMethod("Method", typeof(int), new Type[] { typeof(IDClass), typeof(int) }, module, true); ILGenerator ilGenerator = method.GetILGenerator(); Helpers.EmitMethodBody(ilGenerator, field); IntDelegate instanceCallBack = (IntDelegate)method.CreateDelegate(typeof(IntDelegate), target); Assert.Equal(instanceCallBack(newId), target.ID); Assert.Equal(newId, target.ID); } [Theory] [MemberData(nameof(Targets_TestData))] public void CreateDelegate_Type(IDClass target) { int newId = 0; FieldInfo field = typeof(IDClass).GetField(FieldName, BindingFlags.NonPublic | BindingFlags.Instance); DynamicMethod method = new DynamicMethod("Method", typeof(int), new Type[] { typeof(IDClass), typeof(int) }, typeof(IDClass)); ILGenerator ilGenerator = method.GetILGenerator(); Helpers.EmitMethodBody(ilGenerator, field); IDClassDelegate staticCallBack = (IDClassDelegate)method.CreateDelegate(typeof(IDClassDelegate)); Assert.Equal(staticCallBack(target, newId), target.ID); Assert.Equal(newId, target.ID); } [Theory] [MemberData(nameof(Targets_TestData))] public void CreateDelegate_Module(IDClass target) { Module module = typeof(TestClass).GetTypeInfo().Module; int newId = 0; FieldInfo field = typeof(IDClass).GetField(FieldName, BindingFlags.NonPublic | BindingFlags.Instance); DynamicMethod method = new DynamicMethod("Method", typeof(int), new Type[] { typeof(IDClass), typeof(int) }, module, true); ILGenerator ilGenerator = method.GetILGenerator(); Helpers.EmitMethodBody(ilGenerator, field); IDClassDelegate staticCallBack = (IDClassDelegate)method.CreateDelegate(typeof(IDClassDelegate)); Assert.Equal(staticCallBack(target, newId), target.ID); Assert.Equal(newId, target.ID); } [Fact] public void CreateDelegate_NoMethodBody_ThrowsInvalidOperationException() { IDClass target = new IDClass(); DynamicMethod method = new DynamicMethod("Method", typeof(int), new Type[] { typeof(IDClass), typeof(int) }, typeof(IDClass)); Assert.Throws<InvalidOperationException>(() => method.CreateDelegate(typeof(IntDelegate))); Assert.Throws<InvalidOperationException>(() => method.CreateDelegate(typeof(IntDelegate), target)); } [Fact] public void 
CreateDelegate_InvalidTarget_ThrowsArgumentException() { FieldInfo field = typeof(IDClass).GetField(FieldName, BindingFlags.NonPublic | BindingFlags.Instance); DynamicMethod method = new DynamicMethod("Method", typeof(int), new Type[] { typeof(IDClass), typeof(int) }, typeof(IDClass)); ILGenerator ilGenerator = method.GetILGenerator(); Helpers.EmitMethodBody(ilGenerator, field); AssertExtensions.Throws<ArgumentException>(null, () => method.CreateDelegate(typeof(IntDelegate), "foo")); } [Theory] [InlineData(typeof(InvalidRetType))] [InlineData(typeof(WrongParamNumber))] [InlineData(typeof(InvalidParamType))] public void CreateDelegate_DelegateTypeInvalid_ThrowsArgumentException(Type delegateType) { FieldInfo field = typeof(IDClass).GetField(FieldName, BindingFlags.NonPublic | BindingFlags.Instance); DynamicMethod method = new DynamicMethod("Method", typeof(int), new Type[] { typeof(IDClass), typeof(int) }, typeof(IDClass)); ILGenerator ilGenerator = method.GetILGenerator(); Helpers.EmitMethodBody(ilGenerator, field); AssertExtensions.Throws<ArgumentException>(null, () => method.CreateDelegate(delegateType)); AssertExtensions.Throws<ArgumentException>(null, () => method.CreateDelegate(delegateType, new IDClass())); } } public class IDSubClass : IDClass { public IDSubClass(int id) : base(id) { } public IDSubClass() : base() { } } public delegate int IDClassDelegate(IDClass owner, int id); public delegate IDClass InvalidRetType(int id); public delegate int WrongParamNumber(int id, int m); public delegate int InvalidParamType(IDClass owner); }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using Xunit; namespace System.Reflection.Emit.Tests { public class DynamicMethodCreateDelegateTests { private const string FieldName = "_id"; public static IEnumerable<object[]> Targets_TestData() { yield return new object[] { new IDClass() }; yield return new object[] { new IDSubClass() }; } [Theory] [MemberData(nameof(Targets_TestData))] public void CreateDelegate_Target_Type(IDClass target) { int newId = 0; FieldInfo field = typeof(IDClass).GetField(FieldName, BindingFlags.NonPublic | BindingFlags.Instance); DynamicMethod method = new DynamicMethod("Method", typeof(int), new Type[] { typeof(IDClass), typeof(int) }, typeof(IDClass)); ILGenerator ilGenerator = method.GetILGenerator(); Helpers.EmitMethodBody(ilGenerator, field); IntDelegate instanceCallBack = (IntDelegate)method.CreateDelegate(typeof(IntDelegate), target); Assert.Equal(instanceCallBack(newId), target.ID); Assert.Equal(newId, target.ID); } [Theory] [MemberData(nameof(Targets_TestData))] public void CreateDelegate_Target_Module(IDClass target) { Module module = typeof(TestClass).GetTypeInfo().Module; int newId = 0; FieldInfo field = typeof(IDClass).GetField(FieldName, BindingFlags.NonPublic | BindingFlags.Instance); DynamicMethod method = new DynamicMethod("Method", typeof(int), new Type[] { typeof(IDClass), typeof(int) }, module, true); ILGenerator ilGenerator = method.GetILGenerator(); Helpers.EmitMethodBody(ilGenerator, field); IntDelegate instanceCallBack = (IntDelegate)method.CreateDelegate(typeof(IntDelegate), target); Assert.Equal(instanceCallBack(newId), target.ID); Assert.Equal(newId, target.ID); } [Theory] [MemberData(nameof(Targets_TestData))] public void CreateDelegate_Type(IDClass target) { int newId = 0; FieldInfo field = typeof(IDClass).GetField(FieldName, BindingFlags.NonPublic | BindingFlags.Instance); DynamicMethod method = new DynamicMethod("Method", typeof(int), new Type[] { typeof(IDClass), typeof(int) }, typeof(IDClass)); ILGenerator ilGenerator = method.GetILGenerator(); Helpers.EmitMethodBody(ilGenerator, field); IDClassDelegate staticCallBack = (IDClassDelegate)method.CreateDelegate(typeof(IDClassDelegate)); Assert.Equal(staticCallBack(target, newId), target.ID); Assert.Equal(newId, target.ID); } [Theory] [MemberData(nameof(Targets_TestData))] public void CreateDelegate_Module(IDClass target) { Module module = typeof(TestClass).GetTypeInfo().Module; int newId = 0; FieldInfo field = typeof(IDClass).GetField(FieldName, BindingFlags.NonPublic | BindingFlags.Instance); DynamicMethod method = new DynamicMethod("Method", typeof(int), new Type[] { typeof(IDClass), typeof(int) }, module, true); ILGenerator ilGenerator = method.GetILGenerator(); Helpers.EmitMethodBody(ilGenerator, field); IDClassDelegate staticCallBack = (IDClassDelegate)method.CreateDelegate(typeof(IDClassDelegate)); Assert.Equal(staticCallBack(target, newId), target.ID); Assert.Equal(newId, target.ID); } [Fact] public void CreateDelegate_NoMethodBody_ThrowsInvalidOperationException() { IDClass target = new IDClass(); DynamicMethod method = new DynamicMethod("Method", typeof(int), new Type[] { typeof(IDClass), typeof(int) }, typeof(IDClass)); Assert.Throws<InvalidOperationException>(() => method.CreateDelegate(typeof(IntDelegate))); Assert.Throws<InvalidOperationException>(() => method.CreateDelegate(typeof(IntDelegate), target)); } [Fact] public void 
CreateDelegate_InvalidTarget_ThrowsArgumentException() { FieldInfo field = typeof(IDClass).GetField(FieldName, BindingFlags.NonPublic | BindingFlags.Instance); DynamicMethod method = new DynamicMethod("Method", typeof(int), new Type[] { typeof(IDClass), typeof(int) }, typeof(IDClass)); ILGenerator ilGenerator = method.GetILGenerator(); Helpers.EmitMethodBody(ilGenerator, field); AssertExtensions.Throws<ArgumentException>(null, () => method.CreateDelegate(typeof(IntDelegate), "foo")); } [Theory] [InlineData(typeof(InvalidRetType))] [InlineData(typeof(WrongParamNumber))] [InlineData(typeof(InvalidParamType))] public void CreateDelegate_DelegateTypeInvalid_ThrowsArgumentException(Type delegateType) { FieldInfo field = typeof(IDClass).GetField(FieldName, BindingFlags.NonPublic | BindingFlags.Instance); DynamicMethod method = new DynamicMethod("Method", typeof(int), new Type[] { typeof(IDClass), typeof(int) }, typeof(IDClass)); ILGenerator ilGenerator = method.GetILGenerator(); Helpers.EmitMethodBody(ilGenerator, field); AssertExtensions.Throws<ArgumentException>(null, () => method.CreateDelegate(delegateType)); AssertExtensions.Throws<ArgumentException>(null, () => method.CreateDelegate(delegateType, new IDClass())); } } public class IDSubClass : IDClass { public IDSubClass(int id) : base(id) { } public IDSubClass() : base() { } } public delegate int IDClassDelegate(IDClass owner, int id); public delegate IDClass InvalidRetType(int id); public delegate int WrongParamNumber(int id, int m); public delegate int InvalidParamType(IDClass owner); }
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/JIT/HardwareIntrinsics/General/Vector128/CreateElement.SByte.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\General\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void CreateElementSByte() { var test = new VectorCreate__CreateElementSByte(); // Validates basic functionality works test.RunBasicScenario(); // Validates calling via reflection works test.RunReflectionScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorCreate__CreateElementSByte { private static readonly int LargestVectorSize = 16; private static readonly int ElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte); public bool Succeeded { get; set; } = true; public void RunBasicScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario)); SByte[] values = new SByte[ElementCount]; for (int i = 0; i < ElementCount; i++) { values[i] = TestLibrary.Generator.GetSByte(); } Vector128<SByte> result = Vector128.Create(values[0], values[1], values[2], values[3], values[4], values[5], values[6], values[7], values[8], values[9], values[10], values[11], values[12], values[13], values[14], values[15]); ValidateResult(result, values); } public void RunReflectionScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario)); Type[] operandTypes = new Type[ElementCount]; SByte[] values = new SByte[ElementCount]; for (int i = 0; i < ElementCount; i++) { operandTypes[i] = typeof(SByte); values[i] = TestLibrary.Generator.GetSByte(); } object result = typeof(Vector128) .GetMethod(nameof(Vector128.Create), operandTypes) .Invoke(null, new object[] { values[0], values[1], values[2], values[3], values[4], values[5], values[6], values[7], values[8], values[9], values[10], values[11], values[12], values[13], values[14], values[15] }); ValidateResult((Vector128<SByte>)(result), values); } private void ValidateResult(Vector128<SByte> result, SByte[] expectedValues, [CallerMemberName] string method = "") { SByte[] resultElements = new SByte[ElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref resultElements[0]), result); ValidateResult(resultElements, expectedValues, method); } private void ValidateResult(SByte[] resultElements, SByte[] expectedValues, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < ElementCount; i++) { if (resultElements[i] != expectedValues[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"Vector128.Create(SByte): {method} failed:"); TestLibrary.TestFramework.LogInformation($" value: ({string.Join(", ", expectedValues)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", resultElements)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\General\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void CreateElementSByte() { var test = new VectorCreate__CreateElementSByte(); // Validates basic functionality works test.RunBasicScenario(); // Validates calling via reflection works test.RunReflectionScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorCreate__CreateElementSByte { private static readonly int LargestVectorSize = 16; private static readonly int ElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte); public bool Succeeded { get; set; } = true; public void RunBasicScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario)); SByte[] values = new SByte[ElementCount]; for (int i = 0; i < ElementCount; i++) { values[i] = TestLibrary.Generator.GetSByte(); } Vector128<SByte> result = Vector128.Create(values[0], values[1], values[2], values[3], values[4], values[5], values[6], values[7], values[8], values[9], values[10], values[11], values[12], values[13], values[14], values[15]); ValidateResult(result, values); } public void RunReflectionScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario)); Type[] operandTypes = new Type[ElementCount]; SByte[] values = new SByte[ElementCount]; for (int i = 0; i < ElementCount; i++) { operandTypes[i] = typeof(SByte); values[i] = TestLibrary.Generator.GetSByte(); } object result = typeof(Vector128) .GetMethod(nameof(Vector128.Create), operandTypes) .Invoke(null, new object[] { values[0], values[1], values[2], values[3], values[4], values[5], values[6], values[7], values[8], values[9], values[10], values[11], values[12], values[13], values[14], values[15] }); ValidateResult((Vector128<SByte>)(result), values); } private void ValidateResult(Vector128<SByte> result, SByte[] expectedValues, [CallerMemberName] string method = "") { SByte[] resultElements = new SByte[ElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref resultElements[0]), result); ValidateResult(resultElements, expectedValues, method); } private void ValidateResult(SByte[] resultElements, SByte[] expectedValues, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < ElementCount; i++) { if (resultElements[i] != expectedValues[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"Vector128.Create(SByte): {method} failed:"); TestLibrary.TestFramework.LogInformation($" value: ({string.Join(", ", expectedValues)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", resultElements)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/libraries/Microsoft.Extensions.DependencyModel/src/Resolution/PackageCompilationAssemblyResolver.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; using System.IO; namespace Microsoft.Extensions.DependencyModel.Resolution { public class PackageCompilationAssemblyResolver: ICompilationAssemblyResolver { private readonly IFileSystem _fileSystem; private readonly string[] _nugetPackageDirectories; public PackageCompilationAssemblyResolver() : this(EnvironmentWrapper.Default, FileSystemWrapper.Default) { } public PackageCompilationAssemblyResolver(string nugetPackageDirectory) : this(FileSystemWrapper.Default, new string[] { nugetPackageDirectory }) { } internal PackageCompilationAssemblyResolver(IEnvironment environment, IFileSystem fileSystem) : this(fileSystem, GetDefaultProbeDirectories(environment)) { } internal PackageCompilationAssemblyResolver(IFileSystem fileSystem!!, string[] nugetPackageDirectories!!) { _fileSystem = fileSystem; _nugetPackageDirectories = nugetPackageDirectories; } internal static string[] GetDefaultProbeDirectories(IEnvironment environment) { object? probeDirectories = environment.GetAppContextData("PROBING_DIRECTORIES"); string? listOfDirectories = probeDirectories as string; if (!string.IsNullOrEmpty(listOfDirectories)) { return listOfDirectories.Split(new char[] { Path.PathSeparator }, StringSplitOptions.RemoveEmptyEntries); } string? packageDirectory = environment.GetEnvironmentVariable("NUGET_PACKAGES"); if (!string.IsNullOrEmpty(packageDirectory)) { return new string[] { packageDirectory }; } string? basePath; if (environment.IsWindows()) { basePath = environment.GetEnvironmentVariable("USERPROFILE"); } else { basePath = environment.GetEnvironmentVariable("HOME"); } if (string.IsNullOrEmpty(basePath)) { return new string[] { string.Empty }; } return new string[] { Path.Combine(basePath, ".nuget", "packages") }; } public bool TryResolveAssemblyPaths(CompilationLibrary library!!, List<string>? assemblies) { if (_nugetPackageDirectories == null || _nugetPackageDirectories.Length == 0 || !string.Equals(library.Type, "package", StringComparison.OrdinalIgnoreCase)) { return false; } foreach (string directory in _nugetPackageDirectories) { string packagePath; if (ResolverUtils.TryResolvePackagePath(_fileSystem, library, directory, out packagePath)) { if (TryResolveFromPackagePath(_fileSystem, library, packagePath, out IEnumerable<string>? fullPathsFromPackage)) { assemblies?.AddRange(fullPathsFromPackage); return true; } } } return false; } private static bool TryResolveFromPackagePath(IFileSystem fileSystem, CompilationLibrary library, string basePath, [MaybeNullWhen(false)] out IEnumerable<string> results) { var paths = new List<string>(); foreach (string assembly in library.Assemblies) { if (!ResolverUtils.TryResolveAssemblyFile(fileSystem, basePath, assembly, out string fullName)) { // if one of the files can't be found, skip this package path completely. // there are package paths that don't include all of the "ref" assemblies // (ex. ones created by 'dotnet store') results = null; return false; } paths.Add(fullName); } results = paths; return true; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; using System.IO; namespace Microsoft.Extensions.DependencyModel.Resolution { public class PackageCompilationAssemblyResolver: ICompilationAssemblyResolver { private readonly IFileSystem _fileSystem; private readonly string[] _nugetPackageDirectories; public PackageCompilationAssemblyResolver() : this(EnvironmentWrapper.Default, FileSystemWrapper.Default) { } public PackageCompilationAssemblyResolver(string nugetPackageDirectory) : this(FileSystemWrapper.Default, new string[] { nugetPackageDirectory }) { } internal PackageCompilationAssemblyResolver(IEnvironment environment, IFileSystem fileSystem) : this(fileSystem, GetDefaultProbeDirectories(environment)) { } internal PackageCompilationAssemblyResolver(IFileSystem fileSystem!!, string[] nugetPackageDirectories!!) { _fileSystem = fileSystem; _nugetPackageDirectories = nugetPackageDirectories; } internal static string[] GetDefaultProbeDirectories(IEnvironment environment) { object? probeDirectories = environment.GetAppContextData("PROBING_DIRECTORIES"); string? listOfDirectories = probeDirectories as string; if (!string.IsNullOrEmpty(listOfDirectories)) { return listOfDirectories.Split(new char[] { Path.PathSeparator }, StringSplitOptions.RemoveEmptyEntries); } string? packageDirectory = environment.GetEnvironmentVariable("NUGET_PACKAGES"); if (!string.IsNullOrEmpty(packageDirectory)) { return new string[] { packageDirectory }; } string? basePath; if (environment.IsWindows()) { basePath = environment.GetEnvironmentVariable("USERPROFILE"); } else { basePath = environment.GetEnvironmentVariable("HOME"); } if (string.IsNullOrEmpty(basePath)) { return new string[] { string.Empty }; } return new string[] { Path.Combine(basePath, ".nuget", "packages") }; } public bool TryResolveAssemblyPaths(CompilationLibrary library!!, List<string>? assemblies) { if (_nugetPackageDirectories == null || _nugetPackageDirectories.Length == 0 || !string.Equals(library.Type, "package", StringComparison.OrdinalIgnoreCase)) { return false; } foreach (string directory in _nugetPackageDirectories) { string packagePath; if (ResolverUtils.TryResolvePackagePath(_fileSystem, library, directory, out packagePath)) { if (TryResolveFromPackagePath(_fileSystem, library, packagePath, out IEnumerable<string>? fullPathsFromPackage)) { assemblies?.AddRange(fullPathsFromPackage); return true; } } } return false; } private static bool TryResolveFromPackagePath(IFileSystem fileSystem, CompilationLibrary library, string basePath, [MaybeNullWhen(false)] out IEnumerable<string> results) { var paths = new List<string>(); foreach (string assembly in library.Assemblies) { if (!ResolverUtils.TryResolveAssemblyFile(fileSystem, basePath, assembly, out string fullName)) { // if one of the files can't be found, skip this package path completely. // there are package paths that don't include all of the "ref" assemblies // (ex. ones created by 'dotnet store') results = null; return false; } paths.Add(fullName); } results = paths; return true; } } }
-1
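PackageCompilationAssemblyResolver in the record above probes NuGet package locations (the PROBING_DIRECTORIES app-context data, NUGET_PACKAGES, or the per-user .nuget/packages folder) and resolves a package library's compile-time assemblies to full paths. A hypothetical usage sketch, assuming the application exposes a DependencyContext with compile libraries (for example, built with PreserveCompilationContext enabled):

```csharp
// Hypothetical usage sketch, not taken from the PR in this record.
using System;
using System.Collections.Generic;
using Microsoft.Extensions.DependencyModel;
using Microsoft.Extensions.DependencyModel.Resolution;

class ResolverSketch
{
    static void Main()
    {
        // DependencyContext.Default can be null (e.g. single-file publish), so bail out early.
        DependencyContext context = DependencyContext.Default;
        if (context == null)
        {
            return;
        }

        var resolver = new PackageCompilationAssemblyResolver();
        var resolvedPaths = new List<string>();

        foreach (CompilationLibrary library in context.CompileLibraries)
        {
            // Returns true only for "package"-type libraries whose assemblies were found
            // under one of the probed NuGet directories; matches are appended to the list.
            if (resolver.TryResolveAssemblyPaths(library, resolvedPaths))
            {
                Console.WriteLine($"resolved {library.Name}");
            }
        }

        Console.WriteLine($"{resolvedPaths.Count} compile assembly path(s) total");
    }
}
```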
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/JIT/HardwareIntrinsics/X86/Avx2/ShiftLeftLogical.Int32.32.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; namespace JIT.HardwareIntrinsics.X86 { public static partial class Program { private static void ShiftLeftLogicalInt3232() { var test = new ImmUnaryOpTest__ShiftLeftLogicalInt3232(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); // Validates basic functionality works, using LoadAligned test.RunBasicScenario_LoadAligned(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); // Validates calling via reflection works, using LoadAligned test.RunReflectionScenario_LoadAligned(); } // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); // Validates passing a local works, using LoadAligned test.RunLclVarScenario_LoadAligned(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class ImmUnaryOpTest__ShiftLeftLogicalInt3232 { private struct TestStruct { public Vector256<Int32> _fld; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int32>, byte>(ref testStruct._fld), ref Unsafe.As<Int32, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector256<Int32>>()); return testStruct; } public void RunStructFldScenario(ImmUnaryOpTest__ShiftLeftLogicalInt3232 testClass) { var result = Avx2.ShiftLeftLogical(_fld, 32); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 32; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<Int32>>() / sizeof(Int32); private static readonly int RetElementCount = Unsafe.SizeOf<Vector256<Int32>>() / sizeof(Int32); private static Int32[] _data = new Int32[Op1ElementCount]; private static Vector256<Int32> _clsVar; private Vector256<Int32> _fld; 
private SimpleUnaryOpTest__DataTable<Int32, Int32> _dataTable; static ImmUnaryOpTest__ShiftLeftLogicalInt3232() { for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int32>, byte>(ref _clsVar), ref Unsafe.As<Int32, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector256<Int32>>()); } public ImmUnaryOpTest__ShiftLeftLogicalInt3232() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int32>, byte>(ref _fld), ref Unsafe.As<Int32, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector256<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetInt32(); } _dataTable = new SimpleUnaryOpTest__DataTable<Int32, Int32>(_data, new Int32[RetElementCount], LargestVectorSize); } public bool IsSupported => Avx2.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Avx2.ShiftLeftLogical( Unsafe.Read<Vector256<Int32>>(_dataTable.inArrayPtr), 32 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = Avx2.ShiftLeftLogical( Avx.LoadVector256((Int32*)(_dataTable.inArrayPtr)), 32 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunBasicScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_LoadAligned)); var result = Avx2.ShiftLeftLogical( Avx.LoadAlignedVector256((Int32*)(_dataTable.inArrayPtr)), 32 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Avx2).GetMethod(nameof(Avx2.ShiftLeftLogical), new Type[] { typeof(Vector256<Int32>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector256<Int32>>(_dataTable.inArrayPtr), (byte)32 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Int32>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(Avx2).GetMethod(nameof(Avx2.ShiftLeftLogical), new Type[] { typeof(Vector256<Int32>), typeof(byte) }) .Invoke(null, new object[] { Avx.LoadVector256((Int32*)(_dataTable.inArrayPtr)), (byte)32 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Int32>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_LoadAligned)); var result = typeof(Avx2).GetMethod(nameof(Avx2.ShiftLeftLogical), new Type[] { typeof(Vector256<Int32>), typeof(byte) }) .Invoke(null, new object[] { Avx.LoadAlignedVector256((Int32*)(_dataTable.inArrayPtr)), (byte)32 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Int32>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Avx2.ShiftLeftLogical( _clsVar, 32 ); 
Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var firstOp = Unsafe.Read<Vector256<Int32>>(_dataTable.inArrayPtr); var result = Avx2.ShiftLeftLogical(firstOp, 32); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var firstOp = Avx.LoadVector256((Int32*)(_dataTable.inArrayPtr)); var result = Avx2.ShiftLeftLogical(firstOp, 32); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunLclVarScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_LoadAligned)); var firstOp = Avx.LoadAlignedVector256((Int32*)(_dataTable.inArrayPtr)); var result = Avx2.ShiftLeftLogical(firstOp, 32); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new ImmUnaryOpTest__ShiftLeftLogicalInt3232(); var result = Avx2.ShiftLeftLogical(test._fld, 32); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Avx2.ShiftLeftLogical(_fld, 32); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Avx2.ShiftLeftLogical(test._fld, 32); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector256<Int32> firstOp, void* result, [CallerMemberName] string method = "") { Int32[] inArray = new Int32[Op1ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray[0]), firstOp); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Int32>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(void* firstOp, void* result, [CallerMemberName] string method = "") { Int32[] inArray = new Int32[Op1ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<Vector256<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Int32>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(Int32[] firstOp, Int32[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (0 != 
result[0]) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (0 != result[i]) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Avx2)}.{nameof(Avx2.ShiftLeftLogical)}<Int32>(Vector256<Int32><9>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; namespace JIT.HardwareIntrinsics.X86 { public static partial class Program { private static void ShiftLeftLogicalInt3232() { var test = new ImmUnaryOpTest__ShiftLeftLogicalInt3232(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); // Validates basic functionality works, using LoadAligned test.RunBasicScenario_LoadAligned(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); // Validates calling via reflection works, using LoadAligned test.RunReflectionScenario_LoadAligned(); } // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); // Validates passing a local works, using LoadAligned test.RunLclVarScenario_LoadAligned(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class ImmUnaryOpTest__ShiftLeftLogicalInt3232 { private struct TestStruct { public Vector256<Int32> _fld; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int32>, byte>(ref testStruct._fld), ref Unsafe.As<Int32, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector256<Int32>>()); return testStruct; } public void RunStructFldScenario(ImmUnaryOpTest__ShiftLeftLogicalInt3232 testClass) { var result = Avx2.ShiftLeftLogical(_fld, 32); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 32; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<Int32>>() / sizeof(Int32); private static readonly int RetElementCount = Unsafe.SizeOf<Vector256<Int32>>() / sizeof(Int32); private static Int32[] _data = new Int32[Op1ElementCount]; private static Vector256<Int32> _clsVar; private Vector256<Int32> _fld; 
private SimpleUnaryOpTest__DataTable<Int32, Int32> _dataTable; static ImmUnaryOpTest__ShiftLeftLogicalInt3232() { for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int32>, byte>(ref _clsVar), ref Unsafe.As<Int32, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector256<Int32>>()); } public ImmUnaryOpTest__ShiftLeftLogicalInt3232() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int32>, byte>(ref _fld), ref Unsafe.As<Int32, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector256<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetInt32(); } _dataTable = new SimpleUnaryOpTest__DataTable<Int32, Int32>(_data, new Int32[RetElementCount], LargestVectorSize); } public bool IsSupported => Avx2.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Avx2.ShiftLeftLogical( Unsafe.Read<Vector256<Int32>>(_dataTable.inArrayPtr), 32 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = Avx2.ShiftLeftLogical( Avx.LoadVector256((Int32*)(_dataTable.inArrayPtr)), 32 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunBasicScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_LoadAligned)); var result = Avx2.ShiftLeftLogical( Avx.LoadAlignedVector256((Int32*)(_dataTable.inArrayPtr)), 32 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Avx2).GetMethod(nameof(Avx2.ShiftLeftLogical), new Type[] { typeof(Vector256<Int32>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector256<Int32>>(_dataTable.inArrayPtr), (byte)32 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Int32>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(Avx2).GetMethod(nameof(Avx2.ShiftLeftLogical), new Type[] { typeof(Vector256<Int32>), typeof(byte) }) .Invoke(null, new object[] { Avx.LoadVector256((Int32*)(_dataTable.inArrayPtr)), (byte)32 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Int32>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_LoadAligned)); var result = typeof(Avx2).GetMethod(nameof(Avx2.ShiftLeftLogical), new Type[] { typeof(Vector256<Int32>), typeof(byte) }) .Invoke(null, new object[] { Avx.LoadAlignedVector256((Int32*)(_dataTable.inArrayPtr)), (byte)32 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Int32>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Avx2.ShiftLeftLogical( _clsVar, 32 ); 
Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var firstOp = Unsafe.Read<Vector256<Int32>>(_dataTable.inArrayPtr); var result = Avx2.ShiftLeftLogical(firstOp, 32); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var firstOp = Avx.LoadVector256((Int32*)(_dataTable.inArrayPtr)); var result = Avx2.ShiftLeftLogical(firstOp, 32); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunLclVarScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_LoadAligned)); var firstOp = Avx.LoadAlignedVector256((Int32*)(_dataTable.inArrayPtr)); var result = Avx2.ShiftLeftLogical(firstOp, 32); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new ImmUnaryOpTest__ShiftLeftLogicalInt3232(); var result = Avx2.ShiftLeftLogical(test._fld, 32); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Avx2.ShiftLeftLogical(_fld, 32); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Avx2.ShiftLeftLogical(test._fld, 32); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector256<Int32> firstOp, void* result, [CallerMemberName] string method = "") { Int32[] inArray = new Int32[Op1ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray[0]), firstOp); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Int32>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(void* firstOp, void* result, [CallerMemberName] string method = "") { Int32[] inArray = new Int32[Op1ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<Vector256<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Int32>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(Int32[] firstOp, Int32[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (0 != 
result[0]) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (0 != result[i]) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Avx2)}.{nameof(Avx2.ShiftLeftLogical)}<Int32>(Vector256<Int32><9>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
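The validation logic in the ShiftLeftLogical test above expects every output element to be zero: for Avx2.ShiftLeftLogical on 32-bit lanes, an immediate count of 32 equals the element width, so every bit is shifted out and each lane is cleared. A minimal sketch of that semantic, assuming AVX2-capable hardware:

```csharp
// Minimal sketch (assumes AVX2 support): a shift count equal to the Int32
// element width zeroes every lane, which is why the test in the record above
// validates that each output element is 0.
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class ShiftSketch
{
    static void Main()
    {
        if (!Avx2.IsSupported)
        {
            Console.WriteLine("AVX2 not supported on this machine.");
            return;
        }

        Vector256<int> value = Vector256.Create(1, 2, 3, 4, 5, 6, 7, 8);

        // Count 32 == element width for Int32, so every lane becomes 0.
        Vector256<int> shifted = Avx2.ShiftLeftLogical(value, 32);

        Console.WriteLine(shifted); // <0, 0, 0, 0, 0, 0, 0, 0>
    }
}
```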
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/JIT/Generics/Parameters/static_equalnull_struct01.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="static_equalnull_struct01.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="static_equalnull_struct01.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/libraries/Common/src/Interop/Windows/Advapi32/Interop.IsWellKnownSid.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.InteropServices; internal static partial class Interop { internal static partial class Advapi32 { [LibraryImport(Interop.Libraries.Advapi32, EntryPoint = "IsWellKnownSid", SetLastError = true)] internal static partial int IsWellKnownSid( byte[] sid, int type); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.InteropServices; internal static partial class Interop { internal static partial class Advapi32 { [LibraryImport(Interop.Libraries.Advapi32, EntryPoint = "IsWellKnownSid", SetLastError = true)] internal static partial int IsWellKnownSid( byte[] sid, int type); } }
-1
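The IsWellKnownSid interop declaration above is the kind of low-level advapi32 call that sits beneath the public SecurityIdentifier API. A hedged, Windows-only sketch using that public surface; the claim that it routes through this exact import is an assumption, not something stated in the record:

```csharp
// Windows-only sketch of the public API surface; whether it reaches
// advapi32!IsWellKnownSid via this exact interop is an assumption.
using System;
using System.Security.Principal;

class SidSketch
{
    static void Main()
    {
        // "Everyone" (WorldSid) needs no domain SID, so the second argument is null.
        var everyone = new SecurityIdentifier(WellKnownSidType.WorldSid, null);

        Console.WriteLine(everyone.Value);                                  // S-1-1-0
        Console.WriteLine(everyone.IsWellKnown(WellKnownSidType.WorldSid)); // True
    }
}
```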
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/libraries/Common/src/Interop/Windows/Advapi32/Interop.SetThreadToken.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.InteropServices; using Microsoft.Win32.SafeHandles; internal static partial class Interop { internal static partial class Advapi32 { [LibraryImport(Libraries.Advapi32, SetLastError = true)] [return: MarshalAs(UnmanagedType.Bool)] internal static partial bool SetThreadToken( IntPtr ThreadHandle, SafeTokenHandle? hToken); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.InteropServices; using Microsoft.Win32.SafeHandles; internal static partial class Interop { internal static partial class Advapi32 { [LibraryImport(Libraries.Advapi32, SetLastError = true)] [return: MarshalAs(UnmanagedType.Bool)] internal static partial bool SetThreadToken( IntPtr ThreadHandle, SafeTokenHandle? hToken); } }
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/JIT/Regression/JitBlue/GitHub_18235/GitHub_18235_2.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType /> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType /> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/JIT/HardwareIntrinsics/General/Vector256/LessThanOrEqualAll.Int64.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void LessThanOrEqualAllInt64() { var test = new VectorBooleanBinaryOpTest__LessThanOrEqualAllInt64(); // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorBooleanBinaryOpTest__LessThanOrEqualAllInt64 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private GCHandle inHandle1; private GCHandle inHandle2; private ulong alignment; public DataTable(Int64[] inArray1, Int64[] inArray2, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int64>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int64>(); if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int64, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int64, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector256<Int64> _fld1; public Vector256<Int64> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref 
Unsafe.As<Vector256<Int64>, byte>(ref testStruct._fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int64>, byte>(ref testStruct._fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Int64>>()); return testStruct; } public void RunStructFldScenario(VectorBooleanBinaryOpTest__LessThanOrEqualAllInt64 testClass) { var result = Vector256.LessThanOrEqualAll(_fld1, _fld2); testClass.ValidateResult(_fld1, _fld2, result); } } private static readonly int LargestVectorSize = 32; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<Int64>>() / sizeof(Int64); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector256<Int64>>() / sizeof(Int64); private static Int64[] _data1 = new Int64[Op1ElementCount]; private static Int64[] _data2 = new Int64[Op2ElementCount]; private static Vector256<Int64> _clsVar1; private static Vector256<Int64> _clsVar2; private Vector256<Int64> _fld1; private Vector256<Int64> _fld2; private DataTable _dataTable; static VectorBooleanBinaryOpTest__LessThanOrEqualAllInt64() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int64>, byte>(ref _clsVar1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int64>, byte>(ref _clsVar2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Int64>>()); } public VectorBooleanBinaryOpTest__LessThanOrEqualAllInt64() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int64>, byte>(ref _fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int64>, byte>(ref _fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Int64>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } _dataTable = new DataTable(_data1, _data2, LargestVectorSize); } public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Vector256.LessThanOrEqualAll( Unsafe.Read<Vector256<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector256<Int64>>(_dataTable.inArray2Ptr) ); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var method = typeof(Vector256).GetMethod(nameof(Vector256.LessThanOrEqualAll), new Type[] { typeof(Vector256<Int64>), typeof(Vector256<Int64>) }); if (method is null) { method = typeof(Vector256).GetMethod(nameof(Vector256.LessThanOrEqualAll), 1, new Type[] { typeof(Vector256<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), typeof(Vector256<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) }); } if 
(method.IsGenericMethodDefinition) { method = method.MakeGenericMethod(typeof(Int64)); } var result = method.Invoke(null, new object[] { Unsafe.Read<Vector256<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector256<Int64>>(_dataTable.inArray2Ptr) }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (bool)(result)); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Vector256.LessThanOrEqualAll( _clsVar1, _clsVar2 ); ValidateResult(_clsVar1, _clsVar2, result); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector256<Int64>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector256<Int64>>(_dataTable.inArray2Ptr); var result = Vector256.LessThanOrEqualAll(op1, op2); ValidateResult(op1, op2, result); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new VectorBooleanBinaryOpTest__LessThanOrEqualAllInt64(); var result = Vector256.LessThanOrEqualAll(test._fld1, test._fld2); ValidateResult(test._fld1, test._fld2, result); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Vector256.LessThanOrEqualAll(_fld1, _fld2); ValidateResult(_fld1, _fld2, result); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Vector256.LessThanOrEqualAll(test._fld1, test._fld2); ValidateResult(test._fld1, test._fld2, result); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } private void ValidateResult(Vector256<Int64> op1, Vector256<Int64> op2, bool result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int64[] inArray2 = new Int64[Op2ElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), op2); ValidateResult(inArray1, inArray2, result, method); } private void ValidateResult(void* op1, void* op2, bool result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int64[] inArray2 = new Int64[Op2ElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector256<Int64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector256<Int64>>()); ValidateResult(inArray1, inArray2, result, method); } private void ValidateResult(Int64[] left, Int64[] right, bool result, [CallerMemberName] string method = "") { bool succeeded = true; var expectedResult = true; for (var i = 0; i < Op1ElementCount; i++) { expectedResult &= (left[i] <= right[i]); } succeeded = (expectedResult == result); if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Vector256)}.{nameof(Vector256.LessThanOrEqualAll)}<Int64>(Vector256<Int64>, Vector256<Int64>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({result})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = 
false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void LessThanOrEqualAllInt64() { var test = new VectorBooleanBinaryOpTest__LessThanOrEqualAllInt64(); // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorBooleanBinaryOpTest__LessThanOrEqualAllInt64 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private GCHandle inHandle1; private GCHandle inHandle2; private ulong alignment; public DataTable(Int64[] inArray1, Int64[] inArray2, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int64>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int64>(); if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int64, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int64, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector256<Int64> _fld1; public Vector256<Int64> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref 
Unsafe.As<Vector256<Int64>, byte>(ref testStruct._fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int64>, byte>(ref testStruct._fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Int64>>()); return testStruct; } public void RunStructFldScenario(VectorBooleanBinaryOpTest__LessThanOrEqualAllInt64 testClass) { var result = Vector256.LessThanOrEqualAll(_fld1, _fld2); testClass.ValidateResult(_fld1, _fld2, result); } } private static readonly int LargestVectorSize = 32; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<Int64>>() / sizeof(Int64); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector256<Int64>>() / sizeof(Int64); private static Int64[] _data1 = new Int64[Op1ElementCount]; private static Int64[] _data2 = new Int64[Op2ElementCount]; private static Vector256<Int64> _clsVar1; private static Vector256<Int64> _clsVar2; private Vector256<Int64> _fld1; private Vector256<Int64> _fld2; private DataTable _dataTable; static VectorBooleanBinaryOpTest__LessThanOrEqualAllInt64() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int64>, byte>(ref _clsVar1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int64>, byte>(ref _clsVar2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Int64>>()); } public VectorBooleanBinaryOpTest__LessThanOrEqualAllInt64() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int64>, byte>(ref _fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int64>, byte>(ref _fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Int64>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } _dataTable = new DataTable(_data1, _data2, LargestVectorSize); } public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Vector256.LessThanOrEqualAll( Unsafe.Read<Vector256<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector256<Int64>>(_dataTable.inArray2Ptr) ); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var method = typeof(Vector256).GetMethod(nameof(Vector256.LessThanOrEqualAll), new Type[] { typeof(Vector256<Int64>), typeof(Vector256<Int64>) }); if (method is null) { method = typeof(Vector256).GetMethod(nameof(Vector256.LessThanOrEqualAll), 1, new Type[] { typeof(Vector256<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), typeof(Vector256<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) }); } if 
(method.IsGenericMethodDefinition) { method = method.MakeGenericMethod(typeof(Int64)); } var result = method.Invoke(null, new object[] { Unsafe.Read<Vector256<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector256<Int64>>(_dataTable.inArray2Ptr) }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (bool)(result)); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Vector256.LessThanOrEqualAll( _clsVar1, _clsVar2 ); ValidateResult(_clsVar1, _clsVar2, result); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector256<Int64>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector256<Int64>>(_dataTable.inArray2Ptr); var result = Vector256.LessThanOrEqualAll(op1, op2); ValidateResult(op1, op2, result); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new VectorBooleanBinaryOpTest__LessThanOrEqualAllInt64(); var result = Vector256.LessThanOrEqualAll(test._fld1, test._fld2); ValidateResult(test._fld1, test._fld2, result); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Vector256.LessThanOrEqualAll(_fld1, _fld2); ValidateResult(_fld1, _fld2, result); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Vector256.LessThanOrEqualAll(test._fld1, test._fld2); ValidateResult(test._fld1, test._fld2, result); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } private void ValidateResult(Vector256<Int64> op1, Vector256<Int64> op2, bool result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int64[] inArray2 = new Int64[Op2ElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), op2); ValidateResult(inArray1, inArray2, result, method); } private void ValidateResult(void* op1, void* op2, bool result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int64[] inArray2 = new Int64[Op2ElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector256<Int64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector256<Int64>>()); ValidateResult(inArray1, inArray2, result, method); } private void ValidateResult(Int64[] left, Int64[] right, bool result, [CallerMemberName] string method = "") { bool succeeded = true; var expectedResult = true; for (var i = 0; i < Op1ElementCount; i++) { expectedResult &= (left[i] <= right[i]); } succeeded = (expectedResult == result); if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Vector256)}.{nameof(Vector256.LessThanOrEqualAll)}<Int64>(Vector256<Int64>, Vector256<Int64>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({result})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = 
false; } } } }
-1
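The scalar loop in ValidateResult above mirrors the contract of Vector256.LessThanOrEqualAll: the call returns true only when every element of the left operand is less than or equal to the corresponding element of the right operand. A minimal cross-platform sketch (no hardware intrinsics required):

```csharp
// Minimal sketch: LessThanOrEqualAll is an all-elements comparison, matching
// the element-by-element expectedResult loop in the record above.
using System;
using System.Runtime.Intrinsics;

class AllSketch
{
    static void Main()
    {
        Vector256<long> left  = Vector256.Create(1L, 2L, 3L, 4L);
        Vector256<long> right = Vector256.Create(1L, 5L, 3L, 9L);

        Console.WriteLine(Vector256.LessThanOrEqualAll(left, right)); // True: every lane satisfies <=
        Console.WriteLine(Vector256.LessThanOrEqualAll(right, left)); // False: lanes 1 and 3 do not
    }
}
```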
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd.Arm64/StorePairNonTemporal.Vector64.UInt16.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Reflection; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void StorePairNonTemporal_Vector64_UInt16() { var test = new StoreBinaryOpTest__StorePairNonTemporal_Vector64_UInt16(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class StoreBinaryOpTest__StorePairNonTemporal_Vector64_UInt16 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(UInt16[] inArray1, UInt16[] inArray2, UInt16[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt16>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<UInt16>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<UInt16>(); if ((alignment != 16 && alignment != 32) || (alignment * 
2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt16, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt16, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<UInt16> _fld1; public Vector64<UInt16> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); return testStruct; } public void RunStructFldScenario(StoreBinaryOpTest__StorePairNonTemporal_Vector64_UInt16 testClass) { AdvSimd.Arm64.StorePairNonTemporal((UInt16*)testClass._dataTable.outArrayPtr, _fld1, _fld2); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(StoreBinaryOpTest__StorePairNonTemporal_Vector64_UInt16 testClass) { fixed (Vector64<UInt16>* pFld1 = &_fld1) fixed (Vector64<UInt16>* pFld2 = &_fld2) { AdvSimd.Arm64.StorePairNonTemporal( (UInt16*)testClass._dataTable.outArrayPtr, AdvSimd.LoadVector64((UInt16*)(pFld1)), AdvSimd.LoadVector64((UInt16*)(pFld2)) ); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<UInt16>>() / sizeof(UInt16); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<UInt16>>() / sizeof(UInt16); private static readonly int RetElementCount = Op1ElementCount + Op2ElementCount; private static UInt16[] _data1 = new UInt16[Op1ElementCount]; private static UInt16[] _data2 = new UInt16[Op2ElementCount]; private static Vector64<UInt16> _clsVar1; private static Vector64<UInt16> _clsVar2; private Vector64<UInt16> _fld1; private Vector64<UInt16> _fld2; private DataTable _dataTable; static StoreBinaryOpTest__StorePairNonTemporal_Vector64_UInt16() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } 
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref _clsVar1), ref Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref _clsVar2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); } public StoreBinaryOpTest__StorePairNonTemporal_Vector64_UInt16() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref _fld1), ref Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref _fld2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } _dataTable = new DataTable(_data1, _data2, new UInt16[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.Arm64.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); AdvSimd.Arm64.StorePairNonTemporal( (UInt16*)_dataTable.outArrayPtr, Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray2Ptr) ); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); AdvSimd.Arm64.StorePairNonTemporal( (UInt16*)_dataTable.outArrayPtr, AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray2Ptr)) ); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.StorePairNonTemporal), new Type[] { typeof(UInt16*), typeof(Vector64<UInt16>), typeof(Vector64<UInt16>) }) .Invoke(null, new object[] { Pointer.Box(_dataTable.outArrayPtr, typeof(UInt16*)), Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray2Ptr) }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.StorePairNonTemporal), new Type[] { typeof(UInt16*), typeof(Vector64<UInt16>), typeof(Vector64<UInt16>) }) .Invoke(null, new object[] { Pointer.Box(_dataTable.outArrayPtr, typeof(UInt16*)), AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray2Ptr)) }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); AdvSimd.Arm64.StorePairNonTemporal((UInt16*)_dataTable.outArrayPtr, _clsVar1, _clsVar2); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector64<UInt16>* pClsVar1 = &_clsVar1) fixed (Vector64<UInt16>* pClsVar2 = &_clsVar2) { AdvSimd.Arm64.StorePairNonTemporal( (UInt16*)_dataTable.outArrayPtr, AdvSimd.LoadVector64((UInt16*)(pClsVar1)), AdvSimd.LoadVector64((UInt16*)(pClsVar2)) ); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray2Ptr); AdvSimd.Arm64.StorePairNonTemporal((UInt16*)_dataTable.outArrayPtr, op1, op2); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray2Ptr)); AdvSimd.Arm64.StorePairNonTemporal((UInt16*)_dataTable.outArrayPtr, op1, op2); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new StoreBinaryOpTest__StorePairNonTemporal_Vector64_UInt16(); AdvSimd.Arm64.StorePairNonTemporal((UInt16*)_dataTable.outArrayPtr, test._fld1, test._fld2); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new StoreBinaryOpTest__StorePairNonTemporal_Vector64_UInt16(); fixed (Vector64<UInt16>* pFld1 = &test._fld1) fixed (Vector64<UInt16>* pFld2 = &test._fld2) { AdvSimd.Arm64.StorePairNonTemporal( (UInt16*)_dataTable.outArrayPtr, AdvSimd.LoadVector64((UInt16*)(pFld1)), AdvSimd.LoadVector64((UInt16*)(pFld2)) ); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); AdvSimd.Arm64.StorePairNonTemporal((UInt16*)_dataTable.outArrayPtr, _fld1, _fld2); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<UInt16>* pFld1 = &_fld1) fixed (Vector64<UInt16>* pFld2 = &_fld2) { AdvSimd.Arm64.StorePairNonTemporal( (UInt16*)_dataTable.outArrayPtr, AdvSimd.LoadVector64((UInt16*)(pFld1)), AdvSimd.LoadVector64((UInt16*)(pFld2)) ); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); AdvSimd.Arm64.StorePairNonTemporal((UInt16*)_dataTable.outArrayPtr, test._fld1, test._fld2); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); AdvSimd.Arm64.StorePairNonTemporal( (UInt16*)_dataTable.outArrayPtr, AdvSimd.LoadVector64((UInt16*)(&test._fld1)), AdvSimd.LoadVector64((UInt16*)(&test._fld2)) ); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void 
RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<UInt16> op1, Vector64<UInt16> op2, void* result, [CallerMemberName] string method = "") { UInt16[] inArray1 = new UInt16[Op1ElementCount]; UInt16[] inArray2 = new UInt16[Op2ElementCount]; UInt16[] outArray = new UInt16[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)(RetElementCount * Unsafe.SizeOf<UInt16>())); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { UInt16[] inArray1 = new UInt16[Op1ElementCount]; UInt16[] inArray2 = new UInt16[Op2ElementCount]; UInt16[] outArray = new UInt16[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)(RetElementCount * Unsafe.SizeOf<UInt16>())); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(UInt16[] firstOp, UInt16[] secondOp, UInt16[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (int i = 0; i < RetElementCount; i++) { if (Helpers.Concat(firstOp, secondOp, i) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd.Arm64)}.{nameof(AdvSimd.Arm64.StorePairNonTemporal)}<UInt16>(Vector64<UInt16>, Vector64<UInt16>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($"secondOp: ({string.Join(", ", secondOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/JIT/Methodical/Arrays/lcs/lcsmax.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; namespace JitTest { internal class LCS { private const int RANK = 8; private static String buildLCS(int[,,,,,,,] b, char[] X, int[] ind) { for (int i = 0; i < RANK; i++) if (ind[i] == 0) return ""; int L = b[ind[0], ind[1], ind[2], ind[3], ind[4], ind[5], ind[6], ind[7]]; if (L == RANK) { for (int i = 0; i < RANK; i++) ind[i]--; int idx = ind[0]; return buildLCS(b, X, ind) + X[idx]; } if (L >= 0 && L < RANK) { ind[L]--; return buildLCS(b, X, ind); } throw new Exception(); } private static void findLCS(int[,,,,,,,] c, int[,,,,,,,] b, char[][] seq, int[] len) { int[] ind = new int[RANK]; for (int i = 0; i < RANK; i++) ind[i] = 1; int R = 0; while (R < RANK) { bool eqFlag = true; for (int i = 1; i < RANK; i++) { if (seq[i][ind[i] - 1] != seq[i - 1][ind[i - 1] - 1]) { eqFlag = false; break; } } if (eqFlag) { c[ind[0], ind[1], ind[2], ind[3], ind[4], ind[5], ind[6], ind[7]] = c[ind[0] - 1, ind[1] - 1, ind[2] - 1, ind[3] - 1, ind[4] - 1, ind[5] - 1, ind[6] - 1, ind[7] - 1] + 1; b[ind[0], ind[1], ind[2], ind[3], ind[4], ind[5], ind[6], ind[7]] = RANK; } else { R = -1; int M = -1; for (int i = 0; i < RANK; i++) { ind[i]--; if (c[ind[0], ind[1], ind[2], ind[3], ind[4], ind[5], ind[6], ind[7]] > M) { R = i; M = c[ind[0], ind[1], ind[2], ind[3], ind[4], ind[5], ind[6], ind[7]]; } ind[i]++; } if (R < 0 || M < 0) throw new Exception(); c[ind[0], ind[1], ind[2], ind[3], ind[4], ind[5], ind[6], ind[7]] = M; b[ind[0], ind[1], ind[2], ind[3], ind[4], ind[5], ind[6], ind[7]] = R; } R = 0; while (R < RANK) { ind[R]++; if (ind[R] < len[R]) break; ind[R++] = 1; } } } private static int Main() { Console.WriteLine("Test searches for longest common subsequence of 8 strings\n\n"); String[] str = new String[RANK] { "abdc", "badc", "bdacw", "bdca", "bcfdc", "bddsc", "bdccca", "bbdc" }; int[] len = new int[RANK]; char[][] seq = new char[RANK][]; for (int i = 0; i < RANK; i++) { len[i] = str[i].Length + 1; seq[i] = str[i].ToCharArray(); } int[,,,,,,,] c = new int[len[0], len[1], len[2], len[3], len[4], len[5], len[6], len[7]]; int[,,,,,,,] b = new int[len[0], len[1], len[2], len[3], len[4], len[5], len[6], len[7]]; findLCS(c, b, seq, len); for (int i = 0; i < RANK; i++) len[i]--; if ("bdc" == buildLCS(b, seq[0], len)) { Console.WriteLine("Test passed"); return 100; } else { Console.WriteLine("Test failed."); return 0; } } } }
-1
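The records in this dump all reference dotnet/runtime PR 66,422, "Fix JsonSerializer src-gen issues with reference handler feature", even though the files listed here are unrelated test and interop sources that the PR did not modify (label -1). As a purely illustrative aside, the sketch below shows the kind of usage that PR description refers to: combining a source-generated `JsonSerializerContext` with `ReferenceHandler.Preserve` at runtime. The `Node` and `NodeContext` types are hypothetical names invented for this example, not code from the PR, and this is a minimal sketch of the scenario as I understand it, not the PR's actual fix.

```csharp
using System;
using System.Text.Json;
using System.Text.Json.Serialization;

// Hypothetical POCO with a parent/child cycle; Node and NodeContext are
// illustrative names only, not types taken from PR 66,422.
public class Node
{
    public string? Name { get; set; }
    public Node? Parent { get; set; }
}

// The System.Text.Json source generator emits serialization metadata for Node
// into this partial context class at compile time.
[JsonSerializable(typeof(Node))]
public partial class NodeContext : JsonSerializerContext
{
}

public static class ReferenceHandlerDemo
{
    public static void Main()
    {
        // ReferenceHandler.Preserve writes $id/$ref metadata so object cycles
        // can round-trip instead of failing during serialization.
        var options = new JsonSerializerOptions
        {
            ReferenceHandler = ReferenceHandler.Preserve
        };

        var child = new Node { Name = "child" };
        child.Parent = new Node { Name = "parent", Parent = child }; // object cycle

        // Combine the runtime options with the source-generated context and
        // serialize through the metadata-based (src-gen) path.
        string json = JsonSerializer.Serialize(child, typeof(Node), new NodeContext(options));
        Console.WriteLine(json);
    }
}
```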
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/X509Certificates/X509Pal.iOS.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics; using System.Security.Cryptography.Apple; namespace System.Security.Cryptography.X509Certificates { internal static partial class X509Pal { private static partial IX509Pal BuildSingleton() { return new AppleX509Pal(); } private sealed partial class AppleX509Pal : ManagedX509ExtensionProcessor, IX509Pal { public AsymmetricAlgorithm DecodePublicKey(Oid oid, byte[] encodedKeyValue, byte[] encodedParameters, ICertificatePal? certificatePal) { if (oid.Value != Oids.Rsa) { throw new NotSupportedException(SR.NotSupported_KeyAlgorithm); } if (certificatePal is AppleCertificatePal applePal) { SafeSecKeyRefHandle key = Interop.AppleCrypto.X509GetPublicKey(applePal.CertificateHandle); Debug.Assert(!key.IsInvalid); return new RSAImplementation.RSASecurityTransforms(key); } else { RSA rsa = RSA.Create(); try { rsa.ImportRSAPublicKey(new ReadOnlySpan<byte>(encodedKeyValue), out _); return rsa; } catch (Exception) { rsa.Dispose(); throw; } } } public X509ContentType GetCertContentType(ReadOnlySpan<byte> rawData) { const int errSecUnknownFormat = -25257; if (rawData == null || rawData.Length == 0) { // Throw to match Windows and Unix behavior. throw Interop.AppleCrypto.CreateExceptionForOSStatus(errSecUnknownFormat); } X509ContentType result = X509ContentType.Unknown; AppleCertificatePal.TryDecodePem( rawData, (derData, contentType) => { result = contentType; return false; }); if (result == X509ContentType.Unknown) { result = AppleCertificatePal.GetDerCertContentType(rawData); } if (result == X509ContentType.Unknown) { // Throw to match Windows and Unix behavior. throw Interop.AppleCrypto.CreateExceptionForOSStatus(errSecUnknownFormat); } return result; } public X509ContentType GetCertContentType(string fileName) { return GetCertContentType(System.IO.File.ReadAllBytes(fileName)); } } } }
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/JIT/Methodical/eh/leaves/oponerror_r.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>None</DebugType> <Optimize>False</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="oponerror.cs" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\..\..\common\eh_common.csproj" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/mono/mono/tests/soft-float-tests.cs
using System; public class Driver { static float D = 3; public static int StoreStaticField (float y) { D = y; return 0; } public static float ReadSingle () { Object o = ""; o.ToString (); return 64f; } public static int TestStoreArray() { float[] arr = new float[10]; arr[0] = ReadSingle(); return 0; } public static int Main () { int res = 0; res = StoreStaticField (128f); res |= TestStoreArray(); return res; } }
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/libraries/Common/src/Interop/Windows/User32/Interop.SetClassLongPtr.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.InteropServices; internal static partial class Interop { internal static partial class User32 { [LibraryImport(Libraries.User32)] public static partial IntPtr SetClassLongPtrW(IntPtr hwnd, int nIndex, IntPtr dwNewLong); } }
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/libraries/System.Private.Xml/tests/XmlConvert/VerifyNameTests4.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using OLEDB.Test.ModuleCore; namespace System.Xml.Tests { internal class VerifyNameTests4 : CTestCase { public override void AddChildren() { AddChild(new CVariation(TFS_469847) { Attribute = new Variation("Test for VerifyNMTOKEN(foo\U00010000bar)") { Param = 1 } }); AddChild(new CVariation(TFS_469847) { Attribute = new Variation("Test for VerifyNCName(foo\U00010000bar)") { Param = 3 } }); AddChild(new CVariation(TFS_469847) { Attribute = new Variation("Test for VerifyTOKEN(foo\U00010000bar)") { Param = 4 } }); AddChild(new CVariation(TFS_469847) { Attribute = new Variation("Test for VerifyXmlChars(foo\U00010000bar)") { Param = 5 } }); AddChild(new CVariation(TFS_469847) { Attribute = new Variation("Test for VerifyName(foo\U00010000bar)") { Param = 2 } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("9.Test for VerifyXmlChars(a\udfff\udbffb)") { Params = new object[] { 9, typeof(XmlException) } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("5.Test for VerifyXmlChars(a\udbff\udfffb)") { Params = new object[] { 5, null } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("8.Test for VerifyXmlChars(abcddcba\udbff\udfff)") { Params = new object[] { 8, null } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("1.Test for VerifyXmlChars(null)") { Params = new object[] { 1, typeof(ArgumentNullException) } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("10.Test for VerifyXmlChars(a\udfffb)") { Params = new object[] { 10, typeof(XmlException) } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("11.Test for VerifyXmlChars(a\udbffb)") { Params = new object[] { 11, typeof(XmlException) } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("12.Test for VerifyXmlChars(abcd\udbff \udfffdcba)") { Params = new object[] { 12, typeof(XmlException) } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("13.Test for VerifyXmlChars(\uffffabcd\ud801\udc01dcba)") { Params = new object[] { 13, typeof(XmlException) } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("14.Test for VerifyXmlChars(abcd\uffff\ud801\udc01dcba)") { Params = new object[] { 14, typeof(XmlException) } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("15.Test for VerifyXmlChars(abcd\ud801\udc01dcba\uffff)") { Params = new object[] { 15, typeof(XmlException) } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("6.Test for VerifyXmlChars(abcd\udbff\udfffdcba)") { Params = new object[] { 6, null } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("7.Test for VerifyXmlChars(\udbff\udfffabcddcba)") { Params = new object[] { 7, null } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("2.Test for VerifyXmlChars(string.Empty)") { Params = new object[] { 2, null } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("3.Test for VerifyXmlChars(a)") { Params = new object[] { 3, null } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("4.Test for VerifyXmlChars(ab)") { Params = new object[] { 4, null } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for 
VerifyPublicId (null)") { Params = new object[] { null, typeof(ArgumentNullException) } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (string.Empty)") { Params = new object[] { "", null } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (!)") { Params = new object[] { "!", null } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (ab)") { Params = new object[] { "ab", null } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (+,./)") { Params = new object[] { "+,./", null } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (a-zA-Z0-9-'( )+,./:=?;!*#\n@$_%\\r)") { Params = new object[] { "a-zA-Z0-9-'( )+,./:=?;!*#\n@$_%\r", null } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (\\udb01\\udc01abc)") { Params = new object[] { "\udb01\udc01abc", typeof(XmlException) } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (a\\udb01\\udc01bc)") { Params = new object[] { "a\udb01\udc01bc", typeof(XmlException) } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (abc\\udb01\\udc01)") { Params = new object[] { "abc\udb01\udc01", typeof(XmlException) } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (\\udb01abc)") { Params = new object[] { "\udb01abc", typeof(XmlException) } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (a\\udb01abc)") { Params = new object[] { "a\udb01abc", typeof(XmlException) } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (abc\\udb01)") { Params = new object[] { "abc\udb01", typeof(XmlException) } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (\\udf01abc)") { Params = new object[] { "\udf01abc", typeof(XmlException) } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (a\\udf01abc)") { Params = new object[] { "a\udf01abc", typeof(XmlException) } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (abc\\udf01)") { Params = new object[] { "abc\udf01", typeof(XmlException) } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (\\uffffabc)") { Params = new object[] { "\uffffabc", typeof(XmlException) } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (a\\uffffabc)") { Params = new object[] { "a\uffffabc", typeof(XmlException) } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (abc\\uffff)") { Params = new object[] { "abc\uffff", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\t\t)") { Params = new object[] { "\t\t", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace( )") { Params = new object[] { " ", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\n)") { Params = new object[] { "\n\n", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = 
new Variation("Test for VerifyWhitespace(\r\r)") { Params = new object[] { "\r\r", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(null)") { Params = new object[] { null, typeof(ArgumentNullException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace( )") { Params = new object[] { " ", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\n\n)") { Params = new object[] { "\n\n\n", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\r\r\r)") { Params = new object[] { "\r\r\r", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\t\t\t)") { Params = new object[] { "\t\t\t", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace( )") { Params = new object[] { " ", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\n\n\n)") { Params = new object[] { "\n\n\n\n", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\r\r\r\r)") { Params = new object[] { "\r\r\r\r", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\t\t\t\t)") { Params = new object[] { "\t\t\t\t", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\r\t\n)") { Params = new object[] { "\n\r\t\n", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\r\t )") { Params = new object[] { "\n\r\t ", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(a\n\r\t\n)") { Params = new object[] { "a\n\r\t\n", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\r\t\na)") { Params = new object[] { "\n\r\t\na", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\rb\t\n)") { Params = new object[] { "\n\rb\t\n", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\ud801\udc01\n\r\t\n)") { Params = new object[] { "\ud801\udc01\n\r\t\n", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\r\t\n\ud801\udc01)") { Params = new object[] { "\n\r\t\n\ud801\udc01", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\\n\\r\\ud801\\udc01\\t\\n)") { Params = new object[] { "\n\r\ud801\udc01\t\n", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\\udc01\\ud801\\n\\r\\t\\n)") { Params = new object[] { "\ufffd\ufffd\ufffd\ufffd\n\r\t\n", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\\n\\r\\t\\n\\udc01\\ud801)") { Params = new object[] { "\n\r\t\n\ufffd\ufffd\ufffd\ufffd", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\\n\\r\\udc01\\ud801\\t\n)") { Params = new object[] 
{ "\n\r\ufffd\ufffd\ufffd\ufffd\t\n", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\ud801\n\r\t\n)") { Params = new object[] { "\ufffd\ufffd\n\r\t\n", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\r\t\n\udc01)") { Params = new object[] { "\n\r\t\n\ufffd\ufffd", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\r\ud801\t\n)") { Params = new object[] { "\n\r\ufffd\ufffd\t\n", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\t)") { Params = new object[] { "\t", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\r\t\n\udc01)") { Params = new object[] { "\n\r\t\n\ufffd\ufffd", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\r\udc01\t\n)") { Params = new object[] { "\n\r\ufffd\ufffd\t\n", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\uffff\n\r\t\n)") { Params = new object[] { "\uffff\n\r\t\n", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\r\t\n\uffff)") { Params = new object[] { "\n\r\t\n\uffff", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\r\uffff\t\n)") { Params = new object[] { "\n\r\uffff\t\n", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(string.Empty)") { Params = new object[] { "", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\udc01\n\r\t\n)") { Params = new object[] { "\ufffd\ufffd\n\r\t\n", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace( )") { Params = new object[] { " ", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n)") { Params = new object[] { "\n", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\r)") { Params = new object[] { "\r", null } } }); } private int TFS_469847() { var param = (int)CurVariation.Param; try { switch (param) { case 1: XmlConvert.VerifyNMTOKEN("foo\ud800\udc00bar"); break; case 2: XmlConvert.VerifyName("foo\ud800\udc00bar"); break; case 3: XmlConvert.VerifyNCName("foo\ud800\udc00bar"); break; case 5: XmlConvert.VerifyXmlChars("foo\ud800\udc00bar"); break; } } catch (XmlException e) { CError.WriteLine(e.Message); return TEST_PASS; } return (param == 4 || param == 5) ? TEST_PASS : TEST_FAIL; } private int VerifyPublicId() { var inputString = (string)CurVariation.Params[0]; var exceptionType = (Type)CurVariation.Params[1]; try { string outString = XmlConvert.VerifyPublicId(inputString); CError.Compare(inputString, outString, "Content"); } catch (ArgumentNullException e) { return (exceptionType != null && e.GetType().Name == exceptionType.Name) ? 
TEST_PASS : TEST_FAIL; } catch (XmlException e) { CError.WriteLine(e.LineNumber); CError.WriteLine(e.LinePosition); return (exceptionType != null && e.GetType().Name == exceptionType.Name) ? TEST_PASS : TEST_FAIL; } return exceptionType == null ? TEST_PASS : TEST_FAIL; } /// <summary> /// Params[] = { inputString, shouldThrow } /// </summary> private int VerifyWhitespace() { var inputString = (string)CurVariation.Params[0]; var exceptionType = (Type)CurVariation.Params[1]; try { string outString = XmlConvert.VerifyWhitespace(inputString); CError.Compare(inputString, outString, "Content"); } catch (ArgumentNullException e) { return (exceptionType != null && e.GetType().Name == exceptionType.Name) ? TEST_PASS : TEST_FAIL; } catch (XmlException e) { CError.WriteLine(e.LineNumber); CError.WriteLine(e.LinePosition); return (exceptionType != null && e.GetType().Name == exceptionType.Name) ? TEST_PASS : TEST_FAIL; } return exceptionType == null ? TEST_PASS : TEST_FAIL; } private int VerifyXmlCharsTests() { var param = (int)CurVariation.Params[0]; var exceptionType = (Type)CurVariation.Params[1]; string inputString = string.Empty; switch (param) { case 1: inputString = null; break; case 2: inputString = ""; break; case 3: inputString = "a"; break; case 4: inputString = "ab"; break; case 5: inputString = "a\udbff\udfffb"; break; case 6: inputString = "abcd\udbff\udfffdcba"; break; case 7: inputString = "\udbff\udfffabcddcba"; break; case 8: inputString = "abcddcba\udbff\udfff"; break; case 9: inputString = "a\udfff\udbffb"; break; case 10: inputString = "a\udfffb"; break; case 11: inputString = "a\udbffb"; break; case 12: inputString = "abcd\udbff \udfffdcba"; break; case 13: inputString = "\uffffabcd\ud801\udc01dcba"; break; case 14: inputString = "abcd\uffff\ud801\udc01dcba"; break; case 15: inputString = "abcd\ud801\udc01dcba\uffff"; break; } try { string outString = XmlConvert.VerifyXmlChars(inputString); CError.Compare(inputString, outString, "Content"); } catch (ArgumentNullException e) { return (exceptionType != null && e.GetType().Name == exceptionType.Name) ? TEST_PASS : TEST_FAIL; } catch (XmlException e) { CError.WriteLine(e.LineNumber); CError.WriteLine(e.LinePosition); return (exceptionType != null && e.GetType().Name == exceptionType.Name) ? TEST_PASS : TEST_FAIL; } return exceptionType == null ? TEST_PASS : TEST_FAIL; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using OLEDB.Test.ModuleCore; namespace System.Xml.Tests { internal class VerifyNameTests4 : CTestCase { public override void AddChildren() { AddChild(new CVariation(TFS_469847) { Attribute = new Variation("Test for VerifyNMTOKEN(foo\U00010000bar)") { Param = 1 } }); AddChild(new CVariation(TFS_469847) { Attribute = new Variation("Test for VerifyNCName(foo\U00010000bar)") { Param = 3 } }); AddChild(new CVariation(TFS_469847) { Attribute = new Variation("Test for VerifyTOKEN(foo\U00010000bar)") { Param = 4 } }); AddChild(new CVariation(TFS_469847) { Attribute = new Variation("Test for VerifyXmlChars(foo\U00010000bar)") { Param = 5 } }); AddChild(new CVariation(TFS_469847) { Attribute = new Variation("Test for VerifyName(foo\U00010000bar)") { Param = 2 } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("9.Test for VerifyXmlChars(a\udfff\udbffb)") { Params = new object[] { 9, typeof(XmlException) } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("5.Test for VerifyXmlChars(a\udbff\udfffb)") { Params = new object[] { 5, null } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("8.Test for VerifyXmlChars(abcddcba\udbff\udfff)") { Params = new object[] { 8, null } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("1.Test for VerifyXmlChars(null)") { Params = new object[] { 1, typeof(ArgumentNullException) } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("10.Test for VerifyXmlChars(a\udfffb)") { Params = new object[] { 10, typeof(XmlException) } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("11.Test for VerifyXmlChars(a\udbffb)") { Params = new object[] { 11, typeof(XmlException) } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("12.Test for VerifyXmlChars(abcd\udbff \udfffdcba)") { Params = new object[] { 12, typeof(XmlException) } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("13.Test for VerifyXmlChars(\uffffabcd\ud801\udc01dcba)") { Params = new object[] { 13, typeof(XmlException) } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("14.Test for VerifyXmlChars(abcd\uffff\ud801\udc01dcba)") { Params = new object[] { 14, typeof(XmlException) } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("15.Test for VerifyXmlChars(abcd\ud801\udc01dcba\uffff)") { Params = new object[] { 15, typeof(XmlException) } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("6.Test for VerifyXmlChars(abcd\udbff\udfffdcba)") { Params = new object[] { 6, null } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("7.Test for VerifyXmlChars(\udbff\udfffabcddcba)") { Params = new object[] { 7, null } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("2.Test for VerifyXmlChars(string.Empty)") { Params = new object[] { 2, null } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("3.Test for VerifyXmlChars(a)") { Params = new object[] { 3, null } } }); AddChild(new CVariation(VerifyXmlCharsTests) { Attribute = new Variation("4.Test for VerifyXmlChars(ab)") { Params = new object[] { 4, null } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for 
VerifyPublicId (null)") { Params = new object[] { null, typeof(ArgumentNullException) } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (string.Empty)") { Params = new object[] { "", null } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (!)") { Params = new object[] { "!", null } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (ab)") { Params = new object[] { "ab", null } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (+,./)") { Params = new object[] { "+,./", null } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (a-zA-Z0-9-'( )+,./:=?;!*#\n@$_%\\r)") { Params = new object[] { "a-zA-Z0-9-'( )+,./:=?;!*#\n@$_%\r", null } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (\\udb01\\udc01abc)") { Params = new object[] { "\udb01\udc01abc", typeof(XmlException) } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (a\\udb01\\udc01bc)") { Params = new object[] { "a\udb01\udc01bc", typeof(XmlException) } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (abc\\udb01\\udc01)") { Params = new object[] { "abc\udb01\udc01", typeof(XmlException) } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (\\udb01abc)") { Params = new object[] { "\udb01abc", typeof(XmlException) } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (a\\udb01abc)") { Params = new object[] { "a\udb01abc", typeof(XmlException) } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (abc\\udb01)") { Params = new object[] { "abc\udb01", typeof(XmlException) } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (\\udf01abc)") { Params = new object[] { "\udf01abc", typeof(XmlException) } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (a\\udf01abc)") { Params = new object[] { "a\udf01abc", typeof(XmlException) } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (abc\\udf01)") { Params = new object[] { "abc\udf01", typeof(XmlException) } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (\\uffffabc)") { Params = new object[] { "\uffffabc", typeof(XmlException) } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (a\\uffffabc)") { Params = new object[] { "a\uffffabc", typeof(XmlException) } } }); AddChild(new CVariation(VerifyPublicId) { Attribute = new Variation("Test for VerifyPublicId (abc\\uffff)") { Params = new object[] { "abc\uffff", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\t\t)") { Params = new object[] { "\t\t", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace( )") { Params = new object[] { " ", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\n)") { Params = new object[] { "\n\n", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = 
new Variation("Test for VerifyWhitespace(\r\r)") { Params = new object[] { "\r\r", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(null)") { Params = new object[] { null, typeof(ArgumentNullException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace( )") { Params = new object[] { " ", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\n\n)") { Params = new object[] { "\n\n\n", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\r\r\r)") { Params = new object[] { "\r\r\r", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\t\t\t)") { Params = new object[] { "\t\t\t", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace( )") { Params = new object[] { " ", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\n\n\n)") { Params = new object[] { "\n\n\n\n", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\r\r\r\r)") { Params = new object[] { "\r\r\r\r", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\t\t\t\t)") { Params = new object[] { "\t\t\t\t", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\r\t\n)") { Params = new object[] { "\n\r\t\n", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\r\t )") { Params = new object[] { "\n\r\t ", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(a\n\r\t\n)") { Params = new object[] { "a\n\r\t\n", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\r\t\na)") { Params = new object[] { "\n\r\t\na", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\rb\t\n)") { Params = new object[] { "\n\rb\t\n", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\ud801\udc01\n\r\t\n)") { Params = new object[] { "\ud801\udc01\n\r\t\n", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\r\t\n\ud801\udc01)") { Params = new object[] { "\n\r\t\n\ud801\udc01", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\\n\\r\\ud801\\udc01\\t\\n)") { Params = new object[] { "\n\r\ud801\udc01\t\n", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\\udc01\\ud801\\n\\r\\t\\n)") { Params = new object[] { "\ufffd\ufffd\ufffd\ufffd\n\r\t\n", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\\n\\r\\t\\n\\udc01\\ud801)") { Params = new object[] { "\n\r\t\n\ufffd\ufffd\ufffd\ufffd", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\\n\\r\\udc01\\ud801\\t\n)") { Params = new object[] 
{ "\n\r\ufffd\ufffd\ufffd\ufffd\t\n", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\ud801\n\r\t\n)") { Params = new object[] { "\ufffd\ufffd\n\r\t\n", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\r\t\n\udc01)") { Params = new object[] { "\n\r\t\n\ufffd\ufffd", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\r\ud801\t\n)") { Params = new object[] { "\n\r\ufffd\ufffd\t\n", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\t)") { Params = new object[] { "\t", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\r\t\n\udc01)") { Params = new object[] { "\n\r\t\n\ufffd\ufffd", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\r\udc01\t\n)") { Params = new object[] { "\n\r\ufffd\ufffd\t\n", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\uffff\n\r\t\n)") { Params = new object[] { "\uffff\n\r\t\n", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\r\t\n\uffff)") { Params = new object[] { "\n\r\t\n\uffff", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n\r\uffff\t\n)") { Params = new object[] { "\n\r\uffff\t\n", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(string.Empty)") { Params = new object[] { "", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\udc01\n\r\t\n)") { Params = new object[] { "\ufffd\ufffd\n\r\t\n", typeof(XmlException) } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace( )") { Params = new object[] { " ", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\n)") { Params = new object[] { "\n", null } } }); AddChild(new CVariation(VerifyWhitespace) { Attribute = new Variation("Test for VerifyWhitespace(\r)") { Params = new object[] { "\r", null } } }); } private int TFS_469847() { var param = (int)CurVariation.Param; try { switch (param) { case 1: XmlConvert.VerifyNMTOKEN("foo\ud800\udc00bar"); break; case 2: XmlConvert.VerifyName("foo\ud800\udc00bar"); break; case 3: XmlConvert.VerifyNCName("foo\ud800\udc00bar"); break; case 5: XmlConvert.VerifyXmlChars("foo\ud800\udc00bar"); break; } } catch (XmlException e) { CError.WriteLine(e.Message); return TEST_PASS; } return (param == 4 || param == 5) ? TEST_PASS : TEST_FAIL; } private int VerifyPublicId() { var inputString = (string)CurVariation.Params[0]; var exceptionType = (Type)CurVariation.Params[1]; try { string outString = XmlConvert.VerifyPublicId(inputString); CError.Compare(inputString, outString, "Content"); } catch (ArgumentNullException e) { return (exceptionType != null && e.GetType().Name == exceptionType.Name) ? 
TEST_PASS : TEST_FAIL; } catch (XmlException e) { CError.WriteLine(e.LineNumber); CError.WriteLine(e.LinePosition); return (exceptionType != null && e.GetType().Name == exceptionType.Name) ? TEST_PASS : TEST_FAIL; } return exceptionType == null ? TEST_PASS : TEST_FAIL; } /// <summary> /// Params[] = { inputString, shouldThrow } /// </summary> private int VerifyWhitespace() { var inputString = (string)CurVariation.Params[0]; var exceptionType = (Type)CurVariation.Params[1]; try { string outString = XmlConvert.VerifyWhitespace(inputString); CError.Compare(inputString, outString, "Content"); } catch (ArgumentNullException e) { return (exceptionType != null && e.GetType().Name == exceptionType.Name) ? TEST_PASS : TEST_FAIL; } catch (XmlException e) { CError.WriteLine(e.LineNumber); CError.WriteLine(e.LinePosition); return (exceptionType != null && e.GetType().Name == exceptionType.Name) ? TEST_PASS : TEST_FAIL; } return exceptionType == null ? TEST_PASS : TEST_FAIL; } private int VerifyXmlCharsTests() { var param = (int)CurVariation.Params[0]; var exceptionType = (Type)CurVariation.Params[1]; string inputString = string.Empty; switch (param) { case 1: inputString = null; break; case 2: inputString = ""; break; case 3: inputString = "a"; break; case 4: inputString = "ab"; break; case 5: inputString = "a\udbff\udfffb"; break; case 6: inputString = "abcd\udbff\udfffdcba"; break; case 7: inputString = "\udbff\udfffabcddcba"; break; case 8: inputString = "abcddcba\udbff\udfff"; break; case 9: inputString = "a\udfff\udbffb"; break; case 10: inputString = "a\udfffb"; break; case 11: inputString = "a\udbffb"; break; case 12: inputString = "abcd\udbff \udfffdcba"; break; case 13: inputString = "\uffffabcd\ud801\udc01dcba"; break; case 14: inputString = "abcd\uffff\ud801\udc01dcba"; break; case 15: inputString = "abcd\ud801\udc01dcba\uffff"; break; } try { string outString = XmlConvert.VerifyXmlChars(inputString); CError.Compare(inputString, outString, "Content"); } catch (ArgumentNullException e) { return (exceptionType != null && e.GetType().Name == exceptionType.Name) ? TEST_PASS : TEST_FAIL; } catch (XmlException e) { CError.WriteLine(e.LineNumber); CError.WriteLine(e.LinePosition); return (exceptionType != null && e.GetType().Name == exceptionType.Name) ? TEST_PASS : TEST_FAIL; } return exceptionType == null ? TEST_PASS : TEST_FAIL; } } }
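The record above snapshots a test file for the XmlConvert verification APIs (VerifyWhitespace, VerifyPublicId, VerifyXmlChars and the Verify* name methods). As a hedged, self-contained sketch, not part of the test suite, of the calling contract those variations rely on: each method returns the input string unchanged when it is valid and throws XmlException for invalid content (ArgumentNullException for null input), which is why the tests compare the returned string against the input.

// Minimal usage sketch of the XmlConvert.Verify* pattern exercised above (illustrative only).
using System;
using System.Xml;

internal static class XmlConvertVerifySketch
{
    private static void Demo(string label, Func<string, string> verify, string input)
    {
        try
        {
            // On success the original string comes back unchanged, which is what
            // CError.Compare(inputString, outString, "Content") asserts in the tests.
            string result = verify(input);
            Console.WriteLine($"{label}: valid -> \"{result}\"");
        }
        catch (ArgumentNullException)
        {
            Console.WriteLine($"{label}: null input rejected");
        }
        catch (XmlException e)
        {
            Console.WriteLine($"{label}: invalid at line {e.LineNumber}, position {e.LinePosition}");
        }
    }

    private static void Main()
    {
        Demo("VerifyWhitespace", XmlConvert.VerifyWhitespace, "\n\r\t ");    // whitespace only: passes
        Demo("VerifyWhitespace", XmlConvert.VerifyWhitespace, "a\n");        // non-whitespace char: XmlException
        Demo("VerifyPublicId", XmlConvert.VerifyPublicId, "+,./");           // allowed public-id characters
        Demo("VerifyXmlChars", XmlConvert.VerifyXmlChars, "a\udbff\udfffb"); // valid surrogate pair
    }
}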
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
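The query and PR fields above name the System.Text.Json source-generation and reference-handler feature area. As a hedged, minimal sketch of what combining a source-generated JsonSerializerContext with ReferenceHandler.Preserve looks like; the type names Node and NodeContext are illustrative, and this is not the PR's fix or its test code:

// Hedged sketch of the feature area named in the PR title: using the System.Text.Json
// source generator together with ReferenceHandler.Preserve so a cyclic object graph
// serializes with $id/$ref reference metadata instead of throwing on the cycle.
// Type names (Node, NodeContext) are illustrative and not taken from the PR.
using System.Text.Json;
using System.Text.Json.Serialization;

public class Node
{
    public string Name { get; set; }
    public Node Next { get; set; }
}

[JsonSerializable(typeof(Node))]
public partial class NodeContext : JsonSerializerContext
{
}

public static class ReferenceHandlerSketch
{
    public static string SerializeCycle()
    {
        var a = new Node { Name = "a" };
        a.Next = new Node { Name = "b", Next = a }; // cycle: a -> b -> a

        // Hand the reference handler to the generated context via its options constructor.
        var options = new JsonSerializerOptions { ReferenceHandler = ReferenceHandler.Preserve };
        var context = new NodeContext(options);

        // context.Node is the generated JsonTypeInfo<Node> for the registered type.
        return JsonSerializer.Serialize(a, context.Node);
    }
}

How the metadata-based (preserve-references) path interacts with source-generated contexts is the area the PR and the linked issue concern, so exact behavior may differ across runtime versions.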
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/MultiplySubtractBySelectedScalar.Vector64.UInt16.Vector128.UInt16.7.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void MultiplySubtractBySelectedScalar_Vector64_UInt16_Vector128_UInt16_7() { var test = new SimpleTernaryOpTest__MultiplySubtractBySelectedScalar_Vector64_UInt16_Vector128_UInt16_7(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleTernaryOpTest__MultiplySubtractBySelectedScalar_Vector64_UInt16_Vector128_UInt16_7 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] inArray3; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle inHandle3; private GCHandle outHandle; private ulong alignment; public DataTable(UInt16[] inArray1, UInt16[] inArray2, UInt16[] inArray3, UInt16[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt16>(); int sizeOfinArray2 = inArray2.Length * 
Unsafe.SizeOf<UInt16>(); int sizeOfinArray3 = inArray3.Length * Unsafe.SizeOf<UInt16>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<UInt16>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfinArray3 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.inArray3 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.inHandle3 = GCHandle.Alloc(this.inArray3, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt16, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt16, byte>(ref inArray2[0]), (uint)sizeOfinArray2); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray3Ptr), ref Unsafe.As<UInt16, byte>(ref inArray3[0]), (uint)sizeOfinArray3); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray3Ptr => Align((byte*)(inHandle3.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); inHandle3.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<UInt16> _fld1; public Vector64<UInt16> _fld2; public Vector128<UInt16> _fld3; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref testStruct._fld3), ref Unsafe.As<UInt16, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); return testStruct; } public void RunStructFldScenario(SimpleTernaryOpTest__MultiplySubtractBySelectedScalar_Vector64_UInt16_Vector128_UInt16_7 testClass) { var result = AdvSimd.MultiplySubtractBySelectedScalar(_fld1, _fld2, _fld3, 7); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleTernaryOpTest__MultiplySubtractBySelectedScalar_Vector64_UInt16_Vector128_UInt16_7 testClass) { fixed (Vector64<UInt16>* pFld1 = &_fld1) fixed (Vector64<UInt16>* pFld2 = &_fld2) fixed (Vector128<UInt16>* pFld3 = 
&_fld3) { var result = AdvSimd.MultiplySubtractBySelectedScalar( AdvSimd.LoadVector64((UInt16*)(pFld1)), AdvSimd.LoadVector64((UInt16*)(pFld2)), AdvSimd.LoadVector128((UInt16*)(pFld3)), 7 ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<UInt16>>() / sizeof(UInt16); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<UInt16>>() / sizeof(UInt16); private static readonly int Op3ElementCount = Unsafe.SizeOf<Vector128<UInt16>>() / sizeof(UInt16); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<UInt16>>() / sizeof(UInt16); private static readonly byte Imm = 7; private static UInt16[] _data1 = new UInt16[Op1ElementCount]; private static UInt16[] _data2 = new UInt16[Op2ElementCount]; private static UInt16[] _data3 = new UInt16[Op3ElementCount]; private static Vector64<UInt16> _clsVar1; private static Vector64<UInt16> _clsVar2; private static Vector128<UInt16> _clsVar3; private Vector64<UInt16> _fld1; private Vector64<UInt16> _fld2; private Vector128<UInt16> _fld3; private DataTable _dataTable; static SimpleTernaryOpTest__MultiplySubtractBySelectedScalar_Vector64_UInt16_Vector128_UInt16_7() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref _clsVar1), ref Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref _clsVar2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _clsVar3), ref Unsafe.As<UInt16, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); } public SimpleTernaryOpTest__MultiplySubtractBySelectedScalar_Vector64_UInt16_Vector128_UInt16_7() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref _fld1), ref Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref _fld2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _fld3), ref Unsafe.As<UInt16, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt16(); } _dataTable = new DataTable(_data1, _data2, _data3, new UInt16[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.MultiplySubtractBySelectedScalar( Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray2Ptr), Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray3Ptr), 7 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.MultiplySubtractBySelectedScalar( AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray2Ptr)), AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray3Ptr)), 7 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.MultiplySubtractBySelectedScalar), new Type[] { typeof(Vector64<UInt16>), typeof(Vector64<UInt16>), typeof(Vector128<UInt16>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray2Ptr), Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray3Ptr), (byte)7 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<UInt16>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.MultiplySubtractBySelectedScalar), new Type[] { typeof(Vector64<UInt16>), typeof(Vector64<UInt16>), typeof(Vector128<UInt16>), typeof(byte) }) .Invoke(null, new object[] { AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray2Ptr)), AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray3Ptr)), (byte)7 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<UInt16>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.MultiplySubtractBySelectedScalar( _clsVar1, _clsVar2, _clsVar3, 7 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector64<UInt16>* pClsVar1 = &_clsVar1) fixed (Vector64<UInt16>* pClsVar2 = &_clsVar2) fixed (Vector128<UInt16>* pClsVar3 = &_clsVar3) { var result = AdvSimd.MultiplySubtractBySelectedScalar( AdvSimd.LoadVector64((UInt16*)(pClsVar1)), AdvSimd.LoadVector64((UInt16*)(pClsVar2)), AdvSimd.LoadVector128((UInt16*)(pClsVar3)), 7 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray2Ptr); var op3 = 
Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray3Ptr); var result = AdvSimd.MultiplySubtractBySelectedScalar(op1, op2, op3, 7); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, op3, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray2Ptr)); var op3 = AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray3Ptr)); var result = AdvSimd.MultiplySubtractBySelectedScalar(op1, op2, op3, 7); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, op3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleTernaryOpTest__MultiplySubtractBySelectedScalar_Vector64_UInt16_Vector128_UInt16_7(); var result = AdvSimd.MultiplySubtractBySelectedScalar(test._fld1, test._fld2, test._fld3, 7); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleTernaryOpTest__MultiplySubtractBySelectedScalar_Vector64_UInt16_Vector128_UInt16_7(); fixed (Vector64<UInt16>* pFld1 = &test._fld1) fixed (Vector64<UInt16>* pFld2 = &test._fld2) fixed (Vector128<UInt16>* pFld3 = &test._fld3) { var result = AdvSimd.MultiplySubtractBySelectedScalar( AdvSimd.LoadVector64((UInt16*)(pFld1)), AdvSimd.LoadVector64((UInt16*)(pFld2)), AdvSimd.LoadVector128((UInt16*)(pFld3)), 7 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.MultiplySubtractBySelectedScalar(_fld1, _fld2, _fld3, 7); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<UInt16>* pFld1 = &_fld1) fixed (Vector64<UInt16>* pFld2 = &_fld2) fixed (Vector128<UInt16>* pFld3 = &_fld3) { var result = AdvSimd.MultiplySubtractBySelectedScalar( AdvSimd.LoadVector64((UInt16*)(pFld1)), AdvSimd.LoadVector64((UInt16*)(pFld2)), AdvSimd.LoadVector128((UInt16*)(pFld3)), 7 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.MultiplySubtractBySelectedScalar(test._fld1, test._fld2, test._fld3, 7); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.MultiplySubtractBySelectedScalar( AdvSimd.LoadVector64((UInt16*)(&test._fld1)), AdvSimd.LoadVector64((UInt16*)(&test._fld2)), AdvSimd.LoadVector128((UInt16*)(&test._fld3)), 7 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunStructFldScenario() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<UInt16> op1, Vector64<UInt16> op2, Vector128<UInt16> op3, void* result, [CallerMemberName] string method = "") { UInt16[] inArray1 = new UInt16[Op1ElementCount]; UInt16[] inArray2 = new UInt16[Op2ElementCount]; UInt16[] inArray3 = new UInt16[Op3ElementCount]; UInt16[] outArray = new UInt16[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray2[0]), op2); Unsafe.WriteUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray3[0]), op3); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); ValidateResult(inArray1, inArray2, inArray3, outArray, method); } private void ValidateResult(void* op1, void* op2, void* op3, void* result, [CallerMemberName] string method = "") { UInt16[] inArray1 = new UInt16[Op1ElementCount]; UInt16[] inArray2 = new UInt16[Op2ElementCount]; UInt16[] inArray3 = new UInt16[Op3ElementCount]; UInt16[] outArray = new UInt16[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray3[0]), ref Unsafe.AsRef<byte>(op3), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); ValidateResult(inArray1, inArray2, inArray3, outArray, method); } private void ValidateResult(UInt16[] firstOp, UInt16[] secondOp, UInt16[] thirdOp, UInt16[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.MultiplySubtract(firstOp[i], secondOp[i], thirdOp[Imm]) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.MultiplySubtractBySelectedScalar)}<UInt16>(Vector64<UInt16>, Vector64<UInt16>, Vector128<UInt16>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($"secondOp: ({string.Join(", ", secondOp)})"); TestLibrary.TestFramework.LogInformation($" thirdOp: ({string.Join(", ", thirdOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void MultiplySubtractBySelectedScalar_Vector64_UInt16_Vector128_UInt16_7() { var test = new SimpleTernaryOpTest__MultiplySubtractBySelectedScalar_Vector64_UInt16_Vector128_UInt16_7(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleTernaryOpTest__MultiplySubtractBySelectedScalar_Vector64_UInt16_Vector128_UInt16_7 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] inArray3; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle inHandle3; private GCHandle outHandle; private ulong alignment; public DataTable(UInt16[] inArray1, UInt16[] inArray2, UInt16[] inArray3, UInt16[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt16>(); int sizeOfinArray2 = inArray2.Length * 
Unsafe.SizeOf<UInt16>(); int sizeOfinArray3 = inArray3.Length * Unsafe.SizeOf<UInt16>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<UInt16>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfinArray3 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.inArray3 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.inHandle3 = GCHandle.Alloc(this.inArray3, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt16, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt16, byte>(ref inArray2[0]), (uint)sizeOfinArray2); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray3Ptr), ref Unsafe.As<UInt16, byte>(ref inArray3[0]), (uint)sizeOfinArray3); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray3Ptr => Align((byte*)(inHandle3.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); inHandle3.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<UInt16> _fld1; public Vector64<UInt16> _fld2; public Vector128<UInt16> _fld3; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref testStruct._fld3), ref Unsafe.As<UInt16, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); return testStruct; } public void RunStructFldScenario(SimpleTernaryOpTest__MultiplySubtractBySelectedScalar_Vector64_UInt16_Vector128_UInt16_7 testClass) { var result = AdvSimd.MultiplySubtractBySelectedScalar(_fld1, _fld2, _fld3, 7); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleTernaryOpTest__MultiplySubtractBySelectedScalar_Vector64_UInt16_Vector128_UInt16_7 testClass) { fixed (Vector64<UInt16>* pFld1 = &_fld1) fixed (Vector64<UInt16>* pFld2 = &_fld2) fixed (Vector128<UInt16>* pFld3 = 
&_fld3) { var result = AdvSimd.MultiplySubtractBySelectedScalar( AdvSimd.LoadVector64((UInt16*)(pFld1)), AdvSimd.LoadVector64((UInt16*)(pFld2)), AdvSimd.LoadVector128((UInt16*)(pFld3)), 7 ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<UInt16>>() / sizeof(UInt16); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<UInt16>>() / sizeof(UInt16); private static readonly int Op3ElementCount = Unsafe.SizeOf<Vector128<UInt16>>() / sizeof(UInt16); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<UInt16>>() / sizeof(UInt16); private static readonly byte Imm = 7; private static UInt16[] _data1 = new UInt16[Op1ElementCount]; private static UInt16[] _data2 = new UInt16[Op2ElementCount]; private static UInt16[] _data3 = new UInt16[Op3ElementCount]; private static Vector64<UInt16> _clsVar1; private static Vector64<UInt16> _clsVar2; private static Vector128<UInt16> _clsVar3; private Vector64<UInt16> _fld1; private Vector64<UInt16> _fld2; private Vector128<UInt16> _fld3; private DataTable _dataTable; static SimpleTernaryOpTest__MultiplySubtractBySelectedScalar_Vector64_UInt16_Vector128_UInt16_7() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref _clsVar1), ref Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref _clsVar2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _clsVar3), ref Unsafe.As<UInt16, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); } public SimpleTernaryOpTest__MultiplySubtractBySelectedScalar_Vector64_UInt16_Vector128_UInt16_7() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref _fld1), ref Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref _fld2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _fld3), ref Unsafe.As<UInt16, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt16(); } _dataTable = new DataTable(_data1, _data2, _data3, new UInt16[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.MultiplySubtractBySelectedScalar( Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray2Ptr), Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray3Ptr), 7 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.MultiplySubtractBySelectedScalar( AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray2Ptr)), AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray3Ptr)), 7 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.MultiplySubtractBySelectedScalar), new Type[] { typeof(Vector64<UInt16>), typeof(Vector64<UInt16>), typeof(Vector128<UInt16>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray2Ptr), Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray3Ptr), (byte)7 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<UInt16>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.MultiplySubtractBySelectedScalar), new Type[] { typeof(Vector64<UInt16>), typeof(Vector64<UInt16>), typeof(Vector128<UInt16>), typeof(byte) }) .Invoke(null, new object[] { AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray2Ptr)), AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray3Ptr)), (byte)7 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<UInt16>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.MultiplySubtractBySelectedScalar( _clsVar1, _clsVar2, _clsVar3, 7 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector64<UInt16>* pClsVar1 = &_clsVar1) fixed (Vector64<UInt16>* pClsVar2 = &_clsVar2) fixed (Vector128<UInt16>* pClsVar3 = &_clsVar3) { var result = AdvSimd.MultiplySubtractBySelectedScalar( AdvSimd.LoadVector64((UInt16*)(pClsVar1)), AdvSimd.LoadVector64((UInt16*)(pClsVar2)), AdvSimd.LoadVector128((UInt16*)(pClsVar3)), 7 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray2Ptr); var op3 = 
Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray3Ptr); var result = AdvSimd.MultiplySubtractBySelectedScalar(op1, op2, op3, 7); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, op3, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray2Ptr)); var op3 = AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray3Ptr)); var result = AdvSimd.MultiplySubtractBySelectedScalar(op1, op2, op3, 7); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, op3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleTernaryOpTest__MultiplySubtractBySelectedScalar_Vector64_UInt16_Vector128_UInt16_7(); var result = AdvSimd.MultiplySubtractBySelectedScalar(test._fld1, test._fld2, test._fld3, 7); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleTernaryOpTest__MultiplySubtractBySelectedScalar_Vector64_UInt16_Vector128_UInt16_7(); fixed (Vector64<UInt16>* pFld1 = &test._fld1) fixed (Vector64<UInt16>* pFld2 = &test._fld2) fixed (Vector128<UInt16>* pFld3 = &test._fld3) { var result = AdvSimd.MultiplySubtractBySelectedScalar( AdvSimd.LoadVector64((UInt16*)(pFld1)), AdvSimd.LoadVector64((UInt16*)(pFld2)), AdvSimd.LoadVector128((UInt16*)(pFld3)), 7 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.MultiplySubtractBySelectedScalar(_fld1, _fld2, _fld3, 7); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<UInt16>* pFld1 = &_fld1) fixed (Vector64<UInt16>* pFld2 = &_fld2) fixed (Vector128<UInt16>* pFld3 = &_fld3) { var result = AdvSimd.MultiplySubtractBySelectedScalar( AdvSimd.LoadVector64((UInt16*)(pFld1)), AdvSimd.LoadVector64((UInt16*)(pFld2)), AdvSimd.LoadVector128((UInt16*)(pFld3)), 7 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.MultiplySubtractBySelectedScalar(test._fld1, test._fld2, test._fld3, 7); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.MultiplySubtractBySelectedScalar( AdvSimd.LoadVector64((UInt16*)(&test._fld1)), AdvSimd.LoadVector64((UInt16*)(&test._fld2)), AdvSimd.LoadVector128((UInt16*)(&test._fld3)), 7 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunStructFldScenario() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<UInt16> op1, Vector64<UInt16> op2, Vector128<UInt16> op3, void* result, [CallerMemberName] string method = "") { UInt16[] inArray1 = new UInt16[Op1ElementCount]; UInt16[] inArray2 = new UInt16[Op2ElementCount]; UInt16[] inArray3 = new UInt16[Op3ElementCount]; UInt16[] outArray = new UInt16[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray2[0]), op2); Unsafe.WriteUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray3[0]), op3); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); ValidateResult(inArray1, inArray2, inArray3, outArray, method); } private void ValidateResult(void* op1, void* op2, void* op3, void* result, [CallerMemberName] string method = "") { UInt16[] inArray1 = new UInt16[Op1ElementCount]; UInt16[] inArray2 = new UInt16[Op2ElementCount]; UInt16[] inArray3 = new UInt16[Op3ElementCount]; UInt16[] outArray = new UInt16[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray3[0]), ref Unsafe.AsRef<byte>(op3), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<UInt16>>()); ValidateResult(inArray1, inArray2, inArray3, outArray, method); } private void ValidateResult(UInt16[] firstOp, UInt16[] secondOp, UInt16[] thirdOp, UInt16[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.MultiplySubtract(firstOp[i], secondOp[i], thirdOp[Imm]) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.MultiplySubtractBySelectedScalar)}<UInt16>(Vector64<UInt16>, Vector64<UInt16>, Vector128<UInt16>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($"secondOp: ({string.Join(", ", secondOp)})"); TestLibrary.TestFramework.LogInformation($" thirdOp: ({string.Join(", ", thirdOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
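The ValidateResult loop in the MultiplySubtractBySelectedScalar test above compares each vector element against Helpers.MultiplySubtract(firstOp[i], secondOp[i], thirdOp[Imm]). A minimal scalar sketch of that reference check, assuming the helper performs the unchecked UInt16 computation op1 - op2 * op3 (its real definition lives outside this excerpt):

// Assumed scalar reference for the multiply-subtract-by-element validation:
// result[i] is expected to equal firstOp[i] - secondOp[i] * thirdOp[Imm], truncated to 16 bits.
static ushort MultiplySubtractReference(ushort op1, ushort op2, ushort op3)
{
    unchecked
    {
        return (ushort)(op1 - (ushort)(op2 * op3));
    }
}

Because the immediate selects lane 7 of the third operand, every output element is validated against the same selected scalar.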
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/libraries/System.Private.CoreLib/src/System/IO/Path.Unix.NoniOS.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.IO { public static partial class Path { private static string DefaultTempPath => "/tmp/"; } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.IO { public static partial class Path { private static string DefaultTempPath => "/tmp/"; } }
-1
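DefaultTempPath above is only the Unix fallback value. A hedged sketch of the kind of lookup a GetTempPath-style caller layers on top of it; the TMPDIR handling shown here is an illustrative assumption, not a quote of the real Path.GetTempPath body:

using System;

// Illustrative only: prefer a TMPDIR environment override, fall back to the "/tmp/"
// default shown above, and make sure the result ends with a directory separator.
static string GetTempPathSketch()
{
    string tmp = Environment.GetEnvironmentVariable("TMPDIR");
    string path = string.IsNullOrEmpty(tmp) ? "/tmp/" : tmp;
    return path.EndsWith("/", StringComparison.Ordinal) ? path : path + "/";
}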
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/libraries/Common/src/Interop/Windows/Kernel32/Interop.FILE_BASIC_INFO.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. internal static partial class Interop { internal static partial class Kernel32 { // From FILE_INFO_BY_HANDLE_CLASS // Use for GetFileInformationByHandleEx/SetFileInformationByHandle internal const int FileBasicInfo = 0; internal struct FILE_BASIC_INFO { internal long CreationTime; internal long LastAccessTime; internal long LastWriteTime; internal long ChangeTime; internal uint FileAttributes; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. internal static partial class Interop { internal static partial class Kernel32 { // From FILE_INFO_BY_HANDLE_CLASS // Use for GetFileInformationByHandleEx/SetFileInformationByHandle internal const int FileBasicInfo = 0; internal struct FILE_BASIC_INFO { internal long CreationTime; internal long LastAccessTime; internal long LastWriteTime; internal long ChangeTime; internal uint FileAttributes; } } }
-1
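The FILE_BASIC_INFO struct above mirrors the Win32 layout read and written through GetFileInformationByHandleEx/SetFileInformationByHandle, with the four time fields expressed as FILETIME-style 100-nanosecond ticks. A hedged sketch of querying it for an open file; the P/Invoke declaration is written from the public Win32 signature rather than copied from this repo's interop layer:

using System;
using System.IO;
using System.Runtime.InteropServices;
using Microsoft.Win32.SafeHandles;

static class FileBasicInfoSample
{
    private const int FileBasicInfo = 0; // FILE_INFO_BY_HANDLE_CLASS.FileBasicInfo

    [StructLayout(LayoutKind.Sequential)]
    private struct FILE_BASIC_INFO
    {
        public long CreationTime;
        public long LastAccessTime;
        public long LastWriteTime;
        public long ChangeTime;
        public uint FileAttributes;
    }

    // Declared from the documented Win32 signature; not the repo's own interop wrapper.
    [DllImport("kernel32.dll", SetLastError = true)]
    private static extern bool GetFileInformationByHandleEx(
        SafeFileHandle hFile, int infoClass, out FILE_BASIC_INFO info, uint bufferSize);

    // Reads the last-write time of an already-open file, interpreting the
    // 100-ns tick count as a UTC FILETIME value.
    public static DateTime GetLastWriteTimeUtc(FileStream stream)
    {
        if (!GetFileInformationByHandleEx(stream.SafeFileHandle, FileBasicInfo,
                out FILE_BASIC_INFO info, (uint)Marshal.SizeOf<FILE_BASIC_INFO>()))
        {
            throw new IOException("GetFileInformationByHandleEx failed", Marshal.GetLastWin32Error());
        }
        return DateTime.FromFileTimeUtc(info.LastWriteTime);
    }
}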
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/JIT/IL_Conformance/Old/Conformance_Base/dup4.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern legacy library mscorlib {} .class public _dup { .field public static int32 sentinel .field public static int32 none .field public static int32 all .method public static void initialize() { .maxstack 20 ldc.i4 0x00000000 stsfld int32 _dup::none ldc.i4 0xFFFFFFFF stsfld int32 _dup::all ldc.i4 0xFAFB0C0D stsfld int32 _dup::sentinel ret } .method public static int32 main(class [mscorlib]System.String[]) { .entrypoint .maxstack 20 call void _dup::initialize() ldsfld int32 _dup::sentinel ldsfld int32 _dup::none dup ceq brfalse FAIL ldsfld int32 _dup::all dup ceq brfalse FAIL dup ldsfld int32 _dup::sentinel ceq brfalse FAIL pop ldc.i4 100 ret FAIL: pop ldc.i4 0x0 ret } } .assembly dup4{}
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern legacy library mscorlib {} .class public _dup { .field public static int32 sentinel .field public static int32 none .field public static int32 all .method public static void initialize() { .maxstack 20 ldc.i4 0x00000000 stsfld int32 _dup::none ldc.i4 0xFFFFFFFF stsfld int32 _dup::all ldc.i4 0xFAFB0C0D stsfld int32 _dup::sentinel ret } .method public static int32 main(class [mscorlib]System.String[]) { .entrypoint .maxstack 20 call void _dup::initialize() ldsfld int32 _dup::sentinel ldsfld int32 _dup::none dup ceq brfalse FAIL ldsfld int32 _dup::all dup ceq brfalse FAIL dup ldsfld int32 _dup::sentinel ceq brfalse FAIL pop ldc.i4 100 ret FAIL: pop ldc.i4 0x0 ret } } .assembly dup4{}
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/JIT/IL_Conformance/Old/Conformance_Base/xor_u4.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> <RestorePackages>true</RestorePackages> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="xor_u4.il" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> <RestorePackages>true</RestorePackages> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="xor_u4.il" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/Loader/classloader/InterfaceFolding/TestCase4.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="TestCase4.il" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="TestCase4.il" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/libraries/System.Net.Mail/tests/Functional/LinkedResourceCollectionTest.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // LinkedResourceCollectionTest.cs - Unit Test Cases for System.Net.MailAddress.LinkedResourceCollection // // Authors: // John Luke ([email protected]) // // (C) 2005 John Luke // using System.Net.Mime; using Xunit; namespace System.Net.Mail.Tests { public class LinkedResourceCollectionTest { LinkedResourceCollection lrc; LinkedResource lr; public LinkedResourceCollectionTest() { lrc = AlternateView.CreateAlternateViewFromString("test", new ContentType("text/plain")).LinkedResources; lr = LinkedResource.CreateLinkedResourceFromString("test", new ContentType("text/plain")); } [Fact] public void InitialCount() { Assert.Equal(0, lrc.Count); } [Fact] public void AddCount() { lrc.Add(lr); Assert.Equal(1, lrc.Count); } [Fact] public void RemoveCount() { lrc.Remove(lr); Assert.Equal(0, lrc.Count); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // LinkedResourceCollectionTest.cs - Unit Test Cases for System.Net.MailAddress.LinkedResourceCollection // // Authors: // John Luke ([email protected]) // // (C) 2005 John Luke // using System.Net.Mime; using Xunit; namespace System.Net.Mail.Tests { public class LinkedResourceCollectionTest { LinkedResourceCollection lrc; LinkedResource lr; public LinkedResourceCollectionTest() { lrc = AlternateView.CreateAlternateViewFromString("test", new ContentType("text/plain")).LinkedResources; lr = LinkedResource.CreateLinkedResourceFromString("test", new ContentType("text/plain")); } [Fact] public void InitialCount() { Assert.Equal(0, lrc.Count); } [Fact] public void AddCount() { lrc.Add(lr); Assert.Equal(1, lrc.Count); } [Fact] public void RemoveCount() { lrc.Remove(lr); Assert.Equal(0, lrc.Count); } } }
-1
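The LinkedResources collection under test is normally populated when embedding resources such as images into an HTML alternate view. A brief usage sketch with System.Net.Mail; the file name, content id, and addresses are illustrative placeholders:

using System.Net.Mail;

// Embed an image in an HTML mail body via a cid: reference.
var html = AlternateView.CreateAlternateViewFromString(
    "<html><body><img src=\"cid:logo\" /></body></html>", null, "text/html");

var logo = new LinkedResource("logo.png", "image/png") { ContentId = "logo" };
html.LinkedResources.Add(logo);   // the same collection exercised by the tests above

var message = new MailMessage("[email protected]", "[email protected]", "embedded image", "plain-text fallback");
message.AlternateViews.Add(html);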
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/libraries/System.Runtime.Numerics/tests/BigInteger/divrem.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Xunit; namespace System.Numerics.Tests { public class divremTest { private static int s_samples = 10; private static Random s_random = new Random(100); [Fact] public static void RunDivRem_TwoLargeBI() { byte[] tempByteArray1 = new byte[0]; byte[] tempByteArray2 = new byte[0]; // DivRem Method - Two Large BigIntegers for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random); tempByteArray2 = GetRandomByteArray(s_random); VerifyDivRemString(Print(tempByteArray1) + Print(tempByteArray2) + "bDivRem"); } } [Fact] public static void RunDivRem_TwoSmallBI() { byte[] tempByteArray1 = new byte[0]; byte[] tempByteArray2 = new byte[0]; // DivRem Method - Two Small BigIntegers for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random, 2); tempByteArray2 = GetRandomByteArray(s_random, 2); VerifyDivRemString(Print(tempByteArray1) + Print(tempByteArray2) + "bDivRem"); } } [Fact] public static void RunDivRem_OneSmallOneLargeBI() { byte[] tempByteArray1 = new byte[0]; byte[] tempByteArray2 = new byte[0]; // DivRem Method - One Large and one small BigIntegers for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random); tempByteArray2 = GetRandomByteArray(s_random, 2); VerifyDivRemString(Print(tempByteArray1) + Print(tempByteArray2) + "bDivRem"); tempByteArray1 = GetRandomByteArray(s_random, 2); tempByteArray2 = GetRandomByteArray(s_random); VerifyDivRemString(Print(tempByteArray1) + Print(tempByteArray2) + "bDivRem"); } } [Fact] public static void RunDivRem_OneLargeOne0BI() { byte[] tempByteArray1 = new byte[0]; byte[] tempByteArray2 = new byte[0]; // DivRem Method - One Large BigIntegers and zero for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random); tempByteArray2 = new byte[] { 0 }; VerifyDivRemString(Print(tempByteArray1) + Print(tempByteArray2) + "bDivRem"); Assert.Throws<DivideByZeroException>(() => { VerifyDivRemString(Print(tempByteArray2) + Print(tempByteArray1) + "bDivRem"); }); } } [Fact] public static void RunDivRem_OneSmallOne0BI() { byte[] tempByteArray1 = new byte[0]; byte[] tempByteArray2 = new byte[0]; // DivRem Method - One small BigIntegers and zero for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random, 2); tempByteArray2 = new byte[] { 0 }; VerifyDivRemString(Print(tempByteArray1) + Print(tempByteArray2) + "bDivRem"); Assert.Throws<DivideByZeroException>(() => { VerifyDivRemString(Print(tempByteArray2) + Print(tempByteArray1) + "bDivRem"); }); } } [Fact] public static void Boundary() { byte[] tempByteArray1 = new byte[0]; byte[] tempByteArray2 = new byte[0]; // Check interesting cases for boundary conditions // You'll either be shifting a 0 or 1 across the boundary // 32 bit boundary n2=0 VerifyDivRemString(Math.Pow(2, 32) + " 2 bDivRem"); // 32 bit boundary n1=0 n2=1 VerifyDivRemString(Math.Pow(2, 33) + " 2 bDivRem"); } [Fact] public static void RunDivRemTests() { byte[] tempByteArray1 = new byte[0]; byte[] tempByteArray2 = new byte[0]; // DivRem Method - Two Large BigIntegers for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random); tempByteArray2 = GetRandomByteArray(s_random); VerifyDivRemString(Print(tempByteArray1) + Print(tempByteArray2) + "bDivRem"); } // DivRem Method - Two Small BigIntegers for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random, 2); 
tempByteArray2 = GetRandomByteArray(s_random, 2); VerifyDivRemString(Print(tempByteArray1) + Print(tempByteArray2) + "bDivRem"); } // DivRem Method - One Large and one small BigIntegers for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random); tempByteArray2 = GetRandomByteArray(s_random, 2); VerifyDivRemString(Print(tempByteArray1) + Print(tempByteArray2) + "bDivRem"); tempByteArray1 = GetRandomByteArray(s_random, 2); tempByteArray2 = GetRandomByteArray(s_random); VerifyDivRemString(Print(tempByteArray1) + Print(tempByteArray2) + "bDivRem"); } // DivRem Method - One Large BigIntegers and zero for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random); tempByteArray2 = new byte[] { 0 }; VerifyDivRemString(Print(tempByteArray1) + Print(tempByteArray2) + "bDivRem"); Assert.Throws<DivideByZeroException>(() => { VerifyDivRemString(Print(tempByteArray2) + Print(tempByteArray1) + "bDivRem"); }); } // DivRem Method - One small BigIntegers and zero for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random, 2); tempByteArray2 = new byte[] { 0 }; VerifyDivRemString(Print(tempByteArray1) + Print(tempByteArray2) + "bDivRem"); Assert.Throws<DivideByZeroException>(() => { VerifyDivRemString(Print(tempByteArray2) + Print(tempByteArray1) + "bDivRem"); }); } // Check interesting cases for boundary conditions // You'll either be shifting a 0 or 1 across the boundary // 32 bit boundary n2=0 VerifyDivRemString(Math.Pow(2, 32) + " 2 bDivRem"); // 32 bit boundary n1=0 n2=1 VerifyDivRemString(Math.Pow(2, 33) + " 2 bDivRem"); } private static void VerifyDivRemString(string opstring) { try { StackCalc sc = new StackCalc(opstring); while (sc.DoNextOperation()) { Assert.Equal(sc.snCalc.Peek().ToString(), sc.myCalc.Peek().ToString()); sc.VerifyOutParameter(); } } catch (Exception e) when (!(e is DivideByZeroException)) { // Log the original parameters, so we can reproduce any failure given the log throw new Exception($"VerifyDivRemString failed: {opstring} {e.ToString()}", e); } } private static byte[] GetRandomByteArray(Random random) { return GetRandomByteArray(random, random.Next(1, 100)); } private static byte[] GetRandomByteArray(Random random, int size) { return MyBigIntImp.GetNonZeroRandomByteArray(random, size); } private static string Print(byte[] bytes) { return MyBigIntImp.Print(bytes); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Xunit; namespace System.Numerics.Tests { public class divremTest { private static int s_samples = 10; private static Random s_random = new Random(100); [Fact] public static void RunDivRem_TwoLargeBI() { byte[] tempByteArray1 = new byte[0]; byte[] tempByteArray2 = new byte[0]; // DivRem Method - Two Large BigIntegers for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random); tempByteArray2 = GetRandomByteArray(s_random); VerifyDivRemString(Print(tempByteArray1) + Print(tempByteArray2) + "bDivRem"); } } [Fact] public static void RunDivRem_TwoSmallBI() { byte[] tempByteArray1 = new byte[0]; byte[] tempByteArray2 = new byte[0]; // DivRem Method - Two Small BigIntegers for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random, 2); tempByteArray2 = GetRandomByteArray(s_random, 2); VerifyDivRemString(Print(tempByteArray1) + Print(tempByteArray2) + "bDivRem"); } } [Fact] public static void RunDivRem_OneSmallOneLargeBI() { byte[] tempByteArray1 = new byte[0]; byte[] tempByteArray2 = new byte[0]; // DivRem Method - One Large and one small BigIntegers for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random); tempByteArray2 = GetRandomByteArray(s_random, 2); VerifyDivRemString(Print(tempByteArray1) + Print(tempByteArray2) + "bDivRem"); tempByteArray1 = GetRandomByteArray(s_random, 2); tempByteArray2 = GetRandomByteArray(s_random); VerifyDivRemString(Print(tempByteArray1) + Print(tempByteArray2) + "bDivRem"); } } [Fact] public static void RunDivRem_OneLargeOne0BI() { byte[] tempByteArray1 = new byte[0]; byte[] tempByteArray2 = new byte[0]; // DivRem Method - One Large BigIntegers and zero for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random); tempByteArray2 = new byte[] { 0 }; VerifyDivRemString(Print(tempByteArray1) + Print(tempByteArray2) + "bDivRem"); Assert.Throws<DivideByZeroException>(() => { VerifyDivRemString(Print(tempByteArray2) + Print(tempByteArray1) + "bDivRem"); }); } } [Fact] public static void RunDivRem_OneSmallOne0BI() { byte[] tempByteArray1 = new byte[0]; byte[] tempByteArray2 = new byte[0]; // DivRem Method - One small BigIntegers and zero for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random, 2); tempByteArray2 = new byte[] { 0 }; VerifyDivRemString(Print(tempByteArray1) + Print(tempByteArray2) + "bDivRem"); Assert.Throws<DivideByZeroException>(() => { VerifyDivRemString(Print(tempByteArray2) + Print(tempByteArray1) + "bDivRem"); }); } } [Fact] public static void Boundary() { byte[] tempByteArray1 = new byte[0]; byte[] tempByteArray2 = new byte[0]; // Check interesting cases for boundary conditions // You'll either be shifting a 0 or 1 across the boundary // 32 bit boundary n2=0 VerifyDivRemString(Math.Pow(2, 32) + " 2 bDivRem"); // 32 bit boundary n1=0 n2=1 VerifyDivRemString(Math.Pow(2, 33) + " 2 bDivRem"); } [Fact] public static void RunDivRemTests() { byte[] tempByteArray1 = new byte[0]; byte[] tempByteArray2 = new byte[0]; // DivRem Method - Two Large BigIntegers for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random); tempByteArray2 = GetRandomByteArray(s_random); VerifyDivRemString(Print(tempByteArray1) + Print(tempByteArray2) + "bDivRem"); } // DivRem Method - Two Small BigIntegers for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random, 2); 
tempByteArray2 = GetRandomByteArray(s_random, 2); VerifyDivRemString(Print(tempByteArray1) + Print(tempByteArray2) + "bDivRem"); } // DivRem Method - One Large and one small BigIntegers for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random); tempByteArray2 = GetRandomByteArray(s_random, 2); VerifyDivRemString(Print(tempByteArray1) + Print(tempByteArray2) + "bDivRem"); tempByteArray1 = GetRandomByteArray(s_random, 2); tempByteArray2 = GetRandomByteArray(s_random); VerifyDivRemString(Print(tempByteArray1) + Print(tempByteArray2) + "bDivRem"); } // DivRem Method - One Large BigIntegers and zero for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random); tempByteArray2 = new byte[] { 0 }; VerifyDivRemString(Print(tempByteArray1) + Print(tempByteArray2) + "bDivRem"); Assert.Throws<DivideByZeroException>(() => { VerifyDivRemString(Print(tempByteArray2) + Print(tempByteArray1) + "bDivRem"); }); } // DivRem Method - One small BigIntegers and zero for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random, 2); tempByteArray2 = new byte[] { 0 }; VerifyDivRemString(Print(tempByteArray1) + Print(tempByteArray2) + "bDivRem"); Assert.Throws<DivideByZeroException>(() => { VerifyDivRemString(Print(tempByteArray2) + Print(tempByteArray1) + "bDivRem"); }); } // Check interesting cases for boundary conditions // You'll either be shifting a 0 or 1 across the boundary // 32 bit boundary n2=0 VerifyDivRemString(Math.Pow(2, 32) + " 2 bDivRem"); // 32 bit boundary n1=0 n2=1 VerifyDivRemString(Math.Pow(2, 33) + " 2 bDivRem"); } private static void VerifyDivRemString(string opstring) { try { StackCalc sc = new StackCalc(opstring); while (sc.DoNextOperation()) { Assert.Equal(sc.snCalc.Peek().ToString(), sc.myCalc.Peek().ToString()); sc.VerifyOutParameter(); } } catch (Exception e) when (!(e is DivideByZeroException)) { // Log the original parameters, so we can reproduce any failure given the log throw new Exception($"VerifyDivRemString failed: {opstring} {e.ToString()}", e); } } private static byte[] GetRandomByteArray(Random random) { return GetRandomByteArray(random, random.Next(1, 100)); } private static byte[] GetRandomByteArray(Random random, int size) { return MyBigIntImp.GetNonZeroRandomByteArray(random, size); } private static string Print(byte[] bytes) { return MyBigIntImp.Print(bytes); } } }
-1
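The boundary cases above push a quotient digit across the 32-bit limb boundary (2^32 / 2 and 2^33 / 2), and the zero-divisor loops expect DivideByZeroException. Stated directly against the public API instead of the opstring/StackCalc plumbing, the expectations look like this small sketch:

using System;
using System.Numerics;

BigInteger q = BigInteger.DivRem(BigInteger.Pow(2, 32), 2, out BigInteger r);
Console.WriteLine(q == BigInteger.Pow(2, 31) && r == 0);   // True: 2^32 / 2 = 2^31, remainder 0

q = BigInteger.DivRem(BigInteger.Pow(2, 33), 2, out r);
Console.WriteLine(q == BigInteger.Pow(2, 32) && r == 0);   // True: 2^33 / 2 = 2^32, remainder 0

// Division by zero throws, which is what the zero-divisor loops assert above.
try { BigInteger.DivRem(BigInteger.One, BigInteger.Zero, out r); }
catch (DivideByZeroException) { Console.WriteLine("DivideByZeroException as expected"); }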
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/CoreMangLib/system/delegate/regressions/devdivbugs/113347/ddb113347.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Reflection; using System.Security; [SecuritySafeCritical] class Program { static int Main() { Console.WriteLine("Attempting delegate construction with null method pointer."); Console.WriteLine("Expecting: ArgumentNullException wrapped in TargetInvocationException."); try { Activator.CreateInstance(typeof(Action<object>), null, IntPtr.Zero); Console.WriteLine("FAIL: Creation succeeded"); return 200; } catch (TargetInvocationException ex) { Console.WriteLine("Caught expected TargetInvocationException"); if (ex.InnerException == null) { Console.WriteLine("No inner exception was provided"); Console.WriteLine("FAILED"); return 201; } else if (ex.InnerException is ArgumentNullException) { Console.WriteLine("Inner exception is ArgumentNullException as expected"); Console.WriteLine("PASSED"); return 100; } else { Console.WriteLine("Unexpected inner exception: {0}", ex.InnerException); Console.WriteLine("FAILED"); return 202; } } catch (Exception ex) { Console.WriteLine("Caught unexpected exception: {0}", ex); Console.WriteLine("FAILED"); return 203; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Reflection; using System.Security; [SecuritySafeCritical] class Program { static int Main() { Console.WriteLine("Attempting delegate construction with null method pointer."); Console.WriteLine("Expecting: ArgumentNullException wrapped in TargetInvocationException."); try { Activator.CreateInstance(typeof(Action<object>), null, IntPtr.Zero); Console.WriteLine("FAIL: Creation succeeded"); return 200; } catch (TargetInvocationException ex) { Console.WriteLine("Caught expected TargetInvocationException"); if (ex.InnerException == null) { Console.WriteLine("No inner exception was provided"); Console.WriteLine("FAILED"); return 201; } else if (ex.InnerException is ArgumentNullException) { Console.WriteLine("Inner exception is ArgumentNullException as expected"); Console.WriteLine("PASSED"); return 100; } else { Console.WriteLine("Unexpected inner exception: {0}", ex.InnerException); Console.WriteLine("FAILED"); return 202; } } catch (Exception ex) { Console.WriteLine("Caught unexpected exception: {0}", ex); Console.WriteLine("FAILED"); return 203; } } }
-1
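The regression test above depends on Activator.CreateInstance wrapping the delegate constructor's ArgumentNullException in a TargetInvocationException instead of letting it escape directly. A condensed sketch of that unwrap pattern using an exception filter, with the same Action<object> delegate type as the test:

using System;
using System.Reflection;

try
{
    // Null method pointer: the constructor failure surfaces as the inner exception.
    Activator.CreateInstance(typeof(Action<object>), null, IntPtr.Zero);
}
catch (TargetInvocationException ex) when (ex.InnerException is ArgumentNullException)
{
    Console.WriteLine($"Wrapped failure: {ex.InnerException.GetType().Name}");
}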
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/libraries/Common/src/System/IO/Compression/ZLibNative.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Runtime.InteropServices; using System.Security; namespace System.IO.Compression { /// <summary> /// This class provides declaration for constants and PInvokes as well as some basic tools for exposing the /// native System.IO.Compression.Native.dll (effectively, ZLib) library to managed code. /// /// See also: How to choose a compression level (in comments to <code>CompressionLevel</code>. /// </summary> internal static partial class ZLibNative { // This is the NULL pointer for using with ZLib pointers; // we prefer it to IntPtr.Zero to mimic the definition of Z_NULL in zlib.h: internal static readonly IntPtr ZNullPtr = IntPtr.Zero; public enum FlushCode : int { NoFlush = 0, SyncFlush = 2, Finish = 4, Block = 5 } public enum ErrorCode : int { Ok = 0, StreamEnd = 1, StreamError = -2, DataError = -3, MemError = -4, BufError = -5, VersionError = -6 } /// <summary> /// <p>ZLib can accept any integer value between 0 and 9 (inclusive) as a valid compression level parameter: /// 1 gives best speed, 9 gives best compression, 0 gives no compression at all (the input data is simply copied a block at a time). /// <code>CompressionLevel.DefaultCompression</code> = -1 requests a default compromise between speed and compression /// (currently equivalent to level 6).</p> /// /// <p><strong>How to choose a compression level:</strong></p> /// /// <p>The names <code>NoCompression</code>, <code>BestSpeed</code>, <code>DefaultCompression</code>, <code>BestCompression</code> are taken over from /// the corresponding ZLib definitions, which map to our public NoCompression, Fastest, Optimal, and SmallestSize respectively.</p> /// <p><em>Optimal Compression:</em></p> /// <p><code>ZLibNative.CompressionLevel compressionLevel = ZLibNative.CompressionLevel.DefaultCompression;</code> <br /> /// <code>int windowBits = 15; // or -15 if no headers required</code> <br /> /// <code>int memLevel = 8;</code> <br /> /// <code>ZLibNative.CompressionStrategy strategy = ZLibNative.CompressionStrategy.DefaultStrategy;</code> </p> /// ///<p><em>Fastest compression:</em></p> ///<p><code>ZLibNative.CompressionLevel compressionLevel = ZLibNative.CompressionLevel.BestSpeed;</code> <br /> /// <code>int windowBits = 15; // or -15 if no headers required</code> <br /> /// <code>int memLevel = 8; </code> <br /> /// <code>ZLibNative.CompressionStrategy strategy = ZLibNative.CompressionStrategy.DefaultStrategy;</code> </p> /// /// <p><em>No compression (even faster, useful for data that cannot be compressed such some image formats):</em></p> /// <p><code>ZLibNative.CompressionLevel compressionLevel = ZLibNative.CompressionLevel.NoCompression;</code> <br /> /// <code>int windowBits = 15; // or -15 if no headers required</code> <br /> /// <code>int memLevel = 7;</code> <br /> /// <code>ZLibNative.CompressionStrategy strategy = ZLibNative.CompressionStrategy.DefaultStrategy;</code> </p> /// /// <p><em>Smallest Size Compression:</em></p> /// <p><code>ZLibNative.CompressionLevel compressionLevel = ZLibNative.CompressionLevel.BestCompression;</code> <br /> /// <code>int windowBits = 15; // or -15 if no headers required</code> <br /> /// <code>int memLevel = 8;</code> <br /> /// <code>ZLibNative.CompressionStrategy strategy = ZLibNative.CompressionStrategy.DefaultStrategy;</code> </p> /// </summary> public enum CompressionLevel : int { NoCompression = 0, BestSpeed = 1, DefaultCompression = -1, 
BestCompression = 9 } /// <summary> /// <p><strong>From the ZLib manual:</strong></p> /// <p><code>CompressionStrategy</code> is used to tune the compression algorithm.<br /> /// Use the value <code>DefaultStrategy</code> for normal data, <code>Filtered</code> for data produced by a filter (or predictor), /// <code>HuffmanOnly</code> to force Huffman encoding only (no string match), or <code>Rle</code> to limit match distances to one /// (run-length encoding). Filtered data consists mostly of small values with a somewhat random distribution. In this case, the /// compression algorithm is tuned to compress them better. The effect of <code>Filtered</code> is to force more Huffman coding and] /// less string matching; it is somewhat intermediate between <code>DefaultStrategy</code> and <code>HuffmanOnly</code>. /// <code>Rle</code> is designed to be almost as fast as <code>HuffmanOnly</code>, but give better compression for PNG image data. /// The strategy parameter only affects the compression ratio but not the correctness of the compressed output even if it is not set /// appropriately. <code>Fixed</code> prevents the use of dynamic Huffman codes, allowing for a simpler decoder for special applications.</p> /// /// <p><strong>For .NET Framework use:</strong></p> /// <p>We have investigated compression scenarios for a bunch of different frequently occurring compression data and found that in all /// cases we investigated so far, <code>DefaultStrategy</code> provided best results</p> /// <p>See also: How to choose a compression level (in comments to <code>CompressionLevel</code>.</p> /// </summary> public enum CompressionStrategy : int { DefaultStrategy = 0 } /// <summary> /// In version 1.2.3, ZLib provides on the <code>Deflated</code>-<code>CompressionMethod</code>. /// </summary> public enum CompressionMethod : int { Deflated = 8 } /// <summary> /// <p><strong>From the ZLib manual:</strong></p> /// <p>ZLib's <code>windowBits</code> parameter is the base two logarithm of the window size (the size of the history buffer). /// It should be in the range 8..15 for this version of the library. Larger values of this parameter result in better compression /// at the expense of memory usage. The default value is 15 if deflateInit is used instead.<br /></p> /// <strong>Note</strong>: /// <code>windowBits</code> can also be -8..-15 for raw deflate. In this case, -windowBits determines the window size. /// <code>Deflate</code> will then generate raw deflate data with no ZLib header or trailer, and will not compute an adler32 check value.<br /> /// <p>See also: How to choose a compression level (in comments to <code>CompressionLevel</code>.</p> /// </summary> public const int Deflate_DefaultWindowBits = -15; // Legal values are 8..15 and -8..-15. 15 is the window size, // negative val causes deflate to produce raw deflate data (no zlib header). /// <summary> /// <p><strong>From the ZLib manual:</strong></p> /// <p>ZLib's <code>windowBits</code> parameter is the base two logarithm of the window size (the size of the history buffer). /// It should be in the range 8..15 for this version of the library. Larger values of this parameter result in better compression /// at the expense of memory usage. The default value is 15 if deflateInit is used instead.<br /></p> /// </summary> public const int ZLib_DefaultWindowBits = 15; /// <summary> /// <p>Zlib's <code>windowBits</code> parameter is the base two logarithm of the window size (the size of the history buffer). 
/// For GZip header encoding, <code>windowBits</code> should be equal to a value between 8..15 (to specify Window Size) added to /// 16. The range of values for GZip encoding is therefore 24..31. /// <strong>Note</strong>: /// The GZip header will have no file name, no extra data, no comment, no modification time (set to zero), no header crc, and /// the operating system will be set based on the OS that the ZLib library was compiled to. <code>ZStream.adler</code> /// is a crc32 instead of an adler32.</p> /// </summary> public const int GZip_DefaultWindowBits = 31; /// <summary> /// <p><strong>From the ZLib manual:</strong></p> /// <p>The <code>memLevel</code> parameter specifies how much memory should be allocated for the internal compression state. /// <code>memLevel</code> = 1 uses minimum memory but is slow and reduces compression ratio; <code>memLevel</code> = 9 uses maximum /// memory for optimal speed. The default value is 8.</p> /// <p>See also: How to choose a compression level (in comments to <code>CompressionLevel</code>.</p> /// </summary> public const int Deflate_DefaultMemLevel = 8; // Memory usage by deflate. Legal range: [1..9]. 8 is ZLib default. // More is faster and better compression with more memory usage. public const int Deflate_NoCompressionMemLevel = 7; public const byte GZip_Header_ID1 = 31; public const byte GZip_Header_ID2 = 139; /** * Do not remove the nested typing of types inside of <code>System.IO.Compression.ZLibNative</code>. * This was done on purpose to: * * - Achieve the right encapsulation in a situation where <code>ZLibNative</code> may be compiled division-wide * into different assemblies that wish to consume <code>System.IO.Compression.Native</code>. Since <code>internal</code> * scope is effectively like <code>public</code> scope when compiling <code>ZLibNative</code> into a higher * level assembly, we need a combination of inner types and <code>private</code>-scope members to achieve * the right encapsulation. * * - Achieve late dynamic loading of <code>System.IO.Compression.Native.dll</code> at the right time. * The native assembly will not be loaded unless it is actually used since the loading is performed by a static * constructor of an inner type that is not directly referenced by user code. * * In Dev12 we would like to create a proper feature for loading native assemblies from user-specified * directories in order to PInvoke into them. This would preferably happen in the native interop/PInvoke * layer; if not we can add a Framework level feature. */ /// <summary> /// The <code>ZLibStreamHandle</code> could be a <code>CriticalFinalizerObject</code> rather than a /// <code>SafeHandleMinusOneIsInvalid</code>. This would save an <code>IntPtr</code> field since /// <code>ZLibStreamHandle</code> does not actually use its <code>handle</code> field. /// Instead it uses a <code>private ZStream zStream</code> field which is the actual handle data /// structure requiring critical finalization. /// However, we would like to take advantage if the better debugability offered by the fact that a /// <em>releaseHandleFailed MDA</em> is raised if the <code>ReleaseHandle</code> method returns /// <code>false</code>, which can for instance happen if the underlying ZLib <code>XxxxEnd</code> /// routines return an failure error code. 
/// </summary> public sealed class ZLibStreamHandle : SafeHandle { public enum State { NotInitialized, InitializedForDeflate, InitializedForInflate, Disposed } private ZStream _zStream; private volatile State _initializationState; public ZLibStreamHandle() : base(new IntPtr(-1), true) { _zStream.Init(); _initializationState = State.NotInitialized; SetHandle(IntPtr.Zero); } public override bool IsInvalid { get { return handle == new IntPtr(-1); } } public State InitializationState { get { return _initializationState; } } protected override bool ReleaseHandle() => InitializationState switch { State.NotInitialized => true, State.InitializedForDeflate => (DeflateEnd() == ErrorCode.Ok), State.InitializedForInflate => (InflateEnd() == ErrorCode.Ok), State.Disposed => true, _ => false, // This should never happen. Did we forget one of the State enum values in the switch? }; public IntPtr NextIn { get { return _zStream.nextIn; } set { _zStream.nextIn = value; } } public uint AvailIn { get { return _zStream.availIn; } set { _zStream.availIn = value; } } public IntPtr NextOut { get { return _zStream.nextOut; } set { _zStream.nextOut = value; } } public uint AvailOut { get { return _zStream.availOut; } set { _zStream.availOut = value; } } private void EnsureNotDisposed() { if (InitializationState == State.Disposed) throw new ObjectDisposedException(GetType().ToString()); } private void EnsureState(State requiredState) { if (InitializationState != requiredState) throw new InvalidOperationException("InitializationState != " + requiredState.ToString()); } public unsafe ErrorCode DeflateInit2_(CompressionLevel level, int windowBits, int memLevel, CompressionStrategy strategy) { EnsureNotDisposed(); EnsureState(State.NotInitialized); fixed (ZStream* stream = &_zStream) { ErrorCode errC = Interop.ZLib.DeflateInit2_(stream, level, CompressionMethod.Deflated, windowBits, memLevel, strategy); _initializationState = State.InitializedForDeflate; return errC; } } public unsafe ErrorCode Deflate(FlushCode flush) { EnsureNotDisposed(); EnsureState(State.InitializedForDeflate); fixed (ZStream* stream = &_zStream) { return Interop.ZLib.Deflate(stream, flush); } } public unsafe ErrorCode DeflateReset() { EnsureNotDisposed(); EnsureState(State.InitializedForDeflate); fixed (ZStream* stream = &_zStream) { return Interop.ZLib.DeflateReset(stream); } } public unsafe ErrorCode DeflateEnd() { EnsureNotDisposed(); EnsureState(State.InitializedForDeflate); fixed (ZStream* stream = &_zStream) { ErrorCode errC = Interop.ZLib.DeflateEnd(stream); _initializationState = State.Disposed; return errC; } } public unsafe ErrorCode InflateInit2_(int windowBits) { EnsureNotDisposed(); EnsureState(State.NotInitialized); fixed (ZStream* stream = &_zStream) { ErrorCode errC = Interop.ZLib.InflateInit2_(stream, windowBits); _initializationState = State.InitializedForInflate; return errC; } } public unsafe ErrorCode Inflate(FlushCode flush) { EnsureNotDisposed(); EnsureState(State.InitializedForInflate); fixed (ZStream* stream = &_zStream) { return Interop.ZLib.Inflate(stream, flush); } } public unsafe ErrorCode InflateReset() { EnsureNotDisposed(); EnsureState(State.InitializedForInflate); fixed (ZStream* stream = &_zStream) { return Interop.ZLib.InflateReset(stream); } } public unsafe ErrorCode InflateEnd() { EnsureNotDisposed(); EnsureState(State.InitializedForInflate); fixed (ZStream* stream = &_zStream) { ErrorCode errC = Interop.ZLib.InflateEnd(stream); _initializationState = State.Disposed; return errC; } } // This can work even after 
XxflateEnd(). public string GetErrorMessage() => _zStream.msg != ZNullPtr ? Marshal.PtrToStringAnsi(_zStream.msg)! : string.Empty; } public static ErrorCode CreateZLibStreamForDeflate(out ZLibStreamHandle zLibStreamHandle, CompressionLevel level, int windowBits, int memLevel, CompressionStrategy strategy) { zLibStreamHandle = new ZLibStreamHandle(); return zLibStreamHandle.DeflateInit2_(level, windowBits, memLevel, strategy); } public static ErrorCode CreateZLibStreamForInflate(out ZLibStreamHandle zLibStreamHandle, int windowBits) { zLibStreamHandle = new ZLibStreamHandle(); return zLibStreamHandle.InflateInit2_(windowBits); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Runtime.InteropServices; using System.Security; namespace System.IO.Compression { /// <summary> /// This class provides declaration for constants and PInvokes as well as some basic tools for exposing the /// native System.IO.Compression.Native.dll (effectively, ZLib) library to managed code. /// /// See also: How to choose a compression level (in comments to <code>CompressionLevel</code>. /// </summary> internal static partial class ZLibNative { // This is the NULL pointer for using with ZLib pointers; // we prefer it to IntPtr.Zero to mimic the definition of Z_NULL in zlib.h: internal static readonly IntPtr ZNullPtr = IntPtr.Zero; public enum FlushCode : int { NoFlush = 0, SyncFlush = 2, Finish = 4, Block = 5 } public enum ErrorCode : int { Ok = 0, StreamEnd = 1, StreamError = -2, DataError = -3, MemError = -4, BufError = -5, VersionError = -6 } /// <summary> /// <p>ZLib can accept any integer value between 0 and 9 (inclusive) as a valid compression level parameter: /// 1 gives best speed, 9 gives best compression, 0 gives no compression at all (the input data is simply copied a block at a time). /// <code>CompressionLevel.DefaultCompression</code> = -1 requests a default compromise between speed and compression /// (currently equivalent to level 6).</p> /// /// <p><strong>How to choose a compression level:</strong></p> /// /// <p>The names <code>NoCompression</code>, <code>BestSpeed</code>, <code>DefaultCompression</code>, <code>BestCompression</code> are taken over from /// the corresponding ZLib definitions, which map to our public NoCompression, Fastest, Optimal, and SmallestSize respectively.</p> /// <p><em>Optimal Compression:</em></p> /// <p><code>ZLibNative.CompressionLevel compressionLevel = ZLibNative.CompressionLevel.DefaultCompression;</code> <br /> /// <code>int windowBits = 15; // or -15 if no headers required</code> <br /> /// <code>int memLevel = 8;</code> <br /> /// <code>ZLibNative.CompressionStrategy strategy = ZLibNative.CompressionStrategy.DefaultStrategy;</code> </p> /// ///<p><em>Fastest compression:</em></p> ///<p><code>ZLibNative.CompressionLevel compressionLevel = ZLibNative.CompressionLevel.BestSpeed;</code> <br /> /// <code>int windowBits = 15; // or -15 if no headers required</code> <br /> /// <code>int memLevel = 8; </code> <br /> /// <code>ZLibNative.CompressionStrategy strategy = ZLibNative.CompressionStrategy.DefaultStrategy;</code> </p> /// /// <p><em>No compression (even faster, useful for data that cannot be compressed such some image formats):</em></p> /// <p><code>ZLibNative.CompressionLevel compressionLevel = ZLibNative.CompressionLevel.NoCompression;</code> <br /> /// <code>int windowBits = 15; // or -15 if no headers required</code> <br /> /// <code>int memLevel = 7;</code> <br /> /// <code>ZLibNative.CompressionStrategy strategy = ZLibNative.CompressionStrategy.DefaultStrategy;</code> </p> /// /// <p><em>Smallest Size Compression:</em></p> /// <p><code>ZLibNative.CompressionLevel compressionLevel = ZLibNative.CompressionLevel.BestCompression;</code> <br /> /// <code>int windowBits = 15; // or -15 if no headers required</code> <br /> /// <code>int memLevel = 8;</code> <br /> /// <code>ZLibNative.CompressionStrategy strategy = ZLibNative.CompressionStrategy.DefaultStrategy;</code> </p> /// </summary> public enum CompressionLevel : int { NoCompression = 0, BestSpeed = 1, DefaultCompression = -1, 
BestCompression = 9 } /// <summary> /// <p><strong>From the ZLib manual:</strong></p> /// <p><code>CompressionStrategy</code> is used to tune the compression algorithm.<br /> /// Use the value <code>DefaultStrategy</code> for normal data, <code>Filtered</code> for data produced by a filter (or predictor), /// <code>HuffmanOnly</code> to force Huffman encoding only (no string match), or <code>Rle</code> to limit match distances to one /// (run-length encoding). Filtered data consists mostly of small values with a somewhat random distribution. In this case, the /// compression algorithm is tuned to compress them better. The effect of <code>Filtered</code> is to force more Huffman coding and] /// less string matching; it is somewhat intermediate between <code>DefaultStrategy</code> and <code>HuffmanOnly</code>. /// <code>Rle</code> is designed to be almost as fast as <code>HuffmanOnly</code>, but give better compression for PNG image data. /// The strategy parameter only affects the compression ratio but not the correctness of the compressed output even if it is not set /// appropriately. <code>Fixed</code> prevents the use of dynamic Huffman codes, allowing for a simpler decoder for special applications.</p> /// /// <p><strong>For .NET Framework use:</strong></p> /// <p>We have investigated compression scenarios for a bunch of different frequently occurring compression data and found that in all /// cases we investigated so far, <code>DefaultStrategy</code> provided best results</p> /// <p>See also: How to choose a compression level (in comments to <code>CompressionLevel</code>.</p> /// </summary> public enum CompressionStrategy : int { DefaultStrategy = 0 } /// <summary> /// In version 1.2.3, ZLib provides on the <code>Deflated</code>-<code>CompressionMethod</code>. /// </summary> public enum CompressionMethod : int { Deflated = 8 } /// <summary> /// <p><strong>From the ZLib manual:</strong></p> /// <p>ZLib's <code>windowBits</code> parameter is the base two logarithm of the window size (the size of the history buffer). /// It should be in the range 8..15 for this version of the library. Larger values of this parameter result in better compression /// at the expense of memory usage. The default value is 15 if deflateInit is used instead.<br /></p> /// <strong>Note</strong>: /// <code>windowBits</code> can also be -8..-15 for raw deflate. In this case, -windowBits determines the window size. /// <code>Deflate</code> will then generate raw deflate data with no ZLib header or trailer, and will not compute an adler32 check value.<br /> /// <p>See also: How to choose a compression level (in comments to <code>CompressionLevel</code>.</p> /// </summary> public const int Deflate_DefaultWindowBits = -15; // Legal values are 8..15 and -8..-15. 15 is the window size, // negative val causes deflate to produce raw deflate data (no zlib header). /// <summary> /// <p><strong>From the ZLib manual:</strong></p> /// <p>ZLib's <code>windowBits</code> parameter is the base two logarithm of the window size (the size of the history buffer). /// It should be in the range 8..15 for this version of the library. Larger values of this parameter result in better compression /// at the expense of memory usage. The default value is 15 if deflateInit is used instead.<br /></p> /// </summary> public const int ZLib_DefaultWindowBits = 15; /// <summary> /// <p>Zlib's <code>windowBits</code> parameter is the base two logarithm of the window size (the size of the history buffer). 
/// For GZip header encoding, <code>windowBits</code> should be equal to a value between 8..15 (to specify Window Size) added to /// 16. The range of values for GZip encoding is therefore 24..31. /// <strong>Note</strong>: /// The GZip header will have no file name, no extra data, no comment, no modification time (set to zero), no header crc, and /// the operating system will be set based on the OS that the ZLib library was compiled to. <code>ZStream.adler</code> /// is a crc32 instead of an adler32.</p> /// </summary> public const int GZip_DefaultWindowBits = 31; /// <summary> /// <p><strong>From the ZLib manual:</strong></p> /// <p>The <code>memLevel</code> parameter specifies how much memory should be allocated for the internal compression state. /// <code>memLevel</code> = 1 uses minimum memory but is slow and reduces compression ratio; <code>memLevel</code> = 9 uses maximum /// memory for optimal speed. The default value is 8.</p> /// <p>See also: How to choose a compression level (in comments to <code>CompressionLevel</code>.</p> /// </summary> public const int Deflate_DefaultMemLevel = 8; // Memory usage by deflate. Legal range: [1..9]. 8 is ZLib default. // More is faster and better compression with more memory usage. public const int Deflate_NoCompressionMemLevel = 7; public const byte GZip_Header_ID1 = 31; public const byte GZip_Header_ID2 = 139; /** * Do not remove the nested typing of types inside of <code>System.IO.Compression.ZLibNative</code>. * This was done on purpose to: * * - Achieve the right encapsulation in a situation where <code>ZLibNative</code> may be compiled division-wide * into different assemblies that wish to consume <code>System.IO.Compression.Native</code>. Since <code>internal</code> * scope is effectively like <code>public</code> scope when compiling <code>ZLibNative</code> into a higher * level assembly, we need a combination of inner types and <code>private</code>-scope members to achieve * the right encapsulation. * * - Achieve late dynamic loading of <code>System.IO.Compression.Native.dll</code> at the right time. * The native assembly will not be loaded unless it is actually used since the loading is performed by a static * constructor of an inner type that is not directly referenced by user code. * * In Dev12 we would like to create a proper feature for loading native assemblies from user-specified * directories in order to PInvoke into them. This would preferably happen in the native interop/PInvoke * layer; if not we can add a Framework level feature. */ /// <summary> /// The <code>ZLibStreamHandle</code> could be a <code>CriticalFinalizerObject</code> rather than a /// <code>SafeHandleMinusOneIsInvalid</code>. This would save an <code>IntPtr</code> field since /// <code>ZLibStreamHandle</code> does not actually use its <code>handle</code> field. /// Instead it uses a <code>private ZStream zStream</code> field which is the actual handle data /// structure requiring critical finalization. /// However, we would like to take advantage if the better debugability offered by the fact that a /// <em>releaseHandleFailed MDA</em> is raised if the <code>ReleaseHandle</code> method returns /// <code>false</code>, which can for instance happen if the underlying ZLib <code>XxxxEnd</code> /// routines return an failure error code. 
/// </summary> public sealed class ZLibStreamHandle : SafeHandle { public enum State { NotInitialized, InitializedForDeflate, InitializedForInflate, Disposed } private ZStream _zStream; private volatile State _initializationState; public ZLibStreamHandle() : base(new IntPtr(-1), true) { _zStream.Init(); _initializationState = State.NotInitialized; SetHandle(IntPtr.Zero); } public override bool IsInvalid { get { return handle == new IntPtr(-1); } } public State InitializationState { get { return _initializationState; } } protected override bool ReleaseHandle() => InitializationState switch { State.NotInitialized => true, State.InitializedForDeflate => (DeflateEnd() == ErrorCode.Ok), State.InitializedForInflate => (InflateEnd() == ErrorCode.Ok), State.Disposed => true, _ => false, // This should never happen. Did we forget one of the State enum values in the switch? }; public IntPtr NextIn { get { return _zStream.nextIn; } set { _zStream.nextIn = value; } } public uint AvailIn { get { return _zStream.availIn; } set { _zStream.availIn = value; } } public IntPtr NextOut { get { return _zStream.nextOut; } set { _zStream.nextOut = value; } } public uint AvailOut { get { return _zStream.availOut; } set { _zStream.availOut = value; } } private void EnsureNotDisposed() { if (InitializationState == State.Disposed) throw new ObjectDisposedException(GetType().ToString()); } private void EnsureState(State requiredState) { if (InitializationState != requiredState) throw new InvalidOperationException("InitializationState != " + requiredState.ToString()); } public unsafe ErrorCode DeflateInit2_(CompressionLevel level, int windowBits, int memLevel, CompressionStrategy strategy) { EnsureNotDisposed(); EnsureState(State.NotInitialized); fixed (ZStream* stream = &_zStream) { ErrorCode errC = Interop.ZLib.DeflateInit2_(stream, level, CompressionMethod.Deflated, windowBits, memLevel, strategy); _initializationState = State.InitializedForDeflate; return errC; } } public unsafe ErrorCode Deflate(FlushCode flush) { EnsureNotDisposed(); EnsureState(State.InitializedForDeflate); fixed (ZStream* stream = &_zStream) { return Interop.ZLib.Deflate(stream, flush); } } public unsafe ErrorCode DeflateReset() { EnsureNotDisposed(); EnsureState(State.InitializedForDeflate); fixed (ZStream* stream = &_zStream) { return Interop.ZLib.DeflateReset(stream); } } public unsafe ErrorCode DeflateEnd() { EnsureNotDisposed(); EnsureState(State.InitializedForDeflate); fixed (ZStream* stream = &_zStream) { ErrorCode errC = Interop.ZLib.DeflateEnd(stream); _initializationState = State.Disposed; return errC; } } public unsafe ErrorCode InflateInit2_(int windowBits) { EnsureNotDisposed(); EnsureState(State.NotInitialized); fixed (ZStream* stream = &_zStream) { ErrorCode errC = Interop.ZLib.InflateInit2_(stream, windowBits); _initializationState = State.InitializedForInflate; return errC; } } public unsafe ErrorCode Inflate(FlushCode flush) { EnsureNotDisposed(); EnsureState(State.InitializedForInflate); fixed (ZStream* stream = &_zStream) { return Interop.ZLib.Inflate(stream, flush); } } public unsafe ErrorCode InflateReset() { EnsureNotDisposed(); EnsureState(State.InitializedForInflate); fixed (ZStream* stream = &_zStream) { return Interop.ZLib.InflateReset(stream); } } public unsafe ErrorCode InflateEnd() { EnsureNotDisposed(); EnsureState(State.InitializedForInflate); fixed (ZStream* stream = &_zStream) { ErrorCode errC = Interop.ZLib.InflateEnd(stream); _initializationState = State.Disposed; return errC; } } // This can work even after 
XxflateEnd(). public string GetErrorMessage() => _zStream.msg != ZNullPtr ? Marshal.PtrToStringAnsi(_zStream.msg)! : string.Empty; } public static ErrorCode CreateZLibStreamForDeflate(out ZLibStreamHandle zLibStreamHandle, CompressionLevel level, int windowBits, int memLevel, CompressionStrategy strategy) { zLibStreamHandle = new ZLibStreamHandle(); return zLibStreamHandle.DeflateInit2_(level, windowBits, memLevel, strategy); } public static ErrorCode CreateZLibStreamForInflate(out ZLibStreamHandle zLibStreamHandle, int windowBits) { zLibStreamHandle = new ZLibStreamHandle(); return zLibStreamHandle.InflateInit2_(windowBits); } } }
-1
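Illustrative aside, not part of the record above: the internal ZLibStreamHandle defined in this file is the native deflate/inflate state that the public System.IO.Compression streams drive. A minimal round trip through the public DeflateStream API, which indirectly exercises the same DeflateInit2_/Deflate/Inflate/DeflateEnd sequence, is sketched below; the class and variable names are invented for the example.

using System;
using System.IO;
using System.IO.Compression;
using System.Text;

class DeflateRoundTrip
{
    static void Main()
    {
        byte[] original = Encoding.UTF8.GetBytes("hello, zlib");

        // Compress. Disposing the DeflateStream flushes the final block and,
        // further down, releases the native handle (ReleaseHandle -> DeflateEnd above).
        byte[] compressed;
        using (var buffer = new MemoryStream())
        {
            using (var deflate = new DeflateStream(buffer, CompressionLevel.Optimal, leaveOpen: true))
            {
                deflate.Write(original, 0, original.Length);
            }
            compressed = buffer.ToArray();
        }

        // Decompress and verify the round trip.
        using (var input = new MemoryStream(compressed))
        using (var inflate = new DeflateStream(input, CompressionMode.Decompress))
        using (var output = new MemoryStream())
        {
            inflate.CopyTo(output);
            Console.WriteLine(Encoding.UTF8.GetString(output.ToArray()) == "hello, zlib" ? "ok" : "mismatch");
        }
    }
}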
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/JIT/Regression/CLR-x86-JIT/V1-M11-Beta1/b47906/b47906.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).il" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).il" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/JIT/HardwareIntrinsics/General/Vector128/Subtract.UInt32.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void SubtractUInt32() { var test = new VectorBinaryOpTest__SubtractUInt32(); // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorBinaryOpTest__SubtractUInt32 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(UInt32[] inArray1, UInt32[] inArray2, UInt32[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt32>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<UInt32>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<UInt32>(); if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt32, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt32, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return 
(void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<UInt32> _fld1; public Vector128<UInt32> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); return testStruct; } public void RunStructFldScenario(VectorBinaryOpTest__SubtractUInt32 testClass) { var result = Vector128.Subtract(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<UInt32>>() / sizeof(UInt32); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<UInt32>>() / sizeof(UInt32); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<UInt32>>() / sizeof(UInt32); private static UInt32[] _data1 = new UInt32[Op1ElementCount]; private static UInt32[] _data2 = new UInt32[Op2ElementCount]; private static Vector128<UInt32> _clsVar1; private static Vector128<UInt32> _clsVar2; private Vector128<UInt32> _fld1; private Vector128<UInt32> _fld2; private DataTable _dataTable; static VectorBinaryOpTest__SubtractUInt32() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _clsVar1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _clsVar2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); } public VectorBinaryOpTest__SubtractUInt32() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _fld2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } _dataTable = new DataTable(_data1, _data2, new UInt32[RetElementCount], LargestVectorSize); } public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Vector128.Subtract( Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, 
_dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var method = typeof(Vector128).GetMethod(nameof(Vector128.Subtract), new Type[] { typeof(Vector128<UInt32>), typeof(Vector128<UInt32>) }); if (method is null) { method = typeof(Vector128).GetMethod(nameof(Vector128.Subtract), 1, new Type[] { typeof(Vector128<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), typeof(Vector128<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) }); } if (method.IsGenericMethodDefinition) { method = method.MakeGenericMethod(typeof(UInt32)); } var result = method.Invoke(null, new object[] { Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<UInt32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Vector128.Subtract( _clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray2Ptr); var result = Vector128.Subtract(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new VectorBinaryOpTest__SubtractUInt32(); var result = Vector128.Subtract(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Vector128.Subtract(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Vector128.Subtract(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } private void ValidateResult(Vector128<UInt32> op1, Vector128<UInt32> op2, void* result, [CallerMemberName] string method = "") { UInt32[] inArray1 = new UInt32[Op1ElementCount]; UInt32[] inArray2 = new UInt32[Op2ElementCount]; UInt32[] outArray = new UInt32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { UInt32[] inArray1 = new UInt32[Op1ElementCount]; UInt32[] inArray2 = new 
UInt32[Op2ElementCount]; UInt32[] outArray = new UInt32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(UInt32[] left, UInt32[] right, UInt32[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (result[0] != (uint)(left[0] - right[0])) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (result[i] != (uint)(left[i] - right[i])) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Vector128)}.{nameof(Vector128.Subtract)}<UInt32>(Vector128<UInt32>, Vector128<UInt32>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void SubtractUInt32() { var test = new VectorBinaryOpTest__SubtractUInt32(); // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorBinaryOpTest__SubtractUInt32 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(UInt32[] inArray1, UInt32[] inArray2, UInt32[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt32>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<UInt32>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<UInt32>(); if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt32, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt32, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return 
(void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<UInt32> _fld1; public Vector128<UInt32> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); return testStruct; } public void RunStructFldScenario(VectorBinaryOpTest__SubtractUInt32 testClass) { var result = Vector128.Subtract(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<UInt32>>() / sizeof(UInt32); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<UInt32>>() / sizeof(UInt32); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<UInt32>>() / sizeof(UInt32); private static UInt32[] _data1 = new UInt32[Op1ElementCount]; private static UInt32[] _data2 = new UInt32[Op2ElementCount]; private static Vector128<UInt32> _clsVar1; private static Vector128<UInt32> _clsVar2; private Vector128<UInt32> _fld1; private Vector128<UInt32> _fld2; private DataTable _dataTable; static VectorBinaryOpTest__SubtractUInt32() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _clsVar1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _clsVar2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); } public VectorBinaryOpTest__SubtractUInt32() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _fld2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } _dataTable = new DataTable(_data1, _data2, new UInt32[RetElementCount], LargestVectorSize); } public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Vector128.Subtract( Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, 
_dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var method = typeof(Vector128).GetMethod(nameof(Vector128.Subtract), new Type[] { typeof(Vector128<UInt32>), typeof(Vector128<UInt32>) }); if (method is null) { method = typeof(Vector128).GetMethod(nameof(Vector128.Subtract), 1, new Type[] { typeof(Vector128<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), typeof(Vector128<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) }); } if (method.IsGenericMethodDefinition) { method = method.MakeGenericMethod(typeof(UInt32)); } var result = method.Invoke(null, new object[] { Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<UInt32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Vector128.Subtract( _clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray2Ptr); var result = Vector128.Subtract(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new VectorBinaryOpTest__SubtractUInt32(); var result = Vector128.Subtract(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Vector128.Subtract(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Vector128.Subtract(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } private void ValidateResult(Vector128<UInt32> op1, Vector128<UInt32> op2, void* result, [CallerMemberName] string method = "") { UInt32[] inArray1 = new UInt32[Op1ElementCount]; UInt32[] inArray2 = new UInt32[Op2ElementCount]; UInt32[] outArray = new UInt32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { UInt32[] inArray1 = new UInt32[Op1ElementCount]; UInt32[] inArray2 = new 
UInt32[Op2ElementCount]; UInt32[] outArray = new UInt32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt32>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(UInt32[] left, UInt32[] right, UInt32[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (result[0] != (uint)(left[0] - right[0])) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (result[i] != (uint)(left[i] - right[i])) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Vector128)}.{nameof(Vector128.Subtract)}<UInt32>(Vector128<UInt32>, Vector128<UInt32>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
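Illustrative aside: the generated test above validates element-wise uint subtraction with wraparound semantics. A compact standalone check of the same Vector128.Subtract behavior (names invented for the example):

using System;
using System.Runtime.Intrinsics;

class Vector128SubtractDemo
{
    static void Main()
    {
        // Element-wise subtraction; uint arithmetic wraps on underflow,
        // which is what the generated ValidateResult compares against.
        Vector128<uint> left  = Vector128.Create(10u, 20u, 0u, 5u);
        Vector128<uint> right = Vector128.Create(3u, 20u, 1u, 7u);

        Vector128<uint> result = Vector128.Subtract(left, right);

        for (int i = 0; i < Vector128<uint>.Count; i++)
        {
            uint expected = unchecked(left.GetElement(i) - right.GetElement(i));
            Console.WriteLine($"{result.GetElement(i)} (expected {expected})");
        }
    }
}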
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/coreclr/pal/inc/rt/poppack.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // // =========================================================================== // File: poppack.h // // =========================================================================== /* Abstract: This file turns packing of structures off. (That is, it enables automatic alignment of structure fields.) An include file is needed because various compilers do this in different ways. poppack.h is the complement to pshpack?.h. An inclusion of poppack.h MUST ALWAYS be preceded by an inclusion of one of pshpack?.h, in one-to-one correspondence. For Microsoft compatible compilers, this file uses the pop option to the pack pragma so that it can restore the previous saved by the pshpack?.h include file. */ #if ! (defined(lint) || defined(RC_INVOKED)) #if ( _MSC_VER >= 800 && !defined(_M_I86)) || defined(_PUSHPOP_SUPPORTED) #pragma warning(disable:4103) #if !(defined( MIDL_PASS )) || defined( __midl ) #pragma pack(pop) #else #pragma pack() #endif #else #pragma pack() #endif #endif // ! (defined(lint) || defined(RC_INVOKED))
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // // =========================================================================== // File: poppack.h // // =========================================================================== /* Abstract: This file turns packing of structures off. (That is, it enables automatic alignment of structure fields.) An include file is needed because various compilers do this in different ways. poppack.h is the complement to pshpack?.h. An inclusion of poppack.h MUST ALWAYS be preceded by an inclusion of one of pshpack?.h, in one-to-one correspondence. For Microsoft compatible compilers, this file uses the pop option to the pack pragma so that it can restore the previous saved by the pshpack?.h include file. */ #if ! (defined(lint) || defined(RC_INVOKED)) #if ( _MSC_VER >= 800 && !defined(_M_I86)) || defined(_PUSHPOP_SUPPORTED) #pragma warning(disable:4103) #if !(defined( MIDL_PASS )) || defined( __midl ) #pragma pack(pop) #else #pragma pack() #endif #else #pragma pack() #endif #endif // ! (defined(lint) || defined(RC_INVOKED))
-1
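Illustrative aside: pshpack?.h / poppack.h control native struct packing around a declaration. The closest managed counterpart, shown here only as an assumed analogue rather than anything taken from the record, is StructLayout's Pack setting:

using System;
using System.Runtime.InteropServices;

class PackingDemo
{
    // Default layout: the int field imposes 4-byte alignment, so padding follows the byte.
    [StructLayout(LayoutKind.Sequential)]
    struct Padded { public byte B; public int I; }

    // Pack = 1 plays the role of wrapping a native declaration in pshpack1.h / poppack.h:
    // no padding is inserted between fields.
    [StructLayout(LayoutKind.Sequential, Pack = 1)]
    struct Packed { public byte B; public int I; }

    static void Main()
    {
        Console.WriteLine(Marshal.SizeOf<Padded>()); // typically 8
        Console.WriteLine(Marshal.SizeOf<Packed>()); // 5
    }
}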
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/GC/Scenarios/BinTree/thdtreegrowingobj.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <GCStressIncompatible>true</GCStressIncompatible> <!-- The test leaves threads running at exit --> <UnloadabilityIncompatible>true</UnloadabilityIncompatible> </PropertyGroup> <ItemGroup> <Compile Include="thdtreegrowingobj.cs" /> </ItemGroup> <ItemGroup> <ProjectReference Include="bintree.csproj" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <GCStressIncompatible>true</GCStressIncompatible> <!-- The test leaves threads running at exit --> <UnloadabilityIncompatible>true</UnloadabilityIncompatible> </PropertyGroup> <ItemGroup> <Compile Include="thdtreegrowingobj.cs" /> </ItemGroup> <ItemGroup> <ProjectReference Include="bintree.csproj" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/JIT/Regression/CLR-x86-JIT/V1-M12-Beta2/b31745/b31745.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> <!-- Test unsupported outside of windows --> <CLRTestTargetUnsupported Condition="'$(TargetsWindows)' != 'true'">true</CLRTestTargetUnsupported> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> <!-- Test unsupported outside of windows --> <CLRTestTargetUnsupported Condition="'$(TargetsWindows)' != 'true'">true</CLRTestTargetUnsupported> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/JIT/Methodical/NaN/r4NaNadd_cs_r.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>None</DebugType> <Optimize>False</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="r4NaNadd.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>None</DebugType> <Optimize>False</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="r4NaNadd.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/JIT/jit64/localloc/ehverify/eh12_dynamic.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } .assembly extern xunit.core {} .assembly extern mscorlib{} .assembly extern localloc_common{} .assembly 'eh12_dynamic'{} .class private auto ansi beforefieldinit LocallocTest extends [mscorlib]System.Object { .field private static int32 locallocSize .method public hidebysig static int32 Main() cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 4 .locals (unsigned int64 local1, unsigned int64 local2, int32* intArray1, int32* intArray2, int32 retValue) IL_0000: ldc.i8 0xfedcba9876543210 IL_0009: stloc.0 IL_000a: ldloc.0 IL_000b: ldc.i4.1 IL_000c: conv.i8 IL_000d: add IL_000e: stloc.1 IL_000f: ldc.i4.4 IL_0010: ldsfld int32 [localloc_common]LocallocTesting.Global::stackAllocSize IL_0015: mul IL_0016: localloc IL_0018: stloc.2 IL_0019: ldc.i4.4 IL_001a: ldsfld int32 [localloc_common]LocallocTesting.Global::stackAllocSize IL_001f: mul IL_0020: localloc IL_0022: stloc.3 IL_0023: ldsfld int32 [localloc_common]LocallocTesting.Global::stackAllocSize IL_0028: stsfld int32 LocallocTest::locallocSize .try { IL_002d: ldstr "Test Exception" IL_0032: newobj instance void [mscorlib]System.Exception::.ctor(string) IL_0037: throw } // end .try filter { pop ldc.i4.1 // Invoke handler block endfilter } { IL_0038: pop ldstr "In outer handler block" call void [System.Console]System.Console::WriteLine(string) .try { IL_0039: ldloc.2 IL_003a: ldsfld int32 LocallocTest::locallocSize IL_003f: ldc.i4 1000 IL_0044: call void [localloc_common]LocallocTesting.Global::initializeStack(int32*, int32, int32) IL_0049: ldloc.3 IL_004a: ldsfld int32 LocallocTest::locallocSize IL_004f: ldc.i4 2000 IL_0054: call void [localloc_common]LocallocTesting.Global::initializeStack(int32*, int32, int32) IL_0059: ldstr "Test Exception" IL_005e: newobj instance void [mscorlib]System.Exception::.ctor(string) IL_0063: throw } // end .try filter { IL_0064: pop ldstr "In inner filter block" call void [System.Console]System.Console::WriteLine(string) IL_0065: ldstr "intArray1" IL_006a: ldloc.2 IL_006b: ldsfld int32 LocallocTest::locallocSize IL_0070: ldc.i4 1000 IL_0075: call bool [localloc_common]LocallocTesting.Global::verifyStack(string, int32*, int32, int32) IL_007a: brtrue.s IL_0084 IL_007c: ldc.i4.1 IL_007d: stloc.s retValue IL_007f: br.s L_FAILED1 IL_0084: ldstr "intArray2" IL_0089: ldloc.3 IL_008a: ldsfld int32 LocallocTest::locallocSize IL_008f: ldc.i4 2000 IL_0094: call bool [localloc_common]LocallocTesting.Global::verifyStack(string, int32*, int32, int32) IL_0099: brtrue.s L_PASS1 IL_009b: ldc.i4.1 IL_009c: stloc.s retValue L_FAILED1: ldc.i4.0 // DO not handle exception. 
br.s L_ENDFILTER1 L_PASS1: ldc.i4.1 L_ENDFILTER1: endfilter } { pop IL_00a3: leave.s IL_00a5 } // end handler IL_00a5: leave.s IL_00a7 } // end handler IL_00a7: ldstr "intArray1" IL_00ac: ldloc.2 IL_00ad: ldsfld int32 LocallocTest::locallocSize IL_00b2: ldc.i4 0x3e8 IL_00b7: call bool [localloc_common]LocallocTesting.Global::verifyStack(string, int32*, int32, int32) IL_00bc: brtrue.s IL_00c3 IL_00be: ldc.i4.1 IL_00bf: stloc.s retValue IL_00c1: br.s IL_0125 IL_00c3: ldstr "intArray2" IL_00c8: ldloc.3 IL_00c9: ldsfld int32 LocallocTest::locallocSize IL_00ce: ldc.i4 0x7d0 IL_00d3: call bool [localloc_common]LocallocTesting.Global::verifyStack(string, int32*, int32, int32) IL_00d8: brtrue.s IL_00df IL_00da: ldc.i4.1 IL_00db: stloc.s retValue IL_00dd: br.s IL_0125 IL_00df: ldstr "local1" IL_00e4: ldloc.0 IL_00e5: ldc.i8 0xfedcba9876543210 IL_00ee: call bool [localloc_common]LocallocTesting.Global::verifyLocal(string, unsigned int64, unsigned int64) IL_00f3: brtrue.s IL_00fa IL_00f5: ldc.i4.1 IL_00f6: stloc.s retValue IL_00f8: br.s IL_0125 IL_00fa: ldstr "local2" IL_00ff: ldloc.1 IL_0100: ldc.i8 0xfedcba9876543211 IL_0109: call bool [localloc_common]LocallocTesting.Global::verifyLocal(string, unsigned int64, unsigned int64) IL_010e: brtrue.s IL_0115 IL_0110: ldc.i4.1 IL_0111: stloc.s retValue IL_0113: br.s IL_0125 IL_0115: ldstr "Passed\n" IL_011a: call void [System.Console]System.Console::WriteLine(string) IL_011f: ldc.i4.s 100 IL_0121: stloc.s retValue IL_0123: br.s IL_0125 IL_0125: ldloc.s retValue IL_0127: ret } // end of method LocallocTest::Main .method private hidebysig specialname rtspecialname static void .cctor() cil managed { .maxstack 8 IL_0000: ldc.i4.0 IL_0001: stsfld int32 LocallocTest::locallocSize IL_0006: ret } // end of method LocallocTest::.cctor .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { .maxstack 8 IL_0000: ldarg.0 IL_0001: call instance void [mscorlib]System.Object::.ctor() IL_0006: ret } // end of method LocallocTest::.ctor } // end of class LocallocTest
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } .assembly extern xunit.core {} .assembly extern mscorlib{} .assembly extern localloc_common{} .assembly 'eh12_dynamic'{} .class private auto ansi beforefieldinit LocallocTest extends [mscorlib]System.Object { .field private static int32 locallocSize .method public hidebysig static int32 Main() cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 4 .locals (unsigned int64 local1, unsigned int64 local2, int32* intArray1, int32* intArray2, int32 retValue) IL_0000: ldc.i8 0xfedcba9876543210 IL_0009: stloc.0 IL_000a: ldloc.0 IL_000b: ldc.i4.1 IL_000c: conv.i8 IL_000d: add IL_000e: stloc.1 IL_000f: ldc.i4.4 IL_0010: ldsfld int32 [localloc_common]LocallocTesting.Global::stackAllocSize IL_0015: mul IL_0016: localloc IL_0018: stloc.2 IL_0019: ldc.i4.4 IL_001a: ldsfld int32 [localloc_common]LocallocTesting.Global::stackAllocSize IL_001f: mul IL_0020: localloc IL_0022: stloc.3 IL_0023: ldsfld int32 [localloc_common]LocallocTesting.Global::stackAllocSize IL_0028: stsfld int32 LocallocTest::locallocSize .try { IL_002d: ldstr "Test Exception" IL_0032: newobj instance void [mscorlib]System.Exception::.ctor(string) IL_0037: throw } // end .try filter { pop ldc.i4.1 // Invoke handler block endfilter } { IL_0038: pop ldstr "In outer handler block" call void [System.Console]System.Console::WriteLine(string) .try { IL_0039: ldloc.2 IL_003a: ldsfld int32 LocallocTest::locallocSize IL_003f: ldc.i4 1000 IL_0044: call void [localloc_common]LocallocTesting.Global::initializeStack(int32*, int32, int32) IL_0049: ldloc.3 IL_004a: ldsfld int32 LocallocTest::locallocSize IL_004f: ldc.i4 2000 IL_0054: call void [localloc_common]LocallocTesting.Global::initializeStack(int32*, int32, int32) IL_0059: ldstr "Test Exception" IL_005e: newobj instance void [mscorlib]System.Exception::.ctor(string) IL_0063: throw } // end .try filter { IL_0064: pop ldstr "In inner filter block" call void [System.Console]System.Console::WriteLine(string) IL_0065: ldstr "intArray1" IL_006a: ldloc.2 IL_006b: ldsfld int32 LocallocTest::locallocSize IL_0070: ldc.i4 1000 IL_0075: call bool [localloc_common]LocallocTesting.Global::verifyStack(string, int32*, int32, int32) IL_007a: brtrue.s IL_0084 IL_007c: ldc.i4.1 IL_007d: stloc.s retValue IL_007f: br.s L_FAILED1 IL_0084: ldstr "intArray2" IL_0089: ldloc.3 IL_008a: ldsfld int32 LocallocTest::locallocSize IL_008f: ldc.i4 2000 IL_0094: call bool [localloc_common]LocallocTesting.Global::verifyStack(string, int32*, int32, int32) IL_0099: brtrue.s L_PASS1 IL_009b: ldc.i4.1 IL_009c: stloc.s retValue L_FAILED1: ldc.i4.0 // DO not handle exception. 
br.s L_ENDFILTER1 L_PASS1: ldc.i4.1 L_ENDFILTER1: endfilter } { pop IL_00a3: leave.s IL_00a5 } // end handler IL_00a5: leave.s IL_00a7 } // end handler IL_00a7: ldstr "intArray1" IL_00ac: ldloc.2 IL_00ad: ldsfld int32 LocallocTest::locallocSize IL_00b2: ldc.i4 0x3e8 IL_00b7: call bool [localloc_common]LocallocTesting.Global::verifyStack(string, int32*, int32, int32) IL_00bc: brtrue.s IL_00c3 IL_00be: ldc.i4.1 IL_00bf: stloc.s retValue IL_00c1: br.s IL_0125 IL_00c3: ldstr "intArray2" IL_00c8: ldloc.3 IL_00c9: ldsfld int32 LocallocTest::locallocSize IL_00ce: ldc.i4 0x7d0 IL_00d3: call bool [localloc_common]LocallocTesting.Global::verifyStack(string, int32*, int32, int32) IL_00d8: brtrue.s IL_00df IL_00da: ldc.i4.1 IL_00db: stloc.s retValue IL_00dd: br.s IL_0125 IL_00df: ldstr "local1" IL_00e4: ldloc.0 IL_00e5: ldc.i8 0xfedcba9876543210 IL_00ee: call bool [localloc_common]LocallocTesting.Global::verifyLocal(string, unsigned int64, unsigned int64) IL_00f3: brtrue.s IL_00fa IL_00f5: ldc.i4.1 IL_00f6: stloc.s retValue IL_00f8: br.s IL_0125 IL_00fa: ldstr "local2" IL_00ff: ldloc.1 IL_0100: ldc.i8 0xfedcba9876543211 IL_0109: call bool [localloc_common]LocallocTesting.Global::verifyLocal(string, unsigned int64, unsigned int64) IL_010e: brtrue.s IL_0115 IL_0110: ldc.i4.1 IL_0111: stloc.s retValue IL_0113: br.s IL_0125 IL_0115: ldstr "Passed\n" IL_011a: call void [System.Console]System.Console::WriteLine(string) IL_011f: ldc.i4.s 100 IL_0121: stloc.s retValue IL_0123: br.s IL_0125 IL_0125: ldloc.s retValue IL_0127: ret } // end of method LocallocTest::Main .method private hidebysig specialname rtspecialname static void .cctor() cil managed { .maxstack 8 IL_0000: ldc.i4.0 IL_0001: stsfld int32 LocallocTest::locallocSize IL_0006: ret } // end of method LocallocTest::.cctor .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { .maxstack 8 IL_0000: ldarg.0 IL_0001: call instance void [mscorlib]System.Object::.ctor() IL_0006: ret } // end of method LocallocTest::.ctor } // end of class LocallocTest
-1
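Illustrative aside: the IL above verifies that localloc'd buffers keep their contents across exception filters and handlers. A loose C# analogue (stackalloc standing in for localloc, a when clause for the filter block; names invented, requires AllowUnsafeBlocks):

using System;

class StackAllocFilterDemo
{
    static unsafe void Main()
    {
        const int size = 16;
        int* buffer = stackalloc int[size];              // rough analogue of the IL's localloc
        for (int i = 0; i < size; i++) buffer[i] = 1000 + i;

        try
        {
            throw new Exception("Test Exception");
        }
        catch (Exception) when (Verify(buffer, size))    // filter runs before the handler, like the IL filter block
        {
            Console.WriteLine("In handler block");
        }

        // The buffer must still hold its values after the handler completes.
        Console.WriteLine(Verify(buffer, size) ? "Passed" : "Failed");
    }

    static unsafe bool Verify(int* p, int length)
    {
        for (int i = 0; i < length; i++)
            if (p[i] != 1000 + i) return false;
        return true;
    }
}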
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/libraries/System.Composition.Hosting/src/System/Composition/Hosting/Providers/ExportFactory/ExportFactoryWithMetadataExportDescriptorProvider.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Reflection; using System.Composition.Hosting.Util; using System.Composition.Hosting.Core; using System.Linq; using System.Collections.Generic; using System.Composition.Hosting.Providers.Metadata; namespace System.Composition.Hosting.Providers.ExportFactory { internal sealed class ExportFactoryWithMetadataExportDescriptorProvider : ExportDescriptorProvider { private static readonly MethodInfo s_getLazyDefinitionsMethod = typeof(ExportFactoryWithMetadataExportDescriptorProvider).GetTypeInfo().GetDeclaredMethod("GetExportFactoryDescriptors"); public override IEnumerable<ExportDescriptorPromise> GetExportDescriptors(CompositionContract contract, DependencyAccessor definitionAccessor) { if (!contract.ContractType.IsConstructedGenericType || contract.ContractType.GetGenericTypeDefinition() != typeof(ExportFactory<,>)) return NoExportDescriptors; var ga = contract.ContractType.GenericTypeArguments; var gld = s_getLazyDefinitionsMethod.MakeGenericMethod(ga[0], ga[1]); var gldm = gld.CreateStaticDelegate<Func<CompositionContract, DependencyAccessor, object>>(); return (ExportDescriptorPromise[])gldm(contract, definitionAccessor); } private static ExportDescriptorPromise[] GetExportFactoryDescriptors<TProduct, TMetadata>(CompositionContract exportFactoryContract, DependencyAccessor definitionAccessor) { var productContract = exportFactoryContract.ChangeType(typeof(TProduct)); var boundaries = Array.Empty<string>(); IEnumerable<string> specifiedBoundaries; CompositionContract unwrapped; if (exportFactoryContract.TryUnwrapMetadataConstraint(Constants.SharingBoundaryImportMetadataConstraintName, out specifiedBoundaries, out unwrapped)) { productContract = unwrapped.ChangeType(typeof(TProduct)); boundaries = (specifiedBoundaries ?? Array.Empty<string>()).ToArray(); } var metadataProvider = MetadataViewProvider.GetMetadataViewProvider<TMetadata>(); return definitionAccessor.ResolveDependencies("product", productContract, false) .Select(d => new ExportDescriptorPromise( exportFactoryContract, typeof(ExportFactory<TProduct, TMetadata>).Name, false, () => new[] { d }, _ => { var dsc = d.Target.GetDescriptor(); return ExportDescriptor.Create((c, o) => { return new ExportFactory<TProduct, TMetadata>(() => { var lifetimeContext = new LifetimeContext(c, boundaries); return Tuple.Create<TProduct, Action>((TProduct)CompositionOperation.Run(lifetimeContext, dsc.Activator), lifetimeContext.Dispose); }, metadataProvider(dsc.Metadata)); }, dsc.Metadata); })) .ToArray(); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Reflection; using System.Composition.Hosting.Util; using System.Composition.Hosting.Core; using System.Linq; using System.Collections.Generic; using System.Composition.Hosting.Providers.Metadata; namespace System.Composition.Hosting.Providers.ExportFactory { internal sealed class ExportFactoryWithMetadataExportDescriptorProvider : ExportDescriptorProvider { private static readonly MethodInfo s_getLazyDefinitionsMethod = typeof(ExportFactoryWithMetadataExportDescriptorProvider).GetTypeInfo().GetDeclaredMethod("GetExportFactoryDescriptors"); public override IEnumerable<ExportDescriptorPromise> GetExportDescriptors(CompositionContract contract, DependencyAccessor definitionAccessor) { if (!contract.ContractType.IsConstructedGenericType || contract.ContractType.GetGenericTypeDefinition() != typeof(ExportFactory<,>)) return NoExportDescriptors; var ga = contract.ContractType.GenericTypeArguments; var gld = s_getLazyDefinitionsMethod.MakeGenericMethod(ga[0], ga[1]); var gldm = gld.CreateStaticDelegate<Func<CompositionContract, DependencyAccessor, object>>(); return (ExportDescriptorPromise[])gldm(contract, definitionAccessor); } private static ExportDescriptorPromise[] GetExportFactoryDescriptors<TProduct, TMetadata>(CompositionContract exportFactoryContract, DependencyAccessor definitionAccessor) { var productContract = exportFactoryContract.ChangeType(typeof(TProduct)); var boundaries = Array.Empty<string>(); IEnumerable<string> specifiedBoundaries; CompositionContract unwrapped; if (exportFactoryContract.TryUnwrapMetadataConstraint(Constants.SharingBoundaryImportMetadataConstraintName, out specifiedBoundaries, out unwrapped)) { productContract = unwrapped.ChangeType(typeof(TProduct)); boundaries = (specifiedBoundaries ?? Array.Empty<string>()).ToArray(); } var metadataProvider = MetadataViewProvider.GetMetadataViewProvider<TMetadata>(); return definitionAccessor.ResolveDependencies("product", productContract, false) .Select(d => new ExportDescriptorPromise( exportFactoryContract, typeof(ExportFactory<TProduct, TMetadata>).Name, false, () => new[] { d }, _ => { var dsc = d.Target.GetDescriptor(); return ExportDescriptor.Create((c, o) => { return new ExportFactory<TProduct, TMetadata>(() => { var lifetimeContext = new LifetimeContext(c, boundaries); return Tuple.Create<TProduct, Action>((TProduct)CompositionOperation.Run(lifetimeContext, dsc.Activator), lifetimeContext.Dispose); }, metadataProvider(dsc.Metadata)); }, dsc.Metadata); })) .ToArray(); } } }
-1
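Illustrative aside: a rough consumer-side sketch of what this provider enables, namely importing ExportFactory&lt;T, TMetadata&gt;. All type names here are invented for the example, and the metadata view follows the System.Composition convention of a concrete class with settable properties:

using System;
using System.Composition;
using System.Composition.Hosting;

public interface IWidget { string Speak(); }

public class NamedMetadata
{
    public string Name { get; set; }    // populated from [ExportMetadata("Name", ...)]
}

[Export(typeof(IWidget))]
[ExportMetadata("Name", "blue")]
public class BlueWidget : IWidget
{
    public string Speak() => "blue";
}

[Export]
public class Consumer
{
    // The provider in the record above is what satisfies this import: it wraps the
    // IWidget part in an ExportFactory carrying the typed metadata view.
    [Import]
    public ExportFactory<IWidget, NamedMetadata> WidgetFactory { get; set; }
}

class Program
{
    static void Main()
    {
        var container = new ContainerConfiguration()
            .WithPart<BlueWidget>()
            .WithPart<Consumer>()
            .CreateContainer();

        var consumer = container.GetExport<Consumer>();
        Console.WriteLine(consumer.WidgetFactory.Metadata.Name);    // metadata is available without creating the part

        using (var export = consumer.WidgetFactory.CreateExport())  // part lifetime is scoped to the export
        {
            Console.WriteLine(export.Value.Speak());
        }
    }
}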
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/mono/mono/tests/char-isnumber.cs
using System; class T { static int Main() { char[] chars = new char[] {'0', 'F', 'f', 'x', '1', 'n', 'a'}; bool[] results = new bool[] {true, false, false, false, true, false, false}; for (int i = 0; i < chars.Length; ++i) { if (Char.IsNumber (chars [i]) != results [i]) { Console.WriteLine ("Char '{0}' failed", chars [i]); return 1; } } return 0; } }
using System; class T { static int Main() { char[] chars = new char[] {'0', 'F', 'f', 'x', '1', 'n', 'a'}; bool[] results = new bool[] {true, false, false, false, true, false, false}; for (int i = 0; i < chars.Length; ++i) { if (Char.IsNumber (chars [i]) != results [i]) { Console.WriteLine ("Char '{0}' failed", chars [i]); return 1; } } return 0; } }
-1
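Illustrative aside: the test above covers ASCII inputs, where Char.IsNumber and Char.IsDigit agree; the difference only appears for non-ASCII numeric characters, for example:

using System;

class CharIsNumberDemo
{
    static void Main()
    {
        // ASCII digits: both methods return true.
        Console.WriteLine(Char.IsDigit('7'));        // True
        Console.WriteLine(Char.IsNumber('7'));       // True

        // U+00B2 SUPERSCRIPT TWO is numeric (Unicode category No) but not a decimal digit.
        Console.WriteLine(Char.IsDigit('\u00B2'));   // False
        Console.WriteLine(Char.IsNumber('\u00B2'));  // True
    }
}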
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/JIT/IL_Conformance/Old/Conformance_Base/conv_ovf_r8_i4.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern legacy library mscorlib {} .class public conv_ovf_i4 { .method public static int32 conv(float64,int32) { .locals (class [mscorlib]System.OverflowException, int32) .maxstack 2 try_start: ldarg 0 conv.ovf.i4 ldarg 1 ceq stloc.1 leave.s try_end try_end: ldloc.1 brfalse FAIL ldc.i4 0x11111111 br END aHandler: isinst [mscorlib]System.OverflowException stloc 0 leave HEnd HEnd: ldloc 0 brfalse FAIL ldc.i4 0xEEEEEEEE br END FAIL: ldc.i4 0x00000000 br END END: ret .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd } .method public static int32 conv_u(float64,int32) { .locals (class [mscorlib]System.OverflowException, int32) .maxstack 2 try_start2: ldarg 0 conv.ovf.i4.un stloc.1 leave.s try_end2 try_end2: ldloc.1 ldarg 1 ceq brfalse FAIL2 ldc.i4 0x11111111 br END2 aHandler2: isinst [mscorlib]System.OverflowException stloc 0 leave HEnd2 HEnd2: ldloc 0 brfalse FAIL2 ldc.i4 0xEEEEEEEE br END2 FAIL2: ldc.i4 0x00000000 br END2 END2: ret .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2 } .method public void conv_ovf_i4() { .maxstack 1 ret } .method public static int32 main(class [mscorlib]System.String[]) { .entrypoint .maxstack 5 ldc.r8 float64(0xFFF0000000000000) ldc.i4 0x00000000 call int32 conv_ovf_i4::conv(float64,int32) ldc.i4 0xEEEEEEEE ceq brfalse FAIL ldc.r8 float64(0xFFEFFFFFFFFFFFFF) ldc.i4 0x00000000 call int32 conv_ovf_i4::conv(float64,int32) ldc.i4 0xEEEEEEEE ceq brfalse FAIL ldc.r8 float64(0xBFF0000000000000) ldc.i4 0xFFFFFFFF call int32 conv_ovf_i4::conv(float64,int32) ldc.i4 0x11111111 ceq brfalse FAIL ldc.r8 float64(0x8000000000000000) ldc.i4 0x00000000 call int32 conv_ovf_i4::conv(float64,int32) ldc.i4 0x11111111 ceq brfalse FAIL ldc.r8 float64(0x0000000000000000) ldc.i4 0x00000000 call int32 conv_ovf_i4::conv(float64,int32) ldc.i4 0x11111111 ceq brfalse FAIL ldc.r8 float64(0x3FF0000000000000) ldc.i4 0x00000001 call int32 conv_ovf_i4::conv(float64,int32) ldc.i4 0x11111111 ceq brfalse FAIL ldc.r8 float64(0x7FEFFFFFFFFFFFFF) ldc.i4 0x00000000 call int32 conv_ovf_i4::conv(float64,int32) ldc.i4 0xEEEEEEEE ceq brfalse FAIL ldc.r8 float64(0x7FF0000000000000) ldc.i4 0x00000000 call int32 conv_ovf_i4::conv(float64,int32) ldc.i4 0xEEEEEEEE ceq brfalse FAIL ldc.r8 float64(0x7FF8000000000000) ldc.i4 0x00000000 call int32 conv_ovf_i4::conv(float64,int32) ldc.i4 0xEEEEEEEE ceq brfalse FAIL ldc.r8 1.5 ldc.i4 0x00000001 call int32 conv_ovf_i4::conv(float64,int32) ldc.i4 0x11111111 ceq brfalse FAIL ldc.r8 99.99 ldc.i4 0x00000063 call int32 conv_ovf_i4::conv(float64,int32) ldc.i4 0x11111111 ceq brfalse FAIL ldc.r8 float64(0xC1E0000000000000) ldc.i4 0x80000000 call int32 conv_ovf_i4::conv(float64,int32) ldc.i4 0x11111111 ceq brfalse FAIL ldc.r8 float64(0x41DFFFFFFFC00000) ldc.i4 0x7FFFFFFF call int32 conv_ovf_i4::conv(float64,int32) ldc.i4 0x11111111 ceq brfalse FAIL ldc.r8 float64(0x0000000000000000) ldc.i4 0x00000000 call int32 conv_ovf_i4::conv_u(float64,int32) ldc.i4 0x11111111 ceq brfalse FAIL ldc.r8 float64(0x3FF0000000000000) ldc.i4 0x00000001 call int32 conv_ovf_i4::conv_u(float64,int32) ldc.i4 0x11111111 ceq brfalse FAIL ldc.r8 float64(0x7FEFFFFFFFFFFFFF) ldc.i4 0x00000000 call int32 conv_ovf_i4::conv_u(float64,int32) ldc.i4 0xEEEEEEEE ceq brfalse FAIL ldc.r8 float64(0x7FF0000000000000) ldc.i4 0x00000000 call int32 conv_ovf_i4::conv_u(float64,int32) ldc.i4 
0xEEEEEEEE ceq brfalse FAIL ldc.r8 float64(0x7FF8000000000000) ldc.i4 0x00000000 call int32 conv_ovf_i4::conv_u(float64,int32) ldc.i4 0xEEEEEEEE ceq brfalse FAIL ldc.r8 1.5 ldc.i4 0x00000001 call int32 conv_ovf_i4::conv_u(float64,int32) ldc.i4 0x11111111 ceq brfalse FAIL ldc.r8 99.99 ldc.i4 0x00000063 call int32 conv_ovf_i4::conv_u(float64,int32) ldc.i4 0x11111111 ceq brfalse FAIL ldc.r8 float64(0x41DFFFFFFFC00000) ldc.i4 0x7FFFFFFF call int32 conv_ovf_i4::conv_u(float64,int32) ldc.i4 0x11111111 ceq brfalse FAIL PASS: ldc.i4 100 br END FAIL: ldc.i4 0x00000000 END: ret } } .assembly conv_ovf_r8_i4{}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

.assembly extern legacy library mscorlib {}

.class public conv_ovf_i4 {

  .method public static int32 conv(float64,int32) {
    .locals (class [mscorlib]System.OverflowException, int32)
    .maxstack 2
  try_start:
    ldarg 0
    conv.ovf.i4
    ldarg 1
    ceq
    stloc.1
    leave.s try_end
  try_end:
    ldloc.1
    brfalse FAIL
    ldc.i4 0x11111111
    br END
  aHandler:
    isinst [mscorlib]System.OverflowException
    stloc 0
    leave HEnd
  HEnd:
    ldloc 0
    brfalse FAIL
    ldc.i4 0xEEEEEEEE
    br END
  FAIL:
    ldc.i4 0x00000000
    br END
  END:
    ret
    .try try_start to try_end catch [mscorlib]System.OverflowException handler aHandler to HEnd
  }

  .method public static int32 conv_u(float64,int32) {
    .locals (class [mscorlib]System.OverflowException, int32)
    .maxstack 2
  try_start2:
    ldarg 0
    conv.ovf.i4.un
    stloc.1
    leave.s try_end2
  try_end2:
    ldloc.1
    ldarg 1
    ceq
    brfalse FAIL2
    ldc.i4 0x11111111
    br END2
  aHandler2:
    isinst [mscorlib]System.OverflowException
    stloc 0
    leave HEnd2
  HEnd2:
    ldloc 0
    brfalse FAIL2
    ldc.i4 0xEEEEEEEE
    br END2
  FAIL2:
    ldc.i4 0x00000000
    br END2
  END2:
    ret
    .try try_start2 to try_end2 catch [mscorlib]System.OverflowException handler aHandler2 to HEnd2
  }

  .method public void conv_ovf_i4() {
    .maxstack 1
    ret
  }

  .method public static int32 main(class [mscorlib]System.String[]) {
    .entrypoint
    .maxstack 5

    ldc.r8 float64(0xFFF0000000000000)
    ldc.i4 0x00000000
    call int32 conv_ovf_i4::conv(float64,int32)
    ldc.i4 0xEEEEEEEE
    ceq
    brfalse FAIL

    ldc.r8 float64(0xFFEFFFFFFFFFFFFF)
    ldc.i4 0x00000000
    call int32 conv_ovf_i4::conv(float64,int32)
    ldc.i4 0xEEEEEEEE
    ceq
    brfalse FAIL

    ldc.r8 float64(0xBFF0000000000000)
    ldc.i4 0xFFFFFFFF
    call int32 conv_ovf_i4::conv(float64,int32)
    ldc.i4 0x11111111
    ceq
    brfalse FAIL

    ldc.r8 float64(0x8000000000000000)
    ldc.i4 0x00000000
    call int32 conv_ovf_i4::conv(float64,int32)
    ldc.i4 0x11111111
    ceq
    brfalse FAIL

    ldc.r8 float64(0x0000000000000000)
    ldc.i4 0x00000000
    call int32 conv_ovf_i4::conv(float64,int32)
    ldc.i4 0x11111111
    ceq
    brfalse FAIL

    ldc.r8 float64(0x3FF0000000000000)
    ldc.i4 0x00000001
    call int32 conv_ovf_i4::conv(float64,int32)
    ldc.i4 0x11111111
    ceq
    brfalse FAIL

    ldc.r8 float64(0x7FEFFFFFFFFFFFFF)
    ldc.i4 0x00000000
    call int32 conv_ovf_i4::conv(float64,int32)
    ldc.i4 0xEEEEEEEE
    ceq
    brfalse FAIL

    ldc.r8 float64(0x7FF0000000000000)
    ldc.i4 0x00000000
    call int32 conv_ovf_i4::conv(float64,int32)
    ldc.i4 0xEEEEEEEE
    ceq
    brfalse FAIL

    ldc.r8 float64(0x7FF8000000000000)
    ldc.i4 0x00000000
    call int32 conv_ovf_i4::conv(float64,int32)
    ldc.i4 0xEEEEEEEE
    ceq
    brfalse FAIL

    ldc.r8 1.5
    ldc.i4 0x00000001
    call int32 conv_ovf_i4::conv(float64,int32)
    ldc.i4 0x11111111
    ceq
    brfalse FAIL

    ldc.r8 99.99
    ldc.i4 0x00000063
    call int32 conv_ovf_i4::conv(float64,int32)
    ldc.i4 0x11111111
    ceq
    brfalse FAIL

    ldc.r8 float64(0xC1E0000000000000)
    ldc.i4 0x80000000
    call int32 conv_ovf_i4::conv(float64,int32)
    ldc.i4 0x11111111
    ceq
    brfalse FAIL

    ldc.r8 float64(0x41DFFFFFFFC00000)
    ldc.i4 0x7FFFFFFF
    call int32 conv_ovf_i4::conv(float64,int32)
    ldc.i4 0x11111111
    ceq
    brfalse FAIL

    ldc.r8 float64(0x0000000000000000)
    ldc.i4 0x00000000
    call int32 conv_ovf_i4::conv_u(float64,int32)
    ldc.i4 0x11111111
    ceq
    brfalse FAIL

    ldc.r8 float64(0x3FF0000000000000)
    ldc.i4 0x00000001
    call int32 conv_ovf_i4::conv_u(float64,int32)
    ldc.i4 0x11111111
    ceq
    brfalse FAIL

    ldc.r8 float64(0x7FEFFFFFFFFFFFFF)
    ldc.i4 0x00000000
    call int32 conv_ovf_i4::conv_u(float64,int32)
    ldc.i4 0xEEEEEEEE
    ceq
    brfalse FAIL

    ldc.r8 float64(0x7FF0000000000000)
    ldc.i4 0x00000000
    call int32 conv_ovf_i4::conv_u(float64,int32)
    ldc.i4 0xEEEEEEEE
    ceq
    brfalse FAIL

    ldc.r8 float64(0x7FF8000000000000)
    ldc.i4 0x00000000
    call int32 conv_ovf_i4::conv_u(float64,int32)
    ldc.i4 0xEEEEEEEE
    ceq
    brfalse FAIL

    ldc.r8 1.5
    ldc.i4 0x00000001
    call int32 conv_ovf_i4::conv_u(float64,int32)
    ldc.i4 0x11111111
    ceq
    brfalse FAIL

    ldc.r8 99.99
    ldc.i4 0x00000063
    call int32 conv_ovf_i4::conv_u(float64,int32)
    ldc.i4 0x11111111
    ceq
    brfalse FAIL

    ldc.r8 float64(0x41DFFFFFFFC00000)
    ldc.i4 0x7FFFFFFF
    call int32 conv_ovf_i4::conv_u(float64,int32)
    ldc.i4 0x11111111
    ceq
    brfalse FAIL

  PASS:
    ldc.i4 100
    br END

  FAIL:
    ldc.i4 0x00000000

  END:
    ret
  }
}
.assembly conv_ovf_r8_i4{}
-1
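The IL above tests the overflow-checked float64-to-int32 conversion: conv.ovf.i4 must succeed for in-range inputs (truncating toward zero) and raise System.OverflowException for NaN, infinities, and values outside the int32 range. The same contract is visible from C#, where a checked cast compiles down to conv.ovf.i4. The helper below is an illustrative sketch only; its names are invented and it is not part of the test suite.

using System;

public static class CheckedConvDemo
{
    // Returns true when the overflow-checked conversion succeeds,
    // false when the checked cast raises OverflowException.
    private static bool TryCheckedConvert(double value, out int result)
    {
        try
        {
            result = checked((int)value); // emits conv.ovf.i4
            return true;
        }
        catch (OverflowException)
        {
            result = 0;
            return false;
        }
    }

    public static void Main()
    {
        // In-range values truncate toward zero, matching the IL expectations.
        Console.WriteLine(TryCheckedConvert(99.99, out int a) + " " + a);        // True 99
        Console.WriteLine(TryCheckedConvert(-1.0, out int b) + " " + b);         // True -1
        Console.WriteLine(TryCheckedConvert(2147483647.0, out int c) + " " + c); // True 2147483647

        // NaN, infinities, and values outside the int32 range must overflow.
        Console.WriteLine(TryCheckedConvert(double.NaN, out _));                 // False
        Console.WriteLine(TryCheckedConvert(double.PositiveInfinity, out _));    // False
        Console.WriteLine(TryCheckedConvert(double.MaxValue, out _));            // False
    }
}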
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
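For context on the PR these rows track: it concerns using the System.Text.Json source generator together with JsonSerializerOptions.ReferenceHandler. Below is a minimal, hypothetical C# sketch of that combination; the Node and NodeContext types are invented for illustration, they are not taken from the PR, and the sketch does not reproduce the specific bug or its fix.

using System;
using System.Text.Json;
using System.Text.Json.Serialization;

// Illustrative model containing a cycle, the kind of graph ReferenceHandler.Preserve handles.
public class Node
{
    public string Name { get; set; } = "";
    public Node Next { get; set; }
}

// Source-generated serialization metadata for Node.
[JsonSerializable(typeof(Node))]
internal partial class NodeContext : JsonSerializerContext
{
}

public static class ReferenceHandlerWithSourceGenDemo
{
    public static void Main()
    {
        var node = new Node { Name = "a" };
        node.Next = node; // self-reference: serializing this requires reference handling

        var options = new JsonSerializerOptions
        {
            ReferenceHandler = ReferenceHandler.Preserve,
        };

        // Combine the reference handler with the source-generated context.
        string json = JsonSerializer.Serialize(node, typeof(Node), new NodeContext(options));
        Console.WriteLine(json); // roughly: {"$id":"1","Name":"a","Next":{"$ref":"1"}}
    }
}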
./src/tests/JIT/HardwareIntrinsics/General/NotSupported/Vector128BooleanAsGeneric_Byte.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

/******************************************************************************
 * This file is auto-generated from a template file by the GenerateTests.csx  *
 * script in tests\src\JIT\HardwareIntrinsics\General\Shared. In order to make*
 * changes, please update the corresponding template and run according to the *
 * directions listed in the file.                                             *
 ******************************************************************************/

using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;

namespace JIT.HardwareIntrinsics.General
{
    public static partial class Program
    {
        private static void Vector128BooleanAsGeneric_Byte()
        {
            bool succeeded = false;

            try
            {
                Vector128<byte> result = default(Vector128<bool>).As<bool, byte>();
            }
            catch (NotSupportedException)
            {
                succeeded = true;
            }

            if (!succeeded)
            {
                TestLibrary.TestFramework.LogInformation($"Vector128BooleanAsGeneric_Byte: RunNotSupportedScenario failed to throw NotSupportedException.");
                TestLibrary.TestFramework.LogInformation(string.Empty);
                throw new Exception("One or more scenarios did not complete as expected.");
            }
        }
    }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

/******************************************************************************
 * This file is auto-generated from a template file by the GenerateTests.csx  *
 * script in tests\src\JIT\HardwareIntrinsics\General\Shared. In order to make*
 * changes, please update the corresponding template and run according to the *
 * directions listed in the file.                                             *
 ******************************************************************************/

using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;

namespace JIT.HardwareIntrinsics.General
{
    public static partial class Program
    {
        private static void Vector128BooleanAsGeneric_Byte()
        {
            bool succeeded = false;

            try
            {
                Vector128<byte> result = default(Vector128<bool>).As<bool, byte>();
            }
            catch (NotSupportedException)
            {
                succeeded = true;
            }

            if (!succeeded)
            {
                TestLibrary.TestFramework.LogInformation($"Vector128BooleanAsGeneric_Byte: RunNotSupportedScenario failed to throw NotSupportedException.");
                TestLibrary.TestFramework.LogInformation(string.Empty);
                throw new Exception("One or more scenarios did not complete as expected.");
            }
        }
    }
}
-1
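The generated test above pins down one behaviour: reinterpreting a Vector128<bool> via As<bool, byte>() must throw NotSupportedException, because bool is not a supported vector element type. As an illustrative stand-alone probe (names invented, not part of the generated suite), the same check can be phrased as:

using System;
using System.Runtime.Intrinsics;

public static class VectorElementTypeProbe
{
    public static void Main()
    {
        // Expected to print False: bool satisfies the struct constraint, so this compiles,
        // but the runtime rejects it with NotSupportedException.
        Console.WriteLine(BooleanElementIsSupported());
    }

    private static bool BooleanElementIsSupported()
    {
        try
        {
            default(Vector128<bool>).As<bool, byte>();
            return true;
        }
        catch (NotSupportedException)
        {
            return false;
        }
    }
}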
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/Loader/classloader/InterfaceFolding/Nested_I/TestCase1.il
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

.assembly extern mscorlib {}
.assembly extern xunit.core {}
.assembly TestCase1 {}

// =============== CLASS MEMBERS DECLARATION ===================

.class interface private abstract auto ansi J
{
  .method public hidebysig newslot abstract virtual instance string Bar1() cil managed {}
  .method public hidebysig newslot abstract virtual instance string Bar2() cil managed {}
}

.class private auto ansi beforefieldinit A`1<U> implements class A`1/I`1<!U>
{
  .class interface nested family abstract auto ansi I`1<T>
  {
    .method public hidebysig newslot abstract virtual instance string Foo() cil managed {}
  }

  .method public hidebysig newslot virtual instance string Foo() cil managed
  {
    ldstr "A::Foo"
    ret
  }

  .method public hidebysig specialname rtspecialname instance void .ctor() cil managed
  {
    ret
  }
}

.class private auto ansi beforefieldinit B`2<V,W> extends class A`1<!V> implements class A`1/I`1<!W>, J
{
  .method public hidebysig newslot virtual instance string Bar1() cil managed
  {
    .maxstack 8
    ldstr "B::Bar1"
    ret
  }

  .method public hidebysig newslot virtual instance string Bar2() cil managed
  {
    .maxstack 8
    ldstr "B::Bar2"
    ret
  }

  .method public hidebysig specialname rtspecialname instance void .ctor() cil managed
  {
    ret
  }
}

.class private auto ansi beforefieldinit C extends class B`2<class C,class C>
{
  .method public hidebysig specialname rtspecialname instance void .ctor() cil managed
  {
    ret
  }
}

.class public auto ansi beforefieldinit Test_TestCase1 extends [mscorlib]System.Object
{
  .method public hidebysig static int32 Main() cil managed
  {
    .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 )
    .entrypoint

    newobj instance void C::.ctor()
    callvirt instance string class C::Foo()
    ldstr "A::Foo"
    call bool [mscorlib]System.String::op_Inequality(string, string)
    brtrue FAILURE

    newobj instance void C::.ctor()
    callvirt instance string class C::Bar1()
    ldstr "B::Bar1"
    call bool [mscorlib]System.String::op_Inequality(string,string)
    brtrue FAILURE

    newobj instance void C::.ctor()
    callvirt instance string class C::Bar2()
    ldstr "B::Bar2"
    call bool [mscorlib]System.String::op_Inequality(string,string)
    brtrue FAILURE

  PASS:
    ldstr "Pass"
    call void [mscorlib]System.Console::WriteLine(string)
    ldc.i4.s 100
    ret

  FAILURE:
    ldstr "Failed!"
    call void [mscorlib]System.Console::WriteLine(string)
    ldc.i4.m1
    ret
  }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

.assembly extern mscorlib {}
.assembly extern xunit.core {}
.assembly TestCase1 {}

// =============== CLASS MEMBERS DECLARATION ===================

.class interface private abstract auto ansi J
{
  .method public hidebysig newslot abstract virtual instance string Bar1() cil managed {}
  .method public hidebysig newslot abstract virtual instance string Bar2() cil managed {}
}

.class private auto ansi beforefieldinit A`1<U> implements class A`1/I`1<!U>
{
  .class interface nested family abstract auto ansi I`1<T>
  {
    .method public hidebysig newslot abstract virtual instance string Foo() cil managed {}
  }

  .method public hidebysig newslot virtual instance string Foo() cil managed
  {
    ldstr "A::Foo"
    ret
  }

  .method public hidebysig specialname rtspecialname instance void .ctor() cil managed
  {
    ret
  }
}

.class private auto ansi beforefieldinit B`2<V,W> extends class A`1<!V> implements class A`1/I`1<!W>, J
{
  .method public hidebysig newslot virtual instance string Bar1() cil managed
  {
    .maxstack 8
    ldstr "B::Bar1"
    ret
  }

  .method public hidebysig newslot virtual instance string Bar2() cil managed
  {
    .maxstack 8
    ldstr "B::Bar2"
    ret
  }

  .method public hidebysig specialname rtspecialname instance void .ctor() cil managed
  {
    ret
  }
}

.class private auto ansi beforefieldinit C extends class B`2<class C,class C>
{
  .method public hidebysig specialname rtspecialname instance void .ctor() cil managed
  {
    ret
  }
}

.class public auto ansi beforefieldinit Test_TestCase1 extends [mscorlib]System.Object
{
  .method public hidebysig static int32 Main() cil managed
  {
    .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 )
    .entrypoint

    newobj instance void C::.ctor()
    callvirt instance string class C::Foo()
    ldstr "A::Foo"
    call bool [mscorlib]System.String::op_Inequality(string, string)
    brtrue FAILURE

    newobj instance void C::.ctor()
    callvirt instance string class C::Bar1()
    ldstr "B::Bar1"
    call bool [mscorlib]System.String::op_Inequality(string,string)
    brtrue FAILURE

    newobj instance void C::.ctor()
    callvirt instance string class C::Bar2()
    ldstr "B::Bar2"
    call bool [mscorlib]System.String::op_Inequality(string,string)
    brtrue FAILURE

  PASS:
    ldstr "Pass"
    call void [mscorlib]System.Console::WriteLine(string)
    ldc.i4.s 100
    ret

  FAILURE:
    ldstr "Failed!"
    call void [mscorlib]System.Console::WriteLine(string)
    ldc.i4.m1
    ret
  }
}
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest784/Generated784.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 } .assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) } //TYPES IN FORWARDER ASSEMBLIES: //TEST ASSEMBLY: .assembly Generated784 { .hash algorithm 0x00008004 } .assembly extern xunit.core {} .class public BaseClass0 { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void [mscorlib]System.Object::.ctor() ret } } .class public BaseClass1 extends BaseClass0 { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void BaseClass0::.ctor() ret } } .class public G3_C1256`1<T0> extends class G2_C302`2<class BaseClass0,class BaseClass1> implements class IBase1`1<!T0> { .method public hidebysig virtual instance string Method4() cil managed noinlining { ldstr "G3_C1256::Method4.15145()" ret } .method public hidebysig newslot virtual instance string Method5() cil managed noinlining { ldstr "G3_C1256::Method5.15146()" ret } .method public hidebysig virtual instance string Method6<M0>() cil managed noinlining { ldstr "G3_C1256::Method6.15147<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'IBase1<T0>.Method6'<M0>() cil managed noinlining { .override method instance string class IBase1`1<!T0>::Method6<[1]>() ldstr "G3_C1256::Method6.MI.15148<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void class G2_C302`2<class BaseClass0,class BaseClass1>::.ctor() ret } } .class public G2_C302`2<T0, T1> extends class G1_C6`2<!T0,class BaseClass1> implements class IBase2`2<class BaseClass1,!T1>, class IBase1`1<class BaseClass0> { .method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining { ldstr "G2_C302::Method7.7555<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig virtual instance string Method4() cil managed noinlining { ldstr "G2_C302::Method4.7556()" ret } .method public hidebysig virtual instance string Method5() cil managed noinlining { ldstr "G2_C302::Method5.7557()" ret } .method public hidebysig newslot virtual instance string 'IBase1<class BaseClass0>.Method5'() cil managed noinlining { .override method instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G2_C302::Method5.MI.7558()" ret } .method public hidebysig newslot virtual instance string Method6<M0>() cil managed noinlining { ldstr "G2_C302::Method6.7559<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string 
[mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'IBase1<class BaseClass0>.Method6'<M0>() cil managed noinlining { .override method instance string class IBase1`1<class BaseClass0>::Method6<[1]>() ldstr "G2_C302::Method6.MI.7560<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod1936() cil managed noinlining { ldstr "G2_C302::ClassMethod1936.7561()" ret } .method public hidebysig newslot virtual instance string 'G1_C6<T0,class BaseClass1>.ClassMethod1326'() cil managed noinlining { .override method instance string class G1_C6`2<!T0,class BaseClass1>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ret } .method public hidebysig newslot virtual instance string 'G1_C6<T0,class BaseClass1>.ClassMethod1327'<M0>() cil managed noinlining { .override method instance string class G1_C6`2<!T0,class BaseClass1>::ClassMethod1327<[1]>() ldstr "G2_C302::ClassMethod1327.MI.7563<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void class G1_C6`2<!T0,class BaseClass1>::.ctor() ret } } .class interface public abstract IBase1`1<+T0> { .method public hidebysig newslot abstract virtual instance string Method4() cil managed { } .method public hidebysig newslot abstract virtual instance string Method5() cil managed { } .method public hidebysig newslot abstract virtual instance string Method6<M0>() cil managed { } } .class public G1_C6`2<T0, T1> implements class IBase2`2<class BaseClass1,!T0> { .method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining { ldstr "G1_C6::Method7.4808<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod1326() cil managed noinlining { ldstr "G1_C6::ClassMethod1326.4809()" ret } .method public hidebysig newslot virtual instance string ClassMethod1327<M0>() cil managed noinlining { ldstr "G1_C6::ClassMethod1327.4810<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void [mscorlib]System.Object::.ctor() ret } } .class interface public abstract IBase2`2<+T0, -T1> { .method public hidebysig newslot abstract virtual instance string Method7<M0>() cil managed { } } .class public auto ansi beforefieldinit Generated784 { .method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed { 
.maxstack 5 .locals init (string[] actualResults) ldc.i4.s 0 newarr string stloc.s actualResults ldarg.1 ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)" ldc.i4.s 0 ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed { .maxstack 5 .locals init (string[] actualResults) ldc.i4.s 0 newarr string stloc.s actualResults ldarg.1 ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)" ldc.i4.s 0 ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1256.T<T0,(class G3_C1256`1<!!T0>)W>(!!W 'inst', string exp) cil managed { .maxstack 12 .locals init (string[] actualResults) ldc.i4.s 7 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1256.T<T0,(class G3_C1256`1<!!T0>)W>(!!W 'inst', string exp)" ldc.i4.s 7 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<!!T0>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<!!T0>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<!!T0>::ClassMethod1936() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<!!T0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<!!T0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<!!T0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<!!T0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1256.A<(class G3_C1256`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 12 .locals init (string[] actualResults) ldc.i4.s 7 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1256.A<(class G3_C1256`1<class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 7 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<class BaseClass0>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<class BaseClass0>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<class BaseClass0>::ClassMethod1936() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. 
!!W callvirt instance string class G3_C1256`1<class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1256.B<(class G3_C1256`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 12 .locals init (string[] actualResults) ldc.i4.s 7 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1256.B<(class G3_C1256`1<class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 7 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<class BaseClass1>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<class BaseClass1>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<class BaseClass1>::ClassMethod1936() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C302.T.T<T0,T1,(class G2_C302`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 12 .locals init (string[] actualResults) ldc.i4.s 7 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C302.T.T<T0,T1,(class G2_C302`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 7 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<!!T0,!!T1>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<!!T0,!!T1>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<!!T0,!!T1>::ClassMethod1936() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<!!T0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<!!T0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<!!T0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C302.A.T<T1,(class G2_C302`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 12 .locals init (string[] actualResults) ldc.i4.s 7 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C302.A.T<T1,(class G2_C302`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 7 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C302`2<class BaseClass0,!!T1>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,!!T1>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,!!T1>::ClassMethod1936() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C302.A.A<(class G2_C302`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 12 .locals init (string[] actualResults) ldc.i4.s 7 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C302.A.A<(class G2_C302`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 7 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::ClassMethod1936() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C302.A.B<(class G2_C302`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 12 .locals init (string[] actualResults) ldc.i4.s 7 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C302.A.B<(class G2_C302`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 7 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1936() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C302.B.T<T1,(class G2_C302`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 12 .locals init (string[] actualResults) ldc.i4.s 7 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C302.B.T<T1,(class G2_C302`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 7 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,!!T1>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,!!T1>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,!!T1>::ClassMethod1936() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C302.B.A<(class G2_C302`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 12 .locals init (string[] actualResults) ldc.i4.s 7 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C302.B.A<(class G2_C302`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 7 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass0>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass0>::ClassMethod1936() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C302.B.B<(class G2_C302`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 12 .locals init (string[] actualResults) ldc.i4.s 7 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C302.B.B<(class G2_C302`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 7 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::ClassMethod1936() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. 
!!W callvirt instance string class IBase1`1<class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C6.T.T<T0,T1,(class G1_C6`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C6.T.T<T0,T1,(class G1_C6`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<!!T0,!!T1>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<!!T0,!!T1>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C6.A.T<T1,(class G1_C6`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C6.A.T<T1,(class G1_C6`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass0,!!T1>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass0,!!T1>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C6.A.A<(class G1_C6`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C6.A.A<(class G1_C6`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass0>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass0>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C6.A.B<(class G1_C6`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C6.A.B<(class G1_C6`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C6.B.T<T1,(class G1_C6`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C6.B.T<T1,(class G1_C6`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass1,!!T1>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass1,!!T1>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C6.B.A<(class G1_C6`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C6.B.A<(class G1_C6`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C6.B.B<(class G1_C6`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C6.B.B<(class G1_C6`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
!!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
!!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method public hidebysig static void MethodCallingTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Method Calling Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1256`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1936() ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class 
BaseClass0,class BaseClass1>::Method6<object>() ldstr "G3_C1256::Method6.15147<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G2_C302::Method5.7557()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G3_C1256::Method4.15145()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G3_C1256::Method4.15145()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G3_C1256::Method5.15146()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G3_C1256::Method6.MI.15148<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G3_C1256`1<class BaseClass0> callvirt instance string class G3_C1256`1<class BaseClass0>::Method6<object>() ldstr "G3_C1256::Method6.15147<System.Object>()" ldstr "class G3_C1256`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1256`1<class BaseClass0> callvirt instance string class G3_C1256`1<class BaseClass0>::Method5() ldstr "G3_C1256::Method5.15146()" ldstr "class G3_C1256`1<class 
BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1256`1<class BaseClass0> callvirt instance string class G3_C1256`1<class BaseClass0>::Method4() ldstr "G3_C1256::Method4.15145()" ldstr "class G3_C1256`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1256`1<class BaseClass0> callvirt instance string class G3_C1256`1<class BaseClass0>::ClassMethod1936() ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G3_C1256`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1256`1<class BaseClass0> callvirt instance string class G3_C1256`1<class BaseClass0>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G3_C1256`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1256`1<class BaseClass0> callvirt instance string class G3_C1256`1<class BaseClass0>::ClassMethod1327<object>() ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G3_C1256`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1256`1<class BaseClass0> callvirt instance string class G3_C1256`1<class BaseClass0>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G3_C1256`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G3_C1256`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class 
IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1936() ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G3_C1256::Method6.15147<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G2_C302::Method5.7557()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G3_C1256::Method4.15145()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance 
string class IBase1`1<class BaseClass0>::Method4() ldstr "G3_C1256::Method4.15145()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G3_C1256::Method5.15146()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G3_C1256::Method6.MI.15148<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G3_C1256`1<class BaseClass1> callvirt instance string class G3_C1256`1<class BaseClass1>::Method6<object>() ldstr "G3_C1256::Method6.15147<System.Object>()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1256`1<class BaseClass1> callvirt instance string class G3_C1256`1<class BaseClass1>::Method5() ldstr "G3_C1256::Method5.15146()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1256`1<class BaseClass1> callvirt instance string class G3_C1256`1<class BaseClass1>::Method4() ldstr "G3_C1256::Method4.15145()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1256`1<class BaseClass1> callvirt instance string class G3_C1256`1<class BaseClass1>::ClassMethod1936() ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1256`1<class BaseClass1> callvirt instance string class G3_C1256`1<class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1256`1<class BaseClass1> callvirt instance string class G3_C1256`1<class BaseClass1>::ClassMethod1327<object>() ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1256`1<class BaseClass1> callvirt instance string class G3_C1256`1<class BaseClass1>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass1>::Method4() ldstr "G3_C1256::Method4.15145()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method5() ldstr "G3_C1256::Method5.15146()" ldstr "class 
IBase1`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() ldstr "G3_C1256::Method6.MI.15148<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C302`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C302`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::ClassMethod1936() ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) 
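// NOTE (annotation added for readability; argument roles are inferred from the generated
// pattern, not stated in the original source): each dup / castclass / callvirt / ldstr /
// ldstr / call sequence exercises one virtual or interface method on the instance loaded
// from local 0, handing the callvirt result plus two string literals (the expected method
// identifier and a description of the call site) to
// [TestFramework]TestFramework::MethodCallTest(string,string,string) for verification.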
dup castclass class G2_C302`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::Method6<object>() ldstr "G2_C302::Method6.7559<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::Method5() ldstr "G2_C302::Method5.7557()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::Method4() ldstr "G2_C302::Method4.7556()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::ClassMethod1327<object>() ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G2_C302::Method4.7556()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G2_C302::Method5.MI.7558()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G2_C302::Method6.MI.7560<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C302`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() ldstr 
"G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1936() ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G2_C302::Method6.7559<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G2_C302::Method5.7557()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G2_C302::Method4.7556()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G2_C302::Method4.7556()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G2_C302::Method5.MI.7558()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G2_C302::Method6.MI.7560<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C302`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C6`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>() ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass1,class BaseClass1> callvirt instance string 
class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C302`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass0>::ClassMethod1936() ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass0>::Method6<object>() ldstr "G2_C302::Method6.7559<System.Object>()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass0>::Method5() ldstr "G2_C302::Method5.7557()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass0>::Method4() ldstr "G2_C302::Method4.7556()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>() ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass1,class 
BaseClass0>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G2_C302::Method4.7556()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G2_C302::Method5.MI.7558()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G2_C302::Method6.MI.7560<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C302`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C6`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>() ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string 
class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C302`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::ClassMethod1936() ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::Method6<object>() ldstr "G2_C302::Method6.7559<System.Object>()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::Method5() ldstr "G2_C302::Method5.7557()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::Method4() ldstr "G2_C302::Method4.7556()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>() ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G2_C302::Method4.7556()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G2_C302::Method5.MI.7558()" ldstr "class IBase1`1<class BaseClass0> on type class 
G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G2_C302::Method6.MI.7560<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G1_C6`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C6`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass0>::ClassMethod1327<object>() ldstr "G1_C6::ClassMethod1327.4810<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass0>::ClassMethod1326() ldstr "G1_C6::ClassMethod1326.4809()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G1_C6`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() ldstr "G1_C6::ClassMethod1327.4810<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326() ldstr "G1_C6::ClassMethod1326.4809()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G1_C6`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C6`2<class BaseClass1,class BaseClass0> callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>() ldstr "G1_C6::ClassMethod1327.4810<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass1,class BaseClass0> callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326() ldstr "G1_C6::ClassMethod1326.4809()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass1,class BaseClass0> callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance 
string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G1_C6`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C6`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>() ldstr "G1_C6::ClassMethod1327.4810<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1326() ldstr "G1_C6::ClassMethod1326.4809()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void ConstrainedCallsTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Constrained Calls Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1256`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.T.T<class BaseClass0,class BaseClass1,class G3_C1256`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.A.T<class 
BaseClass1,class G3_C1256`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.A.B<class G3_C1256`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G3_C1256`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass0,class G3_C1256`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.A<class G3_C1256`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1256`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass0,class G3_C1256`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.A<class G3_C1256`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1256`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass1,class G3_C1256`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.B<class G3_C1256`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1256`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass1,class G3_C1256`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.B<class G3_C1256`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G3_C1256::Method4.15145()#G2_C302::Method5.7557()#G3_C1256::Method6.15147<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.T.T<class BaseClass0,class BaseClass1,class G3_C1256`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G3_C1256::Method4.15145()#G2_C302::Method5.7557()#G3_C1256::Method6.15147<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.A.T<class BaseClass1,class G3_C1256`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G3_C1256::Method4.15145()#G2_C302::Method5.7557()#G3_C1256::Method6.15147<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.A.B<class G3_C1256`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G3_C1256::Method4.15145()#G3_C1256::Method5.15146()#G3_C1256::Method6.MI.15148<System.Object>()#" call void Generated784::M.IBase1.T<class BaseClass0,class G3_C1256`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr 
"G3_C1256::Method4.15145()#G3_C1256::Method5.15146()#G3_C1256::Method6.MI.15148<System.Object>()#" call void Generated784::M.IBase1.A<class G3_C1256`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G3_C1256::Method4.15145()#G3_C1256::Method5.15146()#G3_C1256::Method6.15147<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G3_C1256.T<class BaseClass0,class G3_C1256`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G3_C1256::Method4.15145()#G3_C1256::Method5.15146()#G3_C1256::Method6.15147<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G3_C1256.A<class G3_C1256`1<class BaseClass0>>(!!0,string) newobj instance void class G3_C1256`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.T.T<class BaseClass0,class BaseClass1,class G3_C1256`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.A.T<class BaseClass1,class G3_C1256`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.A.B<class G3_C1256`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G3_C1256`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass0,class G3_C1256`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.A<class G3_C1256`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1256`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass0,class G3_C1256`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.A<class G3_C1256`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1256`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass1,class G3_C1256`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.B<class G3_C1256`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1256`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass1,class G3_C1256`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.B<class G3_C1256`1<class 
BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G3_C1256::Method4.15145()#G2_C302::Method5.7557()#G3_C1256::Method6.15147<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.T.T<class BaseClass0,class BaseClass1,class G3_C1256`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G3_C1256::Method4.15145()#G2_C302::Method5.7557()#G3_C1256::Method6.15147<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.A.T<class BaseClass1,class G3_C1256`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G3_C1256::Method4.15145()#G2_C302::Method5.7557()#G3_C1256::Method6.15147<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.A.B<class G3_C1256`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G3_C1256::Method4.15145()#G3_C1256::Method5.15146()#G3_C1256::Method6.MI.15148<System.Object>()#" call void Generated784::M.IBase1.T<class BaseClass0,class G3_C1256`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G3_C1256::Method4.15145()#G3_C1256::Method5.15146()#G3_C1256::Method6.MI.15148<System.Object>()#" call void Generated784::M.IBase1.A<class G3_C1256`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G3_C1256::Method4.15145()#G3_C1256::Method5.15146()#G3_C1256::Method6.15147<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G3_C1256.T<class BaseClass1,class G3_C1256`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G3_C1256::Method4.15145()#G3_C1256::Method5.15146()#G3_C1256::Method6.15147<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G3_C1256.B<class G3_C1256`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G3_C1256::Method4.15145()#G3_C1256::Method5.15146()#G3_C1256::Method6.MI.15148<System.Object>()#" call void Generated784::M.IBase1.T<class BaseClass1,class G3_C1256`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G3_C1256::Method4.15145()#G3_C1256::Method5.15146()#G3_C1256::Method6.MI.15148<System.Object>()#" call void Generated784::M.IBase1.B<class G3_C1256`1<class BaseClass1>>(!!0,string) newobj instance void class G2_C302`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.T.T<class BaseClass0,class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.A.T<class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.A.B<class G2_C302`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr 
"G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G2_C302`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass0,class G2_C302`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.A<class G2_C302`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C302`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass0,class G2_C302`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.A<class G2_C302`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.B<class G2_C302`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.B<class G2_C302`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G2_C302::Method4.7556()#G2_C302::Method5.7557()#G2_C302::Method6.7559<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.T.T<class BaseClass0,class BaseClass0,class G2_C302`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G2_C302::Method4.7556()#G2_C302::Method5.7557()#G2_C302::Method6.7559<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.A.T<class BaseClass0,class G2_C302`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G2_C302::Method4.7556()#G2_C302::Method5.7557()#G2_C302::Method6.7559<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.A.A<class G2_C302`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::Method4.7556()#G2_C302::Method5.MI.7558()#G2_C302::Method6.MI.7560<System.Object>()#" call void Generated784::M.IBase1.T<class BaseClass0,class G2_C302`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::Method4.7556()#G2_C302::Method5.MI.7558()#G2_C302::Method6.MI.7560<System.Object>()#" call 
void Generated784::M.IBase1.A<class G2_C302`2<class BaseClass0,class BaseClass0>>(!!0,string) newobj instance void class G2_C302`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.T.T<class BaseClass0,class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.A.T<class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.A.B<class G2_C302`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G2_C302`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass0,class G2_C302`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.A<class G2_C302`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C302`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass0,class G2_C302`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.A<class G2_C302`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.B<class G2_C302`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.B<class G2_C302`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G2_C302::Method4.7556()#G2_C302::Method5.7557()#G2_C302::Method6.7559<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.T.T<class BaseClass0,class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr 
"G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G2_C302::Method4.7556()#G2_C302::Method5.7557()#G2_C302::Method6.7559<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.A.T<class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G2_C302::Method4.7556()#G2_C302::Method5.7557()#G2_C302::Method6.7559<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.A.B<class G2_C302`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C302::Method4.7556()#G2_C302::Method5.MI.7558()#G2_C302::Method6.MI.7560<System.Object>()#" call void Generated784::M.IBase1.T<class BaseClass0,class G2_C302`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::Method4.7556()#G2_C302::Method5.MI.7558()#G2_C302::Method6.MI.7560<System.Object>()#" call void Generated784::M.IBase1.A<class G2_C302`2<class BaseClass0,class BaseClass1>>(!!0,string) newobj instance void class G2_C302`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.T.T<class BaseClass1,class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.B.T<class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.B.B<class G2_C302`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.B<class G2_C302`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.B<class G2_C302`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G2_C302::Method4.7556()#G2_C302::Method5.7557()#G2_C302::Method6.7559<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.T.T<class BaseClass1,class BaseClass0,class G2_C302`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr 
"G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G2_C302::Method4.7556()#G2_C302::Method5.7557()#G2_C302::Method6.7559<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.B.T<class BaseClass0,class G2_C302`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G2_C302::Method4.7556()#G2_C302::Method5.7557()#G2_C302::Method6.7559<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.B.A<class G2_C302`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G2_C302`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass0,class G2_C302`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.A<class G2_C302`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::Method4.7556()#G2_C302::Method5.MI.7558()#G2_C302::Method6.MI.7560<System.Object>()#" call void Generated784::M.IBase1.T<class BaseClass0,class G2_C302`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::Method4.7556()#G2_C302::Method5.MI.7558()#G2_C302::Method6.MI.7560<System.Object>()#" call void Generated784::M.IBase1.A<class G2_C302`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C302`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass0,class G2_C302`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.A<class G2_C302`2<class BaseClass1,class BaseClass0>>(!!0,string) newobj instance void class G2_C302`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.T.T<class BaseClass1,class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.B.T<class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.B.B<class G2_C302`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.B<class G2_C302`2<class 
BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.B<class G2_C302`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G2_C302::Method4.7556()#G2_C302::Method5.7557()#G2_C302::Method6.7559<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.T.T<class BaseClass1,class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G2_C302::Method4.7556()#G2_C302::Method5.7557()#G2_C302::Method6.7559<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.B.T<class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G2_C302::Method4.7556()#G2_C302::Method5.7557()#G2_C302::Method6.7559<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.B.B<class G2_C302`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C302::Method4.7556()#G2_C302::Method5.MI.7558()#G2_C302::Method6.MI.7560<System.Object>()#" call void Generated784::M.IBase1.T<class BaseClass0,class G2_C302`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::Method4.7556()#G2_C302::Method5.MI.7558()#G2_C302::Method6.MI.7560<System.Object>()#" call void Generated784::M.IBase1.A<class G2_C302`2<class BaseClass1,class BaseClass1>>(!!0,string) newobj instance void class G1_C6`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.T.T<class BaseClass0,class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.A.T<class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.A.A<class G1_C6`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.A<class G1_C6`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void 
Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.A<class G1_C6`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.B<class G1_C6`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.B<class G1_C6`2<class BaseClass0,class BaseClass0>>(!!0,string) newobj instance void class G1_C6`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.T.T<class BaseClass0,class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.A.T<class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.A.B<class G1_C6`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.A<class G1_C6`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.A<class G1_C6`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call 
void Generated784::M.IBase2.A.T<class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.B<class G1_C6`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.B<class G1_C6`2<class BaseClass0,class BaseClass1>>(!!0,string) newobj instance void class G1_C6`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.T.T<class BaseClass1,class BaseClass0,class G1_C6`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.B.T<class BaseClass0,class G1_C6`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.B.A<class G1_C6`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.B<class G1_C6`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.B<class G1_C6`2<class BaseClass1,class BaseClass0>>(!!0,string) newobj instance void class G1_C6`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.T.T<class BaseClass1,class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.B.T<class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.B.B<class G1_C6`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class 
BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.B<class G1_C6`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.B<class G1_C6`2<class BaseClass1,class BaseClass1>>(!!0,string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed { .maxstack 10 ldstr "===================== Struct Constrained Interface Calls Test =====================" call void [mscorlib]System.Console::WriteLine(string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void CalliTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Method Calli Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1256`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class 
IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1936() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G3_C1256::Method6.15147<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G2_C302::Method5.7557()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G3_C1256::Method4.15145()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() calli default 
string(class G3_C1256`1<class BaseClass0>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1326() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G3_C1256::Method4.15145()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G3_C1256::Method5.15146()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G3_C1256::Method6.MI.15148<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G3_C1256::Method6.15147<System.Object>()" ldstr "class G3_C1256`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass0>::Method5() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G3_C1256::Method5.15146()" ldstr "class G3_C1256`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass0>::Method4() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G3_C1256::Method4.15145()" ldstr "class G3_C1256`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass0>::ClassMethod1936() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G3_C1256`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass0>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass0>) 
ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G3_C1256`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass0>::ClassMethod1327<object>() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G3_C1256`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass0>::ClassMethod1326() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G3_C1256`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G3_C1256`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class 
G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1936() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G3_C1256::Method6.15147<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G2_C302::Method5.7557()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G3_C1256::Method4.15145()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1326() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class 
G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G3_C1256::Method4.15145()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G3_C1256::Method5.15146()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G3_C1256::Method6.MI.15148<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass1>::Method6<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G3_C1256::Method6.15147<System.Object>()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass1>::Method5() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G3_C1256::Method5.15146()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass1>::Method4() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G3_C1256::Method4.15145()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass1>::ClassMethod1936() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass1>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass1>::ClassMethod1327<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass1>::ClassMethod1326() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G3_C1256::Method4.15145()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G3_C1256::Method5.15146()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G3_C1256::Method6.MI.15148<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C302`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class 
BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass0>::ClassMethod1936() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass0>::Method6<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::Method6.7559<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass0>::Method5() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::Method5.7557()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass0>::Method4() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::Method4.7556()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass0>::ClassMethod1327<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass0>::ClassMethod1326() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::Method4.7556()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::Method5.MI.7558()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::Method6.MI.7560<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C302`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr 
"class G1_C6`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1936() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G2_C302::Method6.7559<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G2_C302::Method5.7557()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) 
ldstr "G2_C302::Method4.7556()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1326() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G2_C302::Method4.7556()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G2_C302::Method5.MI.7558()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G2_C302::Method6.MI.7560<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C302`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C6`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class 
BaseClass1,class BaseClass1>::ClassMethod1326() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass0>::ClassMethod1936() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass0>::Method6<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::Method6.7559<System.Object>()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass0>::Method5() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::Method5.7557()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass0>::Method4() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::Method4.7556()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass0>::ClassMethod1326() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::Method4.7556()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::Method5.MI.7558()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::Method6.MI.7560<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void 
class G2_C302`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C6`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1326() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass1>::ClassMethod1936() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G2_C302::Method6.7559<System.Object>()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G2_C302`2<class 
BaseClass1,class BaseClass1>) ldstr "G2_C302::Method5.7557()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G2_C302::Method4.7556()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass1>::ClassMethod1326() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G2_C302::Method4.7556()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G2_C302::Method5.MI.7558()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G2_C302::Method6.MI.7560<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G1_C6`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C6`2<class 
BaseClass0,class BaseClass0>::ClassMethod1327<object>() calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>) ldstr "G1_C6::ClassMethod1327.4810<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass0>::ClassMethod1326() calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>) ldstr "G1_C6::ClassMethod1326.4809()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G1_C6`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>) ldstr "G1_C6::ClassMethod1327.4810<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326() calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>) ldstr "G1_C6::ClassMethod1326.4809()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G1_C6`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C6`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>() calli default string(class G1_C6`2<class BaseClass1,class BaseClass0>) ldstr "G1_C6::ClassMethod1327.4810<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326() calli default string(class G1_C6`2<class BaseClass1,class BaseClass0>) ldstr "G1_C6::ClassMethod1326.4809()" ldstr 
"class G1_C6`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G1_C6`2<class BaseClass1,class BaseClass0>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G1_C6`2<class BaseClass1,class BaseClass0>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G1_C6`2<class BaseClass1,class BaseClass0>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G1_C6`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C6`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>() calli default string(class G1_C6`2<class BaseClass1,class BaseClass1>) ldstr "G1_C6::ClassMethod1327.4810<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1326() calli default string(class G1_C6`2<class BaseClass1,class BaseClass1>) ldstr "G1_C6::ClassMethod1326.4809()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G1_C6`2<class BaseClass1,class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G1_C6`2<class BaseClass1,class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() 
calli default string(class G1_C6`2<class BaseClass1,class BaseClass1>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
// Entry point: runs each generated call-pattern test (virtual calls, constrained calls,
// struct-constrained interface calls, and calli) and returns 100 on success.
.method public hidebysig static int32 Main() cil managed
{
    .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 )
    .entrypoint
    .maxstack 10
    call void Generated784::MethodCallingTest()
    call void Generated784::ConstrainedCallsTest()
    call void Generated784::StructConstrainedInterfaceCallsTest()
    call void Generated784::CalliTest()
    ldc.i4 100
    ret
}
}
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 } .assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) } //TYPES IN FORWARDER ASSEMBLIES: //TEST ASSEMBLY: .assembly Generated784 { .hash algorithm 0x00008004 } .assembly extern xunit.core {} .class public BaseClass0 { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void [mscorlib]System.Object::.ctor() ret } } .class public BaseClass1 extends BaseClass0 { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void BaseClass0::.ctor() ret } } .class public G3_C1256`1<T0> extends class G2_C302`2<class BaseClass0,class BaseClass1> implements class IBase1`1<!T0> { .method public hidebysig virtual instance string Method4() cil managed noinlining { ldstr "G3_C1256::Method4.15145()" ret } .method public hidebysig newslot virtual instance string Method5() cil managed noinlining { ldstr "G3_C1256::Method5.15146()" ret } .method public hidebysig virtual instance string Method6<M0>() cil managed noinlining { ldstr "G3_C1256::Method6.15147<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'IBase1<T0>.Method6'<M0>() cil managed noinlining { .override method instance string class IBase1`1<!T0>::Method6<[1]>() ldstr "G3_C1256::Method6.MI.15148<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void class G2_C302`2<class BaseClass0,class BaseClass1>::.ctor() ret } } .class public G2_C302`2<T0, T1> extends class G1_C6`2<!T0,class BaseClass1> implements class IBase2`2<class BaseClass1,!T1>, class IBase1`1<class BaseClass0> { .method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining { ldstr "G2_C302::Method7.7555<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig virtual instance string Method4() cil managed noinlining { ldstr "G2_C302::Method4.7556()" ret } .method public hidebysig virtual instance string Method5() cil managed noinlining { ldstr "G2_C302::Method5.7557()" ret } .method public hidebysig newslot virtual instance string 'IBase1<class BaseClass0>.Method5'() cil managed noinlining { .override method instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G2_C302::Method5.MI.7558()" ret } .method public hidebysig newslot virtual instance string Method6<M0>() cil managed noinlining { ldstr "G2_C302::Method6.7559<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string 
[mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'IBase1<class BaseClass0>.Method6'<M0>() cil managed noinlining { .override method instance string class IBase1`1<class BaseClass0>::Method6<[1]>() ldstr "G2_C302::Method6.MI.7560<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod1936() cil managed noinlining { ldstr "G2_C302::ClassMethod1936.7561()" ret } .method public hidebysig newslot virtual instance string 'G1_C6<T0,class BaseClass1>.ClassMethod1326'() cil managed noinlining { .override method instance string class G1_C6`2<!T0,class BaseClass1>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ret } .method public hidebysig newslot virtual instance string 'G1_C6<T0,class BaseClass1>.ClassMethod1327'<M0>() cil managed noinlining { .override method instance string class G1_C6`2<!T0,class BaseClass1>::ClassMethod1327<[1]>() ldstr "G2_C302::ClassMethod1327.MI.7563<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void class G1_C6`2<!T0,class BaseClass1>::.ctor() ret } } .class interface public abstract IBase1`1<+T0> { .method public hidebysig newslot abstract virtual instance string Method4() cil managed { } .method public hidebysig newslot abstract virtual instance string Method5() cil managed { } .method public hidebysig newslot abstract virtual instance string Method6<M0>() cil managed { } } .class public G1_C6`2<T0, T1> implements class IBase2`2<class BaseClass1,!T0> { .method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining { ldstr "G1_C6::Method7.4808<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod1326() cil managed noinlining { ldstr "G1_C6::ClassMethod1326.4809()" ret } .method public hidebysig newslot virtual instance string ClassMethod1327<M0>() cil managed noinlining { ldstr "G1_C6::ClassMethod1327.4810<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void [mscorlib]System.Object::.ctor() ret } } .class interface public abstract IBase2`2<+T0, -T1> { .method public hidebysig newslot abstract virtual instance string Method7<M0>() cil managed { } } .class public auto ansi beforefieldinit Generated784 { .method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed { 
.maxstack 5 .locals init (string[] actualResults) ldc.i4.s 0 newarr string stloc.s actualResults ldarg.1 ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)" ldc.i4.s 0 ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed { .maxstack 5 .locals init (string[] actualResults) ldc.i4.s 0 newarr string stloc.s actualResults ldarg.1 ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)" ldc.i4.s 0 ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1256.T<T0,(class G3_C1256`1<!!T0>)W>(!!W 'inst', string exp) cil managed { .maxstack 12 .locals init (string[] actualResults) ldc.i4.s 7 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1256.T<T0,(class G3_C1256`1<!!T0>)W>(!!W 'inst', string exp)" ldc.i4.s 7 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<!!T0>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<!!T0>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<!!T0>::ClassMethod1936() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<!!T0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<!!T0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<!!T0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<!!T0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1256.A<(class G3_C1256`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 12 .locals init (string[] actualResults) ldc.i4.s 7 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1256.A<(class G3_C1256`1<class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 7 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<class BaseClass0>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<class BaseClass0>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<class BaseClass0>::ClassMethod1936() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. 
!!W callvirt instance string class G3_C1256`1<class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1256.B<(class G3_C1256`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 12 .locals init (string[] actualResults) ldc.i4.s 7 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1256.B<(class G3_C1256`1<class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 7 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<class BaseClass1>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<class BaseClass1>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<class BaseClass1>::ClassMethod1936() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1256`1<class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C302.T.T<T0,T1,(class G2_C302`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 12 .locals init (string[] actualResults) ldc.i4.s 7 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C302.T.T<T0,T1,(class G2_C302`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 7 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<!!T0,!!T1>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<!!T0,!!T1>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<!!T0,!!T1>::ClassMethod1936() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<!!T0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<!!T0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<!!T0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C302.A.T<T1,(class G2_C302`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 12 .locals init (string[] actualResults) ldc.i4.s 7 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C302.A.T<T1,(class G2_C302`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 7 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C302`2<class BaseClass0,!!T1>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,!!T1>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,!!T1>::ClassMethod1936() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C302.A.A<(class G2_C302`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 12 .locals init (string[] actualResults) ldc.i4.s 7 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C302.A.A<(class G2_C302`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 7 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::ClassMethod1936() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C302.A.B<(class G2_C302`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 12 .locals init (string[] actualResults) ldc.i4.s 7 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C302.A.B<(class G2_C302`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 7 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1936() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C302.B.T<T1,(class G2_C302`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 12 .locals init (string[] actualResults) ldc.i4.s 7 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C302.B.T<T1,(class G2_C302`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 7 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,!!T1>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,!!T1>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,!!T1>::ClassMethod1936() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C302.B.A<(class G2_C302`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 12 .locals init (string[] actualResults) ldc.i4.s 7 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C302.B.A<(class G2_C302`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 7 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass0>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass0>::ClassMethod1936() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C302.B.B<(class G2_C302`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 12 .locals init (string[] actualResults) ldc.i4.s 7 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C302.B.B<(class G2_C302`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 7 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::ClassMethod1936() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. 
!!W callvirt instance string class IBase1`1<class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C6.T.T<T0,T1,(class G1_C6`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C6.T.T<T0,T1,(class G1_C6`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<!!T0,!!T1>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<!!T0,!!T1>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C6.A.T<T1,(class G1_C6`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C6.A.T<T1,(class G1_C6`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass0,!!T1>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass0,!!T1>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C6.A.A<(class G1_C6`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C6.A.A<(class G1_C6`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass0>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass0>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C6.A.B<(class G1_C6`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C6.A.B<(class G1_C6`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C6.B.T<T1,(class G1_C6`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C6.B.T<T1,(class G1_C6`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass1,!!T1>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass1,!!T1>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C6.B.A<(class G1_C6`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C6.B.A<(class G1_C6`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C6.B.B<(class G1_C6`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C6.B.B<(class G1_C6`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1326() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
!!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
!!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method public hidebysig static void MethodCallingTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Method Calling Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1256`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1936() ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class 
BaseClass0,class BaseClass1>::Method6<object>() ldstr "G3_C1256::Method6.15147<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G2_C302::Method5.7557()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G3_C1256::Method4.15145()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G3_C1256::Method4.15145()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G3_C1256::Method5.15146()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G3_C1256::Method6.MI.15148<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G3_C1256`1<class BaseClass0> callvirt instance string class G3_C1256`1<class BaseClass0>::Method6<object>() ldstr "G3_C1256::Method6.15147<System.Object>()" ldstr "class G3_C1256`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1256`1<class BaseClass0> callvirt instance string class G3_C1256`1<class BaseClass0>::Method5() ldstr "G3_C1256::Method5.15146()" ldstr "class G3_C1256`1<class 
BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1256`1<class BaseClass0> callvirt instance string class G3_C1256`1<class BaseClass0>::Method4() ldstr "G3_C1256::Method4.15145()" ldstr "class G3_C1256`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1256`1<class BaseClass0> callvirt instance string class G3_C1256`1<class BaseClass0>::ClassMethod1936() ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G3_C1256`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1256`1<class BaseClass0> callvirt instance string class G3_C1256`1<class BaseClass0>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G3_C1256`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1256`1<class BaseClass0> callvirt instance string class G3_C1256`1<class BaseClass0>::ClassMethod1327<object>() ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G3_C1256`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1256`1<class BaseClass0> callvirt instance string class G3_C1256`1<class BaseClass0>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G3_C1256`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G3_C1256`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class 
IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1936() ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G3_C1256::Method6.15147<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G2_C302::Method5.7557()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G3_C1256::Method4.15145()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance 
string class IBase1`1<class BaseClass0>::Method4() ldstr "G3_C1256::Method4.15145()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G3_C1256::Method5.15146()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G3_C1256::Method6.MI.15148<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G3_C1256`1<class BaseClass1> callvirt instance string class G3_C1256`1<class BaseClass1>::Method6<object>() ldstr "G3_C1256::Method6.15147<System.Object>()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1256`1<class BaseClass1> callvirt instance string class G3_C1256`1<class BaseClass1>::Method5() ldstr "G3_C1256::Method5.15146()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1256`1<class BaseClass1> callvirt instance string class G3_C1256`1<class BaseClass1>::Method4() ldstr "G3_C1256::Method4.15145()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1256`1<class BaseClass1> callvirt instance string class G3_C1256`1<class BaseClass1>::ClassMethod1936() ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1256`1<class BaseClass1> callvirt instance string class G3_C1256`1<class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1256`1<class BaseClass1> callvirt instance string class G3_C1256`1<class BaseClass1>::ClassMethod1327<object>() ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1256`1<class BaseClass1> callvirt instance string class G3_C1256`1<class BaseClass1>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass1>::Method4() ldstr "G3_C1256::Method4.15145()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method5() ldstr "G3_C1256::Method5.15146()" ldstr "class 
IBase1`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() ldstr "G3_C1256::Method6.MI.15148<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C302`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C302`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::ClassMethod1936() ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) 
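// Descriptive note (added comment, not part of the generated template): the calls that follow exercise the
// G2_C302`2<class BaseClass0,class BaseClass0> instance through its own class reference (Method6, Method5,
// Method4, Method7, ClassMethod1327, ClassMethod1326) and then through IBase1`1<class BaseClass0>; each call's
// result is checked against the expected G2_C302 implementation string via TestFramework::MethodCallTest.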
dup castclass class G2_C302`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::Method6<object>() ldstr "G2_C302::Method6.7559<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::Method5() ldstr "G2_C302::Method5.7557()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::Method4() ldstr "G2_C302::Method4.7556()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::ClassMethod1327<object>() ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass0>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G2_C302::Method4.7556()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G2_C302::Method5.MI.7558()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G2_C302::Method6.MI.7560<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C302`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() ldstr 
"G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1936() ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G2_C302::Method6.7559<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G2_C302::Method5.7557()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G2_C302::Method4.7556()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G2_C302::Method4.7556()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G2_C302::Method5.MI.7558()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G2_C302::Method6.MI.7560<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C302`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C6`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>() ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass1,class BaseClass1> callvirt instance string 
class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C302`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass0>::ClassMethod1936() ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass0>::Method6<object>() ldstr "G2_C302::Method6.7559<System.Object>()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass0>::Method5() ldstr "G2_C302::Method5.7557()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass0>::Method4() ldstr "G2_C302::Method4.7556()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>() ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C302`2<class BaseClass1,class 
BaseClass0>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G2_C302::Method4.7556()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G2_C302::Method5.MI.7558()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G2_C302::Method6.MI.7560<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C302`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C6`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>() ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string 
class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C302`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::ClassMethod1936() ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::Method6<object>() ldstr "G2_C302::Method6.7559<System.Object>()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::Method5() ldstr "G2_C302::Method5.7557()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::Method4() ldstr "G2_C302::Method4.7556()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>() ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C302`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C302`2<class BaseClass1,class BaseClass1>::ClassMethod1326() ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G2_C302::Method4.7556()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G2_C302::Method5.MI.7558()" ldstr "class IBase1`1<class BaseClass0> on type class 
G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G2_C302::Method6.MI.7560<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G1_C6`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C6`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass0>::ClassMethod1327<object>() ldstr "G1_C6::ClassMethod1327.4810<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass0>::ClassMethod1326() ldstr "G1_C6::ClassMethod1326.4809()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass0,class BaseClass0> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G1_C6`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() ldstr "G1_C6::ClassMethod1327.4810<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326() ldstr "G1_C6::ClassMethod1326.4809()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G1_C6`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C6`2<class BaseClass1,class BaseClass0> callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>() ldstr "G1_C6::ClassMethod1327.4810<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass1,class BaseClass0> callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326() ldstr "G1_C6::ClassMethod1326.4809()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass1,class BaseClass0> callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance 
string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G1_C6`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C6`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>() ldstr "G1_C6::ClassMethod1327.4810<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1326() ldstr "G1_C6::ClassMethod1326.4809()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C6`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void ConstrainedCallsTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Constrained Calls Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1256`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.T.T<class BaseClass0,class BaseClass1,class G3_C1256`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.A.T<class 
BaseClass1,class G3_C1256`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.A.B<class G3_C1256`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G3_C1256`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass0,class G3_C1256`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.A<class G3_C1256`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1256`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass0,class G3_C1256`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.A<class G3_C1256`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1256`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass1,class G3_C1256`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.B<class G3_C1256`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1256`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass1,class G3_C1256`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.B<class G3_C1256`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G3_C1256::Method4.15145()#G2_C302::Method5.7557()#G3_C1256::Method6.15147<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.T.T<class BaseClass0,class BaseClass1,class G3_C1256`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G3_C1256::Method4.15145()#G2_C302::Method5.7557()#G3_C1256::Method6.15147<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.A.T<class BaseClass1,class G3_C1256`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G3_C1256::Method4.15145()#G2_C302::Method5.7557()#G3_C1256::Method6.15147<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.A.B<class G3_C1256`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G3_C1256::Method4.15145()#G3_C1256::Method5.15146()#G3_C1256::Method6.MI.15148<System.Object>()#" call void Generated784::M.IBase1.T<class BaseClass0,class G3_C1256`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr 
"G3_C1256::Method4.15145()#G3_C1256::Method5.15146()#G3_C1256::Method6.MI.15148<System.Object>()#" call void Generated784::M.IBase1.A<class G3_C1256`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G3_C1256::Method4.15145()#G3_C1256::Method5.15146()#G3_C1256::Method6.15147<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G3_C1256.T<class BaseClass0,class G3_C1256`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G3_C1256::Method4.15145()#G3_C1256::Method5.15146()#G3_C1256::Method6.15147<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G3_C1256.A<class G3_C1256`1<class BaseClass0>>(!!0,string) newobj instance void class G3_C1256`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.T.T<class BaseClass0,class BaseClass1,class G3_C1256`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.A.T<class BaseClass1,class G3_C1256`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.A.B<class G3_C1256`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G3_C1256`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass0,class G3_C1256`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.A<class G3_C1256`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1256`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass0,class G3_C1256`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.A<class G3_C1256`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1256`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass1,class G3_C1256`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.B<class G3_C1256`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1256`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass1,class G3_C1256`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.B<class G3_C1256`1<class 
BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G3_C1256::Method4.15145()#G2_C302::Method5.7557()#G3_C1256::Method6.15147<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.T.T<class BaseClass0,class BaseClass1,class G3_C1256`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G3_C1256::Method4.15145()#G2_C302::Method5.7557()#G3_C1256::Method6.15147<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.A.T<class BaseClass1,class G3_C1256`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G3_C1256::Method4.15145()#G2_C302::Method5.7557()#G3_C1256::Method6.15147<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.A.B<class G3_C1256`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G3_C1256::Method4.15145()#G3_C1256::Method5.15146()#G3_C1256::Method6.MI.15148<System.Object>()#" call void Generated784::M.IBase1.T<class BaseClass0,class G3_C1256`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G3_C1256::Method4.15145()#G3_C1256::Method5.15146()#G3_C1256::Method6.MI.15148<System.Object>()#" call void Generated784::M.IBase1.A<class G3_C1256`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G3_C1256::Method4.15145()#G3_C1256::Method5.15146()#G3_C1256::Method6.15147<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G3_C1256.T<class BaseClass1,class G3_C1256`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G3_C1256::Method4.15145()#G3_C1256::Method5.15146()#G3_C1256::Method6.15147<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G3_C1256.B<class G3_C1256`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G3_C1256::Method4.15145()#G3_C1256::Method5.15146()#G3_C1256::Method6.MI.15148<System.Object>()#" call void Generated784::M.IBase1.T<class BaseClass1,class G3_C1256`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G3_C1256::Method4.15145()#G3_C1256::Method5.15146()#G3_C1256::Method6.MI.15148<System.Object>()#" call void Generated784::M.IBase1.B<class G3_C1256`1<class BaseClass1>>(!!0,string) newobj instance void class G2_C302`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.T.T<class BaseClass0,class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.A.T<class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.A.B<class G2_C302`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr 
"G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G2_C302`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass0,class G2_C302`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.A<class G2_C302`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C302`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass0,class G2_C302`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.A<class G2_C302`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.B<class G2_C302`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.B<class G2_C302`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G2_C302::Method4.7556()#G2_C302::Method5.7557()#G2_C302::Method6.7559<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.T.T<class BaseClass0,class BaseClass0,class G2_C302`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G2_C302::Method4.7556()#G2_C302::Method5.7557()#G2_C302::Method6.7559<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.A.T<class BaseClass0,class G2_C302`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G2_C302::Method4.7556()#G2_C302::Method5.7557()#G2_C302::Method6.7559<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.A.A<class G2_C302`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::Method4.7556()#G2_C302::Method5.MI.7558()#G2_C302::Method6.MI.7560<System.Object>()#" call void Generated784::M.IBase1.T<class BaseClass0,class G2_C302`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::Method4.7556()#G2_C302::Method5.MI.7558()#G2_C302::Method6.MI.7560<System.Object>()#" call 
void Generated784::M.IBase1.A<class G2_C302`2<class BaseClass0,class BaseClass0>>(!!0,string) newobj instance void class G2_C302`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.T.T<class BaseClass0,class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.A.T<class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.A.B<class G2_C302`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G2_C302`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass0,class G2_C302`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.A<class G2_C302`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C302`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass0,class G2_C302`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.A<class G2_C302`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.B<class G2_C302`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.B<class G2_C302`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G2_C302::Method4.7556()#G2_C302::Method5.7557()#G2_C302::Method6.7559<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.T.T<class BaseClass0,class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr 
"G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G2_C302::Method4.7556()#G2_C302::Method5.7557()#G2_C302::Method6.7559<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.A.T<class BaseClass1,class G2_C302`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G2_C302::Method4.7556()#G2_C302::Method5.7557()#G2_C302::Method6.7559<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.A.B<class G2_C302`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C302::Method4.7556()#G2_C302::Method5.MI.7558()#G2_C302::Method6.MI.7560<System.Object>()#" call void Generated784::M.IBase1.T<class BaseClass0,class G2_C302`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::Method4.7556()#G2_C302::Method5.MI.7558()#G2_C302::Method6.MI.7560<System.Object>()#" call void Generated784::M.IBase1.A<class G2_C302`2<class BaseClass0,class BaseClass1>>(!!0,string) newobj instance void class G2_C302`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.T.T<class BaseClass1,class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.B.T<class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.B.B<class G2_C302`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.B<class G2_C302`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.B<class G2_C302`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G2_C302::Method4.7556()#G2_C302::Method5.7557()#G2_C302::Method6.7559<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.T.T<class BaseClass1,class BaseClass0,class G2_C302`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr 
"G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G2_C302::Method4.7556()#G2_C302::Method5.7557()#G2_C302::Method6.7559<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.B.T<class BaseClass0,class G2_C302`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G2_C302::Method4.7556()#G2_C302::Method5.7557()#G2_C302::Method6.7559<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.B.A<class G2_C302`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G2_C302`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass0,class G2_C302`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.A<class G2_C302`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::Method4.7556()#G2_C302::Method5.MI.7558()#G2_C302::Method6.MI.7560<System.Object>()#" call void Generated784::M.IBase1.T<class BaseClass0,class G2_C302`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::Method4.7556()#G2_C302::Method5.MI.7558()#G2_C302::Method6.MI.7560<System.Object>()#" call void Generated784::M.IBase1.A<class G2_C302`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C302`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass0,class G2_C302`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.A<class G2_C302`2<class BaseClass1,class BaseClass0>>(!!0,string) newobj instance void class G2_C302`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.T.T<class BaseClass1,class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.B.T<class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.B.B<class G2_C302`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.B.B<class G2_C302`2<class 
BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.IBase2.A.B<class G2_C302`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G2_C302::Method4.7556()#G2_C302::Method5.7557()#G2_C302::Method6.7559<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.T.T<class BaseClass1,class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G2_C302::Method4.7556()#G2_C302::Method5.7557()#G2_C302::Method6.7559<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.B.T<class BaseClass1,class G2_C302`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::ClassMethod1326.MI.7562()#G2_C302::ClassMethod1327.MI.7563<System.Object>()#G2_C302::ClassMethod1936.7561()#G2_C302::Method4.7556()#G2_C302::Method5.7557()#G2_C302::Method6.7559<System.Object>()#G2_C302::Method7.7555<System.Object>()#" call void Generated784::M.G2_C302.B.B<class G2_C302`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C302::Method4.7556()#G2_C302::Method5.MI.7558()#G2_C302::Method6.MI.7560<System.Object>()#" call void Generated784::M.IBase1.T<class BaseClass0,class G2_C302`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C302::Method4.7556()#G2_C302::Method5.MI.7558()#G2_C302::Method6.MI.7560<System.Object>()#" call void Generated784::M.IBase1.A<class G2_C302`2<class BaseClass1,class BaseClass1>>(!!0,string) newobj instance void class G1_C6`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.T.T<class BaseClass0,class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.A.T<class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.A.A<class G1_C6`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.A<class G1_C6`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void 
Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.A<class G1_C6`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.B<class G1_C6`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.B<class G1_C6`2<class BaseClass0,class BaseClass0>>(!!0,string) newobj instance void class G1_C6`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.T.T<class BaseClass0,class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.A.T<class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.A.B<class G1_C6`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.A<class G1_C6`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.A<class G1_C6`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call 
void Generated784::M.IBase2.A.T<class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.B<class G1_C6`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.B<class G1_C6`2<class BaseClass0,class BaseClass1>>(!!0,string) newobj instance void class G1_C6`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.T.T<class BaseClass1,class BaseClass0,class G1_C6`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.B.T<class BaseClass0,class G1_C6`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.B.A<class G1_C6`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.B<class G1_C6`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.B<class G1_C6`2<class BaseClass1,class BaseClass0>>(!!0,string) newobj instance void class G1_C6`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.T.T<class BaseClass1,class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.B.T<class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.G1_C6.B.B<class G1_C6`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass1,class 
BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.T<class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.B.B<class G1_C6`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.T<class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C6::Method7.4808<System.Object>()#" call void Generated784::M.IBase2.A.B<class G1_C6`2<class BaseClass1,class BaseClass1>>(!!0,string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed { .maxstack 10 ldstr "===================== Struct Constrained Interface Calls Test =====================" call void [mscorlib]System.Console::WriteLine(string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void CalliTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Method Calli Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1256`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class 
IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1936() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G3_C1256::Method6.15147<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G2_C302::Method5.7557()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G3_C1256::Method4.15145()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() calli default 
string(class G3_C1256`1<class BaseClass0>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1326() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G3_C1256::Method4.15145()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G3_C1256::Method5.15146()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G3_C1256::Method6.MI.15148<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G3_C1256::Method6.15147<System.Object>()" ldstr "class G3_C1256`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass0>::Method5() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G3_C1256::Method5.15146()" ldstr "class G3_C1256`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass0>::Method4() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G3_C1256::Method4.15145()" ldstr "class G3_C1256`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass0>::ClassMethod1936() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G3_C1256`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass0>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass0>) 
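// Note: the ldstr pair that follows each ldvirtftn/calli sequence supplies the expected return string and a context description; TestFramework::MethodCallTest asserts that the indirect call's result matches that expectation.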
ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G3_C1256`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass0>::ClassMethod1327<object>() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G3_C1256`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass0>::ClassMethod1326() calli default string(class G3_C1256`1<class BaseClass0>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G3_C1256`1<class BaseClass0> on type class G3_C1256`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G3_C1256`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class 
G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1936() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G3_C1256::Method6.15147<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G2_C302::Method5.7557()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G3_C1256::Method4.15145()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1326() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class 
G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G3_C1256::Method4.15145()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G3_C1256::Method5.15146()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G3_C1256::Method6.MI.15148<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass1>::Method6<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G3_C1256::Method6.15147<System.Object>()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass1>::Method5() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G3_C1256::Method5.15146()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass1>::Method4() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G3_C1256::Method4.15145()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass1>::ClassMethod1936() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass1>::Method7<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass1>::ClassMethod1327<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1256`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1256`1<class BaseClass1>::ClassMethod1326() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G3_C1256`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G3_C1256::Method4.15145()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G3_C1256::Method5.15146()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G3_C1256`1<class BaseClass1>) ldstr "G3_C1256::Method6.MI.15148<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1256`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C302`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class 
BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass0>::ClassMethod1936() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass0>::Method6<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::Method6.7559<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass0>::Method5() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::Method5.7557()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass0>::Method4() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::Method4.7556()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass0>::ClassMethod1327<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass0>::ClassMethod1326() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::Method4.7556()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::Method5.MI.7558()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass0>) ldstr "G2_C302::Method6.MI.7560<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C302`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr 
"class G1_C6`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1936() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G2_C302::Method6.7559<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G2_C302::Method5.7557()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) 
ldstr "G2_C302::Method4.7556()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass0,class BaseClass1>::ClassMethod1326() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G2_C302`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G2_C302::Method4.7556()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G2_C302::Method5.MI.7558()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C302`2<class BaseClass0,class BaseClass1>) ldstr "G2_C302::Method6.MI.7560<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C302`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C6`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class 
BaseClass1,class BaseClass1>::ClassMethod1326() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass0>::ClassMethod1936() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass0>::Method6<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::Method6.7559<System.Object>()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass0>::Method5() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::Method5.7557()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass0>::Method4() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::Method4.7556()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass0>::ClassMethod1326() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::Method4.7556()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::Method5.MI.7558()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::Method6.MI.7560<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass0>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void 
class G2_C302`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C6`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1326() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass1>::ClassMethod1936() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G2_C302::ClassMethod1936.7561()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G2_C302::Method6.7559<System.Object>()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G2_C302`2<class 
BaseClass1,class BaseClass1>) ldstr "G2_C302::Method5.7557()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G2_C302::Method4.7556()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G2_C302::Method7.7555<System.Object>()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G2_C302::ClassMethod1327.MI.7563<System.Object>()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C302`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C302`2<class BaseClass1,class BaseClass1>::ClassMethod1326() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G2_C302::ClassMethod1326.MI.7562()" ldstr "class G2_C302`2<class BaseClass1,class BaseClass1> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G2_C302::Method4.7556()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G2_C302::Method5.MI.7558()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C302`2<class BaseClass1,class BaseClass1>) ldstr "G2_C302::Method6.MI.7560<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C302`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G1_C6`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C6`2<class 
BaseClass0,class BaseClass0>::ClassMethod1327<object>() calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>) ldstr "G1_C6::ClassMethod1327.4810<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass0>::ClassMethod1326() calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>) ldstr "G1_C6::ClassMethod1326.4809()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G1_C6`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>() calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>) ldstr "G1_C6::ClassMethod1327.4810<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326() calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>) ldstr "G1_C6::ClassMethod1326.4809()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G1_C6`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C6`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>() calli default string(class G1_C6`2<class BaseClass1,class BaseClass0>) ldstr "G1_C6::ClassMethod1327.4810<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326() calli default string(class G1_C6`2<class BaseClass1,class BaseClass0>) ldstr "G1_C6::ClassMethod1326.4809()" ldstr 
"class G1_C6`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G1_C6`2<class BaseClass1,class BaseClass0>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G1_C6`2<class BaseClass1,class BaseClass0>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G1_C6`2<class BaseClass1,class BaseClass0>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G1_C6`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C6`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>() calli default string(class G1_C6`2<class BaseClass1,class BaseClass1>) ldstr "G1_C6::ClassMethod1327.4810<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1326() calli default string(class G1_C6`2<class BaseClass1,class BaseClass1>) ldstr "G1_C6::ClassMethod1326.4809()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C6`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G1_C6`2<class BaseClass1,class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G1_C6`2<class BaseClass1,class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() 
calli default string(class G1_C6`2<class BaseClass1,class BaseClass1>) ldstr "G1_C6::Method7.4808<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static int32 Main() cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 10 call void Generated784::MethodCallingTest() call void Generated784::ConstrainedCallsTest() call void Generated784::StructConstrainedInterfaceCallsTest() call void Generated784::CalliTest() ldc.i4 100 ret } }
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/JIT/HardwareIntrinsics/General/Vector64/Max.Int16.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void MaxInt16() { var test = new VectorBinaryOpTest__MaxInt16(); // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorBinaryOpTest__MaxInt16 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Int16[] inArray1, Int16[] inArray2, Int16[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int16>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int16>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int16>(); if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int16, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int16, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + 
expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<Int16> _fld1; public Vector64<Int16> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref testStruct._fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref testStruct._fld2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>()); return testStruct; } public void RunStructFldScenario(VectorBinaryOpTest__MaxInt16 testClass) { var result = Vector64.Max(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Int16>>() / sizeof(Int16); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<Int16>>() / sizeof(Int16); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Int16>>() / sizeof(Int16); private static Int16[] _data1 = new Int16[Op1ElementCount]; private static Int16[] _data2 = new Int16[Op2ElementCount]; private static Vector64<Int16> _clsVar1; private static Vector64<Int16> _clsVar2; private Vector64<Int16> _fld1; private Vector64<Int16> _fld2; private DataTable _dataTable; static VectorBinaryOpTest__MaxInt16() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _clsVar1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _clsVar2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>()); } public VectorBinaryOpTest__MaxInt16() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _fld2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); } _dataTable = new DataTable(_data1, _data2, new Int16[RetElementCount], LargestVectorSize); } public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Vector64.Max( Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Int16>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var method = typeof(Vector64).GetMethod(nameof(Vector64.Max), new Type[] { typeof(Vector64<Int16>), typeof(Vector64<Int16>) }); if (method is null) { method = typeof(Vector64).GetMethod(nameof(Vector64.Max), 1, new Type[] { typeof(Vector64<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), typeof(Vector64<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) }); } if (method.IsGenericMethodDefinition) { method = method.MakeGenericMethod(typeof(Int16)); } var result = method.Invoke(null, new object[] { Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Int16>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int16>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Vector64.Max( _clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector64<Int16>>(_dataTable.inArray2Ptr); var result = Vector64.Max(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new VectorBinaryOpTest__MaxInt16(); var result = Vector64.Max(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Vector64.Max(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Vector64.Max(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } private void ValidateResult(Vector64<Int16> op1, Vector64<Int16> op2, void* result, [CallerMemberName] string method = "") { Int16[] inArray1 = new Int16[Op1ElementCount]; Int16[] inArray2 = new Int16[Op2ElementCount]; Int16[] outArray = new Int16[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int16>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { Int16[] inArray1 = new Int16[Op1ElementCount]; Int16[] inArray2 = new Int16[Op2ElementCount]; Int16[] outArray = new Int16[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), 
(uint)Unsafe.SizeOf<Vector64<Int16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<Int16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int16>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Int16[] left, Int16[] right, Int16[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (result[0] != ((left[0] > right[0]) ? left[0] : right[0])) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (result[i] != ((left[i] > right[i]) ? left[i] : right[i])) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Vector64)}.{nameof(Vector64.Max)}<Int16>(Vector64<Int16>, Vector64<Int16>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/JIT/jit64/gc/misc/structret4_1.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // using System; struct Pad { #pragma warning disable 0414 public double d1; public double d2; public double d3; public double d4; public double d5; public double d6; public double d7; public double d8; public double d9; public double d10; public double d11; public double d12; public double d13; public double d14; public double d15; public double d16; public double d17; public double d18; public double d19; public double d20; public double d21; public double d22; public double d23; public double d24; public double d25; public double d26; public double d27; public double d28; public double d29; public double d30; } struct S { public String str2; #pragma warning restore 0414 public String str; public Pad pad; public S(String s) { str = s; str2 = s + str; pad.d1 = pad.d2 = pad.d3 = pad.d4 = pad.d5 = pad.d6 = pad.d7 = pad.d8 = pad.d9 = pad.d10 = pad.d11 = pad.d12 = pad.d13 = pad.d14 = pad.d15 = pad.d16 = pad.d17 = pad.d18 = pad.d19 = pad.d20 = pad.d21 = pad.d22 = pad.d23 = pad.d24 = pad.d25 = pad.d26 = pad.d27 = pad.d28 = pad.d29 = pad.d30 = 3.3; } } class Test_structret4_1 { public static S c(S s1, S s2, S s3) { s1.str = (s1.str + s2.str + s3.str); return s1; } public static int Main() { S sM = new S("test"); S sM2 = new S("test2"); S sM3 = new S("test3"); Console.WriteLine(c(sM, sM2, sM3)); return 100; } }
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/JIT/Regression/CLR-x86-JIT/V1-M09.5-PDC/b26332/b26332.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).il" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).il" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/libraries/System.Text.Json/tests/System.Text.Json.Tests/Serialization/DynamicTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Dynamic; using System.Linq; using Xunit; namespace System.Text.Json.Serialization.Tests { public static class DynamicTests { public const string Json = "{\"MyString\":\"Hello\",\"MyNull\":null,\"MyBoolean\":true,\"MyArray\":[1,2],\"MyInt\":42,\"MyDateTime\":\"2020-07-08T00:00:00\",\"MyGuid\":\"ed957609-cdfe-412f-88c1-02daca1b4f51\",\"MyObject\":{\"MyString\":\"World\"}}"; public static DateTime MyDateTime => new DateTime(2020, 7, 8); public static Guid MyGuid => new Guid("ed957609-cdfe-412f-88c1-02daca1b4f51"); internal static ExpandoObject GetExpandoObject() { dynamic myDynamicChild = new ExpandoObject(); myDynamicChild.MyString = "World"; dynamic myDynamic = new ExpandoObject(); myDynamic.MyString = "Hello"; myDynamic.MyNull = null; myDynamic.MyBoolean = true; myDynamic.MyArray = new List<int>() { 1, 2 }; myDynamic.MyInt = 42; myDynamic.MyDateTime = MyDateTime; myDynamic.MyGuid = MyGuid; myDynamic.MyObject = myDynamicChild; // Verify basic dynamic support. int c = myDynamic.MyInt; Assert.Equal(42, c); return myDynamic; } [ConditionalFact(typeof(PlatformDetection), nameof(PlatformDetection.IsReflectionEmitSupported))] public static void DynamicKeyword() { dynamic myDynamic = GetExpandoObject(); // STJ serializes ExpandoObject as IDictionary<string, object>; // there is no custom converter for ExpandoObject. string json = JsonSerializer.Serialize<dynamic>(myDynamic); JsonTestHelper.AssertJsonEqual(Json, json); dynamic d = JsonSerializer.Deserialize<dynamic>(json); try { // We will get an exception here if we try to access a dynamic property since 'object' is deserialized // as a JsonElement and not an ExpandoObject. 
int c = d.MyInt; Assert.True(false, "Should have thrown Exception!"); } catch (Microsoft.CSharp.RuntimeBinder.RuntimeBinderException) { } Assert.IsType<JsonElement>(d); JsonElement elem = (JsonElement)d; VerifyPrimitives(); VerifyObject(); VerifyArray(); // Re-serialize json = JsonSerializer.Serialize<object>(elem); JsonTestHelper.AssertJsonEqual(Json, json); json = JsonSerializer.Serialize<dynamic>(elem); JsonTestHelper.AssertJsonEqual(Json, json); json = JsonSerializer.Serialize(elem); JsonTestHelper.AssertJsonEqual(Json, json); void VerifyPrimitives() { Assert.Equal("Hello", elem.GetProperty("MyString").GetString()); Assert.True(elem.GetProperty("MyBoolean").GetBoolean()); Assert.Equal(42, elem.GetProperty("MyInt").GetInt32()); Assert.Equal(MyDateTime, elem.GetProperty("MyDateTime").GetDateTime()); Assert.Equal(MyGuid, elem.GetProperty("MyGuid").GetGuid()); } void VerifyObject() { Assert.Equal("World", elem.GetProperty("MyObject").GetProperty("MyString").GetString()); } void VerifyArray() { JsonElement.ArrayEnumerator enumerator = elem.GetProperty("MyArray").EnumerateArray(); Assert.Equal(2, enumerator.Count()); enumerator.MoveNext(); Assert.Equal(1, enumerator.Current.GetInt32()); enumerator.MoveNext(); Assert.Equal(2, enumerator.Current.GetInt32()); } } [ConditionalFact(typeof(PlatformDetection), nameof(PlatformDetection.IsReflectionEmitSupported))] public static void ExpandoObject() { ExpandoObject expando = JsonSerializer.Deserialize<ExpandoObject>(Json); Assert.Equal(8, ((IDictionary<string, object>)expando).Keys.Count); dynamic obj = expando; VerifyPrimitives(); VerifyObject(); VerifyArray(); // Re-serialize string json = JsonSerializer.Serialize<ExpandoObject>(obj); JsonTestHelper.AssertJsonEqual(Json, json); json = JsonSerializer.Serialize<dynamic>(obj); JsonTestHelper.AssertJsonEqual(Json, json); json = JsonSerializer.Serialize(obj); JsonTestHelper.AssertJsonEqual(Json, json); void VerifyPrimitives() { JsonElement jsonElement = obj.MyString; Assert.Equal("Hello", jsonElement.GetString()); jsonElement = obj.MyBoolean; Assert.True(jsonElement.GetBoolean()); jsonElement = obj.MyInt; Assert.Equal(42, jsonElement.GetInt32()); jsonElement = obj.MyDateTime; Assert.Equal(MyDateTime, jsonElement.GetDateTime()); jsonElement = obj.MyGuid; Assert.Equal(MyGuid, jsonElement.GetGuid()); } void VerifyObject() { JsonElement jsonElement = obj.MyObject; // Here we access a property on a nested object and must use JsonElement (not a dynamic property). Assert.Equal("World", jsonElement.GetProperty("MyString").GetString()); } void VerifyArray() { JsonElement jsonElement = obj.MyArray; Assert.Equal(2, jsonElement.EnumerateArray().Count()); } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Dynamic; using System.Linq; using Xunit; namespace System.Text.Json.Serialization.Tests { public static class DynamicTests { public const string Json = "{\"MyString\":\"Hello\",\"MyNull\":null,\"MyBoolean\":true,\"MyArray\":[1,2],\"MyInt\":42,\"MyDateTime\":\"2020-07-08T00:00:00\",\"MyGuid\":\"ed957609-cdfe-412f-88c1-02daca1b4f51\",\"MyObject\":{\"MyString\":\"World\"}}"; public static DateTime MyDateTime => new DateTime(2020, 7, 8); public static Guid MyGuid => new Guid("ed957609-cdfe-412f-88c1-02daca1b4f51"); internal static ExpandoObject GetExpandoObject() { dynamic myDynamicChild = new ExpandoObject(); myDynamicChild.MyString = "World"; dynamic myDynamic = new ExpandoObject(); myDynamic.MyString = "Hello"; myDynamic.MyNull = null; myDynamic.MyBoolean = true; myDynamic.MyArray = new List<int>() { 1, 2 }; myDynamic.MyInt = 42; myDynamic.MyDateTime = MyDateTime; myDynamic.MyGuid = MyGuid; myDynamic.MyObject = myDynamicChild; // Verify basic dynamic support. int c = myDynamic.MyInt; Assert.Equal(42, c); return myDynamic; } [ConditionalFact(typeof(PlatformDetection), nameof(PlatformDetection.IsReflectionEmitSupported))] public static void DynamicKeyword() { dynamic myDynamic = GetExpandoObject(); // STJ serializes ExpandoObject as IDictionary<string, object>; // there is no custom converter for ExpandoObject. string json = JsonSerializer.Serialize<dynamic>(myDynamic); JsonTestHelper.AssertJsonEqual(Json, json); dynamic d = JsonSerializer.Deserialize<dynamic>(json); try { // We will get an exception here if we try to access a dynamic property since 'object' is deserialized // as a JsonElement and not an ExpandoObject. 
int c = d.MyInt; Assert.True(false, "Should have thrown Exception!"); } catch (Microsoft.CSharp.RuntimeBinder.RuntimeBinderException) { } Assert.IsType<JsonElement>(d); JsonElement elem = (JsonElement)d; VerifyPrimitives(); VerifyObject(); VerifyArray(); // Re-serialize json = JsonSerializer.Serialize<object>(elem); JsonTestHelper.AssertJsonEqual(Json, json); json = JsonSerializer.Serialize<dynamic>(elem); JsonTestHelper.AssertJsonEqual(Json, json); json = JsonSerializer.Serialize(elem); JsonTestHelper.AssertJsonEqual(Json, json); void VerifyPrimitives() { Assert.Equal("Hello", elem.GetProperty("MyString").GetString()); Assert.True(elem.GetProperty("MyBoolean").GetBoolean()); Assert.Equal(42, elem.GetProperty("MyInt").GetInt32()); Assert.Equal(MyDateTime, elem.GetProperty("MyDateTime").GetDateTime()); Assert.Equal(MyGuid, elem.GetProperty("MyGuid").GetGuid()); } void VerifyObject() { Assert.Equal("World", elem.GetProperty("MyObject").GetProperty("MyString").GetString()); } void VerifyArray() { JsonElement.ArrayEnumerator enumerator = elem.GetProperty("MyArray").EnumerateArray(); Assert.Equal(2, enumerator.Count()); enumerator.MoveNext(); Assert.Equal(1, enumerator.Current.GetInt32()); enumerator.MoveNext(); Assert.Equal(2, enumerator.Current.GetInt32()); } } [ConditionalFact(typeof(PlatformDetection), nameof(PlatformDetection.IsReflectionEmitSupported))] public static void ExpandoObject() { ExpandoObject expando = JsonSerializer.Deserialize<ExpandoObject>(Json); Assert.Equal(8, ((IDictionary<string, object>)expando).Keys.Count); dynamic obj = expando; VerifyPrimitives(); VerifyObject(); VerifyArray(); // Re-serialize string json = JsonSerializer.Serialize<ExpandoObject>(obj); JsonTestHelper.AssertJsonEqual(Json, json); json = JsonSerializer.Serialize<dynamic>(obj); JsonTestHelper.AssertJsonEqual(Json, json); json = JsonSerializer.Serialize(obj); JsonTestHelper.AssertJsonEqual(Json, json); void VerifyPrimitives() { JsonElement jsonElement = obj.MyString; Assert.Equal("Hello", jsonElement.GetString()); jsonElement = obj.MyBoolean; Assert.True(jsonElement.GetBoolean()); jsonElement = obj.MyInt; Assert.Equal(42, jsonElement.GetInt32()); jsonElement = obj.MyDateTime; Assert.Equal(MyDateTime, jsonElement.GetDateTime()); jsonElement = obj.MyGuid; Assert.Equal(MyGuid, jsonElement.GetGuid()); } void VerifyObject() { JsonElement jsonElement = obj.MyObject; // Here we access a property on a nested object and must use JsonElement (not a dynamic property). Assert.Equal("World", jsonElement.GetProperty("MyString").GetString()); } void VerifyArray() { JsonElement jsonElement = obj.MyArray; Assert.Equal(2, jsonElement.EnumerateArray().Count()); } } } }
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/Interop/PInvoke/Generics/GenericsTest.Vector128D.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using Xunit; unsafe partial class GenericsNative { [DllImport(nameof(GenericsNative))] public static extern Vector128<double> GetVector128D(double e00, double e01); [DllImport(nameof(GenericsNative))] public static extern void GetVector128DOut(double e00, double e01, Vector128<double>* value); [DllImport(nameof(GenericsNative))] public static extern void GetVector128DOut(double e00, double e01, out Vector128<double> value); [DllImport(nameof(GenericsNative))] public static extern Vector128<double>* GetVector128DPtr(double e00, double e01); [DllImport(nameof(GenericsNative), EntryPoint = "GetVector128DPtr")] public static extern ref readonly Vector128<double> GetVector128DRef(double e00, double e01); [DllImport(nameof(GenericsNative))] public static extern Vector128<double> AddVector128D(Vector128<double> lhs, Vector128<double> rhs); [DllImport(nameof(GenericsNative))] public static extern Vector128<double> AddVector128Ds(Vector128<double>* pValues, int count); [DllImport(nameof(GenericsNative))] public static extern Vector128<double> AddVector128Ds([MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 1)] Vector128<double>[] pValues, int count); [DllImport(nameof(GenericsNative))] public static extern Vector128<double> AddVector128Ds(in Vector128<double> pValues, int count); } unsafe partial class GenericsTest { private static void TestVector128D() { Assert.Throws<MarshalDirectiveException>(() => GenericsNative.GetVector128D(1.0, 2.0)); Vector128<double> value2; GenericsNative.GetVector128DOut(1.0, 2.0, &value2); Assert.Equal(value2.GetElement(0), 1.0); Assert.Equal(value2.GetElement(1), 2.0); Assert.Throws<MarshalDirectiveException>(() => GenericsNative.GetVector128DOut(1.0, 2.0, out Vector128<double> value3)); Vector128<double>* value4 = GenericsNative.GetVector128DPtr(1.0, 2.0); Assert.Equal(value4->GetElement(0), 1.0); Assert.Equal(value4->GetElement(1), 2.0); Assert.Throws<MarshalDirectiveException>(() => GenericsNative.GetVector128DRef(1.0, 2.0)); Assert.Throws<MarshalDirectiveException>(() => GenericsNative.AddVector128D(default, default)); Vector128<double>[] values = new Vector128<double>[] { default, value2, default, *value4, default, }; Assert.Throws<MarshalDirectiveException>(() => { fixed (Vector128<double>* pValues = &values[0]) { GenericsNative.AddVector128Ds(pValues, values.Length); } }); Assert.Throws<MarshalDirectiveException>(() => GenericsNative.AddVector128Ds(values, values.Length)); Assert.Throws<MarshalDirectiveException>(() => GenericsNative.AddVector128Ds(in values[0], values.Length)); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using Xunit; unsafe partial class GenericsNative { [DllImport(nameof(GenericsNative))] public static extern Vector128<double> GetVector128D(double e00, double e01); [DllImport(nameof(GenericsNative))] public static extern void GetVector128DOut(double e00, double e01, Vector128<double>* value); [DllImport(nameof(GenericsNative))] public static extern void GetVector128DOut(double e00, double e01, out Vector128<double> value); [DllImport(nameof(GenericsNative))] public static extern Vector128<double>* GetVector128DPtr(double e00, double e01); [DllImport(nameof(GenericsNative), EntryPoint = "GetVector128DPtr")] public static extern ref readonly Vector128<double> GetVector128DRef(double e00, double e01); [DllImport(nameof(GenericsNative))] public static extern Vector128<double> AddVector128D(Vector128<double> lhs, Vector128<double> rhs); [DllImport(nameof(GenericsNative))] public static extern Vector128<double> AddVector128Ds(Vector128<double>* pValues, int count); [DllImport(nameof(GenericsNative))] public static extern Vector128<double> AddVector128Ds([MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 1)] Vector128<double>[] pValues, int count); [DllImport(nameof(GenericsNative))] public static extern Vector128<double> AddVector128Ds(in Vector128<double> pValues, int count); } unsafe partial class GenericsTest { private static void TestVector128D() { Assert.Throws<MarshalDirectiveException>(() => GenericsNative.GetVector128D(1.0, 2.0)); Vector128<double> value2; GenericsNative.GetVector128DOut(1.0, 2.0, &value2); Assert.Equal(value2.GetElement(0), 1.0); Assert.Equal(value2.GetElement(1), 2.0); Assert.Throws<MarshalDirectiveException>(() => GenericsNative.GetVector128DOut(1.0, 2.0, out Vector128<double> value3)); Vector128<double>* value4 = GenericsNative.GetVector128DPtr(1.0, 2.0); Assert.Equal(value4->GetElement(0), 1.0); Assert.Equal(value4->GetElement(1), 2.0); Assert.Throws<MarshalDirectiveException>(() => GenericsNative.GetVector128DRef(1.0, 2.0)); Assert.Throws<MarshalDirectiveException>(() => GenericsNative.AddVector128D(default, default)); Vector128<double>[] values = new Vector128<double>[] { default, value2, default, *value4, default, }; Assert.Throws<MarshalDirectiveException>(() => { fixed (Vector128<double>* pValues = &values[0]) { GenericsNative.AddVector128Ds(pValues, values.Length); } }); Assert.Throws<MarshalDirectiveException>(() => GenericsNative.AddVector128Ds(values, values.Length)); Assert.Throws<MarshalDirectiveException>(() => GenericsNative.AddVector128Ds(in values[0], values.Length)); } }
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/libraries/Common/src/Interop/Windows/Kernel32/Interop.GetSystemTime.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Runtime.InteropServices; internal static partial class Interop { internal static partial class Kernel32 { [LibraryImport(Libraries.Kernel32)] [SuppressGCTransition] internal static unsafe partial void GetSystemTime(Interop.Kernel32.SYSTEMTIME* lpSystemTime); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Runtime.InteropServices; internal static partial class Interop { internal static partial class Kernel32 { [LibraryImport(Libraries.Kernel32)] [SuppressGCTransition] internal static unsafe partial void GetSystemTime(Interop.Kernel32.SYSTEMTIME* lpSystemTime); } }
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/libraries/Microsoft.Extensions.Primitives/src/IChangeToken.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; namespace Microsoft.Extensions.Primitives { /// <summary> /// Propagates notifications that a change has occurred. /// </summary> public interface IChangeToken { /// <summary> /// Gets a value that indicates if a change has occurred. /// </summary> bool HasChanged { get; } /// <summary> /// Indicates if this token will pro-actively raise callbacks. If <c>false</c>, the token consumer must /// poll <see cref="HasChanged" /> to detect changes. /// </summary> bool ActiveChangeCallbacks { get; } /// <summary> /// Registers for a callback that will be invoked when the entry has changed. /// <see cref="HasChanged"/> MUST be set before the callback is invoked. /// </summary> /// <param name="callback">The <see cref="Action{Object}"/> to invoke.</param> /// <param name="state">State to be passed into the callback.</param> /// <returns>An <see cref="IDisposable"/> that is used to unregister the callback.</returns> IDisposable RegisterChangeCallback(Action<object?> callback, object? state); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; namespace Microsoft.Extensions.Primitives { /// <summary> /// Propagates notifications that a change has occurred. /// </summary> public interface IChangeToken { /// <summary> /// Gets a value that indicates if a change has occurred. /// </summary> bool HasChanged { get; } /// <summary> /// Indicates if this token will pro-actively raise callbacks. If <c>false</c>, the token consumer must /// poll <see cref="HasChanged" /> to detect changes. /// </summary> bool ActiveChangeCallbacks { get; } /// <summary> /// Registers for a callback that will be invoked when the entry has changed. /// <see cref="HasChanged"/> MUST be set before the callback is invoked. /// </summary> /// <param name="callback">The <see cref="Action{Object}"/> to invoke.</param> /// <param name="state">State to be passed into the callback.</param> /// <returns>An <see cref="IDisposable"/> that is used to unregister the callback.</returns> IDisposable RegisterChangeCallback(Action<object?> callback, object? state); } }
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/libraries/System.DirectoryServices/src/System/DirectoryServices/DirectoryEntries.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Runtime.InteropServices; using System.Collections; using System.DirectoryServices.Interop; namespace System.DirectoryServices { /// <devdoc> /// Contains the children (child entries) of an entry in the Active Directory. /// </devdoc> public class DirectoryEntries : IEnumerable { // the parent of the children in this collection private readonly DirectoryEntry _container; internal DirectoryEntries(DirectoryEntry parent) { _container = parent; } /// <devdoc> /// Gets the schemas that specify which children are shown. /// </devdoc> public SchemaNameCollection SchemaFilter { get { CheckIsContainer(); SchemaNameCollection.FilterDelegateWrapper filter = new SchemaNameCollection.FilterDelegateWrapper(_container.ContainerObject); return new SchemaNameCollection(filter.Getter, filter.Setter); } } private void CheckIsContainer() { if (!_container.IsContainer) throw new InvalidOperationException(SR.Format(SR.DSNotAContainer, _container.Path)); } /// <devdoc> /// Creates a request to create a new entry in the container. /// </devdoc> public DirectoryEntry Add(string name, string schemaClassName) { CheckIsContainer(); object newChild = _container.ContainerObject.Create(schemaClassName, name); DirectoryEntry entry = new DirectoryEntry(newChild, _container.UsePropertyCache, _container.GetUsername(), _container.GetPassword(), _container.AuthenticationType); entry.JustCreated = true; // suspend writing changes until CommitChanges() is called return entry; } /// <devdoc> /// Returns the child with the given name. /// </devdoc> public DirectoryEntry Find(string name) { // For IIS: and WinNT: providers schemaClassName == "" does general search. return Find(name, null); } /// <devdoc> /// Returns the child with the given name and of the given type. /// </devdoc> public DirectoryEntry Find(string name, string? schemaClassName) { CheckIsContainer(); // Note: schemaClassName == null does not work for IIS: provider. object? o = null; try { o = _container.ContainerObject.GetObject(schemaClassName, name); } catch (COMException e) { throw COMExceptionHelper.CreateFormattedComException(e); } return new DirectoryEntry(o, _container.UsePropertyCache, _container.GetUsername(), _container.GetPassword(), _container.AuthenticationType); } /// <devdoc> /// Deletes a child <see cref='System.DirectoryServices.DirectoryEntry'/> from this collection. /// </devdoc> public void Remove(DirectoryEntry entry) { CheckIsContainer(); try { _container.ContainerObject.Delete(entry.SchemaClassName, entry.Name); } catch (COMException e) { throw COMExceptionHelper.CreateFormattedComException(e); } } public IEnumerator GetEnumerator() => new ChildEnumerator(_container); /// <devdoc> /// Supports a simple ForEach-style iteration over a collection and defines /// enumerators, size, and synchronization methods. /// </devdoc> private sealed class ChildEnumerator : IEnumerator { private readonly DirectoryEntry _container; private SafeNativeMethods.EnumVariant? _enumVariant; private DirectoryEntry? _currentEntry; internal ChildEnumerator(DirectoryEntry container) { _container = container; if (container.IsContainer) { _enumVariant = new SafeNativeMethods.EnumVariant((SafeNativeMethods.IEnumVariant)container.ContainerObject._NewEnum); } } /// <devdoc> /// Gets the current element in the collection. 
/// </devdoc> public DirectoryEntry Current { get { if (_enumVariant == null) throw new InvalidOperationException(SR.DSNoCurrentChild); if (_currentEntry == null) _currentEntry = new DirectoryEntry(_enumVariant.GetValue(), _container.UsePropertyCache, _container.GetUsername(), _container.GetPassword(), _container.AuthenticationType); return _currentEntry; } } /// <devdoc> /// Advances the enumerator to the next element of the collection /// and returns a Boolean value indicating whether a valid element is available. /// </devdoc> public bool MoveNext() { if (_enumVariant == null) return false; _currentEntry = null; return _enumVariant.GetNext(); } /// <devdoc> /// Resets the enumerator back to its initial position before the first element in the collection. /// </devdoc> public void Reset() { if (_enumVariant != null) { try { _enumVariant.Reset(); } catch (NotImplementedException) { //Some providers might not implement Reset, workaround the problem. _enumVariant = new SafeNativeMethods.EnumVariant((SafeNativeMethods.IEnumVariant)_container.ContainerObject._NewEnum); } _currentEntry = null; } } object IEnumerator.Current => Current; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Runtime.InteropServices; using System.Collections; using System.DirectoryServices.Interop; namespace System.DirectoryServices { /// <devdoc> /// Contains the children (child entries) of an entry in the Active Directory. /// </devdoc> public class DirectoryEntries : IEnumerable { // the parent of the children in this collection private readonly DirectoryEntry _container; internal DirectoryEntries(DirectoryEntry parent) { _container = parent; } /// <devdoc> /// Gets the schemas that specify which children are shown. /// </devdoc> public SchemaNameCollection SchemaFilter { get { CheckIsContainer(); SchemaNameCollection.FilterDelegateWrapper filter = new SchemaNameCollection.FilterDelegateWrapper(_container.ContainerObject); return new SchemaNameCollection(filter.Getter, filter.Setter); } } private void CheckIsContainer() { if (!_container.IsContainer) throw new InvalidOperationException(SR.Format(SR.DSNotAContainer, _container.Path)); } /// <devdoc> /// Creates a request to create a new entry in the container. /// </devdoc> public DirectoryEntry Add(string name, string schemaClassName) { CheckIsContainer(); object newChild = _container.ContainerObject.Create(schemaClassName, name); DirectoryEntry entry = new DirectoryEntry(newChild, _container.UsePropertyCache, _container.GetUsername(), _container.GetPassword(), _container.AuthenticationType); entry.JustCreated = true; // suspend writing changes until CommitChanges() is called return entry; } /// <devdoc> /// Returns the child with the given name. /// </devdoc> public DirectoryEntry Find(string name) { // For IIS: and WinNT: providers schemaClassName == "" does general search. return Find(name, null); } /// <devdoc> /// Returns the child with the given name and of the given type. /// </devdoc> public DirectoryEntry Find(string name, string? schemaClassName) { CheckIsContainer(); // Note: schemaClassName == null does not work for IIS: provider. object? o = null; try { o = _container.ContainerObject.GetObject(schemaClassName, name); } catch (COMException e) { throw COMExceptionHelper.CreateFormattedComException(e); } return new DirectoryEntry(o, _container.UsePropertyCache, _container.GetUsername(), _container.GetPassword(), _container.AuthenticationType); } /// <devdoc> /// Deletes a child <see cref='System.DirectoryServices.DirectoryEntry'/> from this collection. /// </devdoc> public void Remove(DirectoryEntry entry) { CheckIsContainer(); try { _container.ContainerObject.Delete(entry.SchemaClassName, entry.Name); } catch (COMException e) { throw COMExceptionHelper.CreateFormattedComException(e); } } public IEnumerator GetEnumerator() => new ChildEnumerator(_container); /// <devdoc> /// Supports a simple ForEach-style iteration over a collection and defines /// enumerators, size, and synchronization methods. /// </devdoc> private sealed class ChildEnumerator : IEnumerator { private readonly DirectoryEntry _container; private SafeNativeMethods.EnumVariant? _enumVariant; private DirectoryEntry? _currentEntry; internal ChildEnumerator(DirectoryEntry container) { _container = container; if (container.IsContainer) { _enumVariant = new SafeNativeMethods.EnumVariant((SafeNativeMethods.IEnumVariant)container.ContainerObject._NewEnum); } } /// <devdoc> /// Gets the current element in the collection. 
/// </devdoc> public DirectoryEntry Current { get { if (_enumVariant == null) throw new InvalidOperationException(SR.DSNoCurrentChild); if (_currentEntry == null) _currentEntry = new DirectoryEntry(_enumVariant.GetValue(), _container.UsePropertyCache, _container.GetUsername(), _container.GetPassword(), _container.AuthenticationType); return _currentEntry; } } /// <devdoc> /// Advances the enumerator to the next element of the collection /// and returns a Boolean value indicating whether a valid element is available. /// </devdoc> public bool MoveNext() { if (_enumVariant == null) return false; _currentEntry = null; return _enumVariant.GetNext(); } /// <devdoc> /// Resets the enumerator back to its initial position before the first element in the collection. /// </devdoc> public void Reset() { if (_enumVariant != null) { try { _enumVariant.Reset(); } catch (NotImplementedException) { //Some providers might not implement Reset, workaround the problem. _enumVariant = new SafeNativeMethods.EnumVariant((SafeNativeMethods.IEnumVariant)_container.ContainerObject._NewEnum); } _currentEntry = null; } } object IEnumerator.Current => Current; } } }
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./docs/design/coreclr/profiling/davbr-blog-archive/Profiler stack walking Basics and beyond.md
*This blog post originally appeared on David Broman's blog on 10/6/2005* **Introduction** This article is targeted toward profiler authors, and anyone interested in building a profiler to examine managed applications. I will describe how you can program your profiler to walk managed stacks in version 2.0 of the Common Language Runtime. I’ll try to keep the mood light and zany, as one might expect given the overtly wacky subject matter. The profiling API in version 2.0 of the CLR has a new method that lets your profiler walk the call stack of the application you’re profiling: DoStackSnapshot. Version 1.x of the CLR exposed similar functionality via the in-process debugging interface. But it’s easier, more accurate, and more stable with DoStackSnapshot. DoStackSnapshot uses the same stack walker used by the garbage collector, security system, exception system, etc. So you _know_ it’s got to be right. Access to a full stack trace gives customers of your profiler the ability to get the big picture of what’s going on in an application when something “interesting” happens. Depending on the nature of the application and what a user wants to profile, you can imagine a user wanting a call stack when an object is allocated, a class is loaded, an exception is thrown, etc. Even getting a call stack not in response to an application event, but say a timer event, would be interesting for a sampling profiler. Looking at hot spots in code becomes more enlightening when you can see who called the function that called the function that called the function containing the hot spot. I’m going to focus on getting stack traces via the DoStackSnapshot API. But it is worth noting that another way to get stack traces is by building shadow stacks: you can hook FunctionEnter/Leave to keep your own copy of the current thread’s managed call stack. Shadow stack building is useful if you need stack information at all times during the app’s execution, and don’t mind paying the performance cost of having your profiler’s code run on every managed call and return. DoStackSnapshot is best if you need slightly sparser reporting of stacks, such as in response to interesting events. Even a sampling profiler taking stack snapshots every few milliseconds is much sparser than building shadow stacks. So DoStackSnapshot is well-suited for sampling profilers. **Stack walk on the wild side** It’s nice to be able to get call stacks whenever you want them. But with power comes responsibility. A user of a profiler will not want stack walking to be used for evil purposes like causing an AV or deadlock in the runtime. As a profiler writer, you will have to choose how to wield your power. If you choose the side of good, that will be but your first step. I will talk about how to use DoStackSnapshot, and how to do so carefully. It turns out that the more you want to do with this method, the harder it is to get it right. Unless you actually want to be evil. In which case I’m not talking to you. So let’s take a look at the beast. Here’s what your profiler calls (you can find this in ICorProfilerInfo2, in corprof.idl): ``` HRESULT DoStackSnapshot( [in] ThreadID thread, [in] StackSnapshotCallback *callback, [in] ULONG32 infoFlags, [in] void *clientData, [in, size_is(contextSize), length_is(contextSize)] BYTE context[], [in] ULONG32 contextSize); ``` And here’s what the CLR calls on your profiler (you can also find this in corprof.idl). You’ll pass a pointer to your implementation of this function in the callback parameter above. 
``` typedef HRESULT __stdcall StackSnapshotCallback( FunctionID funcId, UINT_PTR ip, COR_PRF_FRAME_INFO frameInfo, ULONG32 contextSize, BYTE context[], void *clientData); ``` It’s like a sandwich. When your profiler wants to walk the stack, you call DoStackSnapshot. Before the CLR returns from that call, it calls your StackSnapshotCallback several times, once for each managed frame (or run of unmanaged frames) on the stack: ``` Profiler calls DoStackSnapshot. Whole wheat bread CLR calls StackSnapshotCallback. Lettuce frame (“leaf”-most frame, ha) CLR calls StackSnapshotCallback. Tomato frame CLR calls StackSnapshotCallback. Bacon frame (root or “main” frame) CLR returns back to profiler from DoStackSnapshot Whole wheat bread ``` As you can see from my hilarious notations, we notify you of the frames in the reverse order from how they were pushed onto the stack—leaf (last-pushed) frame first, main (first-pushed) frame last. So what do all these parameters mean? I'm not ready to discuss them all yet, but I guess I’m in the mood to talk about a few of them. Let's start with DoStackSnapshot. infoFlags comes from the COR\_PRF\_SNAPSHOT\_INFO enum in corprof.idl, and it allows you to control whether we’ll give you register contexts for the frames we report. You can specify any value you like for clientData and we’ll faithfully give it back to you in your StackSnapshotCallback. In StackSnapshotCallback, we’ll give you the FunctionID of the currently “walked” frame in funcId. This value will be 0 if the current frame is a run of unmanaged frames, but I’m not ready to talk about that just yet. If it’s nonzero, feel free to pass funcId and / or frameInfo to other methods like GetFunctionInfo2 and GetCodeInfo2 to get more info about the function. You can get this function info now during your stack walk, or save the funcIds and get the function info later on to reduce your impact on the running app. If you do the latter, remember that a frameInfo is only valid inside the callback that gives it to you. While it’s ok to save the funcIds for later use, you may not save the frameInfo for later use. Or, if you like, just report the raw numbers to your users; users love seeing meaningless numbers. When you return from StackSnapshotCallback, you will typically return S\_OK and we will continue walking the stack. If you like, you can return S\_FALSE, and that will cause us to abort the stack walk. Your DoStackSnapshot call will then return with CORPROF\_E\_STACKSNAPSHOT\_ABORTED. **Synchronous / Asynchronous** There are two ways you can call DoStackSnapshot. A **synchronous** call is the easiest to get right. You make a synchronous call when, in response to the CLR calling one of your profiler’s ICorProfilerCallback(2) methods, you call DoStackSnapshot to walk the stack of the current thread. This is useful when you want to see what the stack looks like at an interesting notification like ObjectAllocated. So you just call DoStackSnapshot from within your ICorProfilerCallback(2) method, passing 0 / NULL for those parameters I haven’t told you about yet. When you want to get jiggy with it, you’re kicking it **asynchronous** style. An asynchronous stack walk occurs when you walk the stack of a different thread or, heaven forbid, forcefully interrupt a thread to perform a stack walk (on itself or another thread). The latter involves hijacking the instruction pointer of a thread to force it execute your own code at arbitrary times. This is insanely dangerous for too many reasons to list here. 
Just, please, don’t do it. I’ll restrict my description of asynchronous stack walks to non-hijacking uses of DoStackSnapshot to walk a separate target thread. I call this “asynchronous” because the target thread was doing any old arbitrary thing at the time we chose to walk its stack. This technique is commonly used by sampling profilers. **_Walking all over someone else_** So let’s break down the cross-thread stack walk a little. You got two threads: the **current** thread and the **target** thread. The current thread is the thread executing DoStackSnapshot. The target thread is the thread whose stack is being walked by DoStackSnapshot. You specify the target thread by passing its thread ID in the thread parameter to DoStackSnapshot. What happens next is not for the faint of heart. Remember, the target thread was executing any old arbitrary code when you came along and asked to walk its stack. So what do we do? We suspend the target thread, and it stays suspended the whole time we walk it. Unbelievable! Have we crossed over to evil? Or can we do this safely? I’m pleased you asked. This is indeed dangerous, and I’ll talk some later about how to do this safely. But first, I'm going to get into “mixed-mode stacks”. **Multiple Personality Disorder** A managed application is likely not going to spend all of its time in managed code. PInvokes and COM interop allow managed code to call into unmanaged code, and sometimes back again via delegates. Also, if you blink, you might miss managed code calling directly into the unmanaged runtime (CLR) to do JIT compilation, deal with exceptions, do a garbage collection, etc. So when you do a stack walk you will probably encounter a mixed-mode stack: some frames are managed functions, and others are unmanaged functions. What is one to do? **_Grow up, already!_** Before I continue from this exciting cliffhanger, a brief interlude. Everyone knows that stacks on our faithful modern PCs grow (i.e., “push”) to smaller addresses. But when we visualize these addresses in our minds or on whiteboards, we disagree with how to sort them vertically. Some of us imagine the stack growing _up_ (little addresses on top); some see it growing _down_ (little addresses on the bottom). We’re divided on this issue in our team as well. I choose to side with any debugger I’ve ever used: call stack traces and memory dumps tell me the little addresses are “above” the big addresses. So stacks grow up. Main is at the bottom, the leaf callee is at the top. If you disagree, you’ll have to do some mental rearranging to get through this article. **_Waiter, there are holes in my stack_** Now that we’re speaking the same language. Let’s look at a mixed-mode stack: ``` Unmanaged D (Managed) Unmanaged C (Managed) B (Managed) Unmanaged A (Managed) Main (Managed) ``` Stepping back a bit, it’s worthwhile to understand why DoStackSnapshot exists in the first place. It’s there to help you walk _managed_ frames on the stack. If you tried to walk managed frames yourself, you would get unreliable results, particularly on 32 bits, because of some wacky calling conventions used in managed code. The CLR understands these calling conventions, and DoStackSnapshot is therefore in a uniquely suitable position to help you decode them. However, DoStackSnapshot is not a complete solution if you want to be able to walk the entire stack, including unmanaged frames. Here’s where you have a choice: 1. Do nothing and report stacks with “unmanaged holes” to your users, or 2. 
Write your own unmanaged stack walker to fill in those holes. When DoStackSnapshot comes across a block of unmanaged frames, it calls your StackSnapshotCallback with funcId=0. (I think I mentioned this before, but I’m not sure you were listening.) If you’re going with option #1 above, simply do nothing in your callback when funcId=0. We’ll call you again for the next managed frame and you can wake up at that point. Note that if this unmanaged block actually consists of more than one unmanaged frame we still only call StackSnapshotCallback once. Remember, we’re making no effort to decode the unmanaged block—we have special cheat sheets that help us skip over the block to the next managed frame, and that’s how we progress. We don’t necessarily know what’s inside the unmanaged block. That’s for you to figure out. **_That first step’s a doozy_** Unfortunately, filling in the unmanaged holes isn’t the only hard part. Just beginning the walk is a challenge. Take a look at our stack above. No, really, don’t be lazy; just scroll up and take a look. There’s unmanaged gunk at the top. Sometimes you’ll be lucky, and that unmanaged gunk will be COM or PInvoke code. If so, the CLR is smart enough to know how to skip it and will happily begin your walk at the first managed frame (D). However, you might still want to walk the top-most unmanaged block to report as complete a stack as possible. And even if you don’t, you might be forced to anyway if you’re _not_ lucky, and that unmanaged gunk represents not COM or PInvoke code, but helper code in the CLR itself (e.g., to do jitting, a GC, etc.). If that’s the case we won’t be able to find the D frame without your help. So an unseeded call to DoStackSnapshot will result in an error (CORPROF\_E\_STACKSNAPSHOT\_UNMANAGED\_CTX or CORPROF\_E\_STACKSNAPSHOT\_UNSAFE). By the way, if you haven’t visited corerror.h, you really should. It’s beautiful this time of year. If you’re still alert, you might have noticed I used the word “unseeded” without defining it. Well, now’s the time. DoStackSnapshot takes a “seed context” via the context and contextSize parameters. Context is an overused term with many meanings. In this case, I'm talking about a register context. If you peruse the architecture-dependent windows headers (e.g., nti386.h) you’ll find a struct CONTEXT. It contains values for the CPU registers, and represents the CPU’s state at a particular moment in time. This is the type of context I'm talking about here. If you pass NULL for the context parameter, the stack walk we perform is “unseeded”, and we just start at the top. However, if you pass a non-NULL value for the context parameter, presumably representing the CPU-state at some spot lower down on the stack (preferably pointing to the D frame), then we perform a stack walk “seeded” with your context. We ignore the real top of the stack and just start wherever you point us. Ok, that was a lie. The context you pass us is more of a “hint” than an outright directive. If the CLR is certain it can find the first managed frame (because the top-most unmanaged block is PInvoke or COM code), it’ll just do that and ignore your seed. Don’t take it personally, though. The CLR is trying to help you by providing the most accurate stack walk it can. Your seed is only useful if the top-most unmanaged block is helper code in the CLR itself, for which we have no cheat sheet to help us skip it. Since that’s the only situation your seed is useful, that’s the only situation your seed is used. 
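To make the seeding mechanics concrete, here is a minimal sketch of how a profiler might wrap the seeded versus unseeded call. It is an illustration only: `DoSeededSnapshot` and its parameters are hypothetical helper names, while `ICorProfilerInfo2::DoStackSnapshot`, `CONTEXT`, and the `StackSnapshotCallback` typedef are the real pieces described above.

```
// Hedged sketch: DoSeededSnapshot is a hypothetical wrapper; the interface,
// method, and callback typedef are the ones shown earlier in this post.
#include <windows.h>
#include <cor.h>
#include <corprof.h>

HRESULT DoSeededSnapshot(ICorProfilerInfo2 *pInfo,
                         ThreadID targetThread,
                         StackSnapshotCallback *pCallback,
                         void *state,
                         CONTEXT *pSeed)   // NULL means "unseeded"
{
    BYTE *seedBytes = reinterpret_cast<BYTE *>(pSeed);
    ULONG32 seedSize = (pSeed != NULL) ? sizeof(CONTEXT) : 0;

    // With a NULL seed the CLR starts at the real top of the stack; with a
    // non-NULL seed it treats the CONTEXT as a hint for where the top-most
    // managed frame lives, exactly as described above.
    return pInfo->DoStackSnapshot(targetThread,
                                  pCallback,
                                  COR_PRF_SNAPSHOT_DEFAULT,
                                  state,
                                  seedBytes,
                                  seedSize);
}
```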
Now if you’re not only still alert but also astute, you will wonder how on earth you can even provide us the seed in the first place. If the target thread is still in motion, you can’t just go and walk this target thread’s stack to find the D frame (and thus calculate your seed context). And yet I’m sitting here telling you to calculate your seed context by doing your unmanaged walk _before_ calling DoStackSnapshot (and thus before DoStackSnapshot takes care of suspending the target thread for you). So what… does the target thread need to be suspended by you _and_ the CLR?! Well, yeah. I think it’s time to choreograph this ballet. But before you get too deep, note that the issue of whether and how to seed a stack walk applies only to _asynchronous_ walks. If you’re doing a synchronous walk, DoStackSnapshot will always be able to find its way to the top-most managed frame without your help. No seed necessary. **_All together now_** For the truly adventurous profiler that is doing an asynchronous, cross-thread, seeded stack walk while filling in the unmanaged holes, here’s what it would look like. Block of Unmanaged Frames 1. You suspend the target thread (target thread’s suspend count is now 1) 2. You get the target thread’s current register context 3. You determine if the register context points to unmanaged code (e.g., call ICorProfilerInfo2::GetFunctionFromIP(), and see if you get back a 0 FunctionID) 4. In this case the register context does point to unmanaged code, so you perform an unmanaged stack walk until you find the top-most managed frame (D) ``` Function D (Managed) ``` 1. You call DoStackSnapshot with your seed context. CLR suspends target thread again: its suspend count is now 2. Our sandwich begins. 1. CLR calls your StackSnapshotCallback with FunctionID for D. ``` Block of Unmanaged Frames ``` 1. CLR calls your StackSnapshotCallback with FunctionID=0. You’ll need to walk this block yourself. You can stop when you hit the first managed frame, or you can cheat: delay your unmanaged walk until sometime after your next callback, as the next callback will tell you exactly where the next managed frame begins (and thus where your unmanaged walk should end). ``` Function C (Managed) ``` 1. CLR calls your StackSnapshotCallback with FunctionID for C. ``` Function B (Managed) ``` 1. CLR calls your StackSnapshotCallback with FunctionID for B. ``` Block of Unmanaged Frames ``` 1. CLR calls your StackSnapshotCallback with FunctionID=0. Again, you’ll need to walk this block yourself. ``` Function A (Managed) ``` 1. CLR calls your StackSnapshotCallback with FunctionID for A. ``` Main (Managed) ``` 1. CLR calls your StackSnapshotCallback with FunctionID for Main. 2. DoStackSnapshot “resumes” target thread (its suspend count is now 1) and returns. Our sandwich is complete. 1. You resume target thread (its suspend count is now 0, so it’s resumed for real). **Triumph over evil** Ok, this is way too much power without some serious caution. In the most advanced case, you’re responding to timer interrupts and suspending application threads arbitrarily to walk their stacks. Yikes! Being good is hard and involves rules that are not obvious at first. So let's dive in. **_The bad seed_** Let’s start with an easy one. If your profiler supplies a bogus (non-null) seed when you call DoStackSnapshot, we’ll give you bogus results. We’ll look at the stack where you point us, and make assumptions about what the values on the stack are supposed to represent. 
That will cause us to dereference what we expect are addresses on the stack. So if you screw up, we’ll dereference values off into la la land. We do everything we can to avoid an all-out 2nd-chance AV (which would tear down your customer’s process). But you really should make an effort to get your seed right and not take any chances. **_Woes of suspension_** The second you decide to do cross-thread walking, you’ve decided, at a minimum, to ask the CLR to start suspending threads on your behalf. And, even worse, if you want to walk the unmanaged block at the top of the stack, you’ve decided to start suspending threads all by yourself without invoking the CLR’s wisdom on whether this might be a good idea at the current time. We all remember the dining philosophers from CS training, right? Everyone picks up his right fork, and no one can grab his left fork because each dude is waiting on the dude to his left to put down the needed fork. And if they’re all seated in a circle, you’ve got a cycle of waiting and a lot of empty stomachs. The reason these idiots starve to death is, well, for one thing, they think they each need two forks to eat, which is pretty dumb. But that’s not the point. They break a simple rule of deadlock avoidance: if multiple locks are to be taken, always take them in the same order. Following this rule would avoid the cycle where A waits on B, B waits on C, C waits on A. So here’s where it gets interesting. Suppose an app follows the rule and always takes locks in the same order. But now imagine someone comes along and starts arbitrarily suspending threads (that would be your profiler doing the suspending, by the way). The complexity has leaped substantially. What if the suspender now needs to take a lock held by the suspendee? Or more insidious, maybe the suspender needs a lock held by a dude who’s waiting for a lock held by another dude who’s waiting for a lock held by the suspendee? Suspension adds a new edge to our thread-dependency graph, which can introduce cycles. Let’s take a look at some specific problems: Problem 1: _Suspendee owns locks needed by suspender, or needed by threads the suspender depends on._ Problem 1a: _Those locks are CLR locks._ As you might imagine, the CLR has a bit of thread synchronization to do here and there, and therefore has several locks that are used internally. When you call DoStackSnapshot, the CLR detects the condition that the target thread owns a CLR lock that will be needed by the current thread (in order to perform the stack walk). When the condition arises, the CLR refuses to do the suspension, and DoStackSnapshot immediately returns with an error (CORPROF\_E\_STACKSNAPSHOT\_UNSAFE). At this point, if you’ve suspended the thread yourself before your call to DoStackSnapshot, then you will resume the thread yourself, and the pickle remains empty of you. Problem 1b: _Those locks are your own profiler’s locks_ This is more common-sense than anything, really. You may have your own thread synchronization to do here and there, so you can imagine an application thread (Thread A) hits a profiler callback, and runs some of your profiler code that involves taking one of your own locks. Then poof, another thread (Thread B) decides to walk A. This means B will suspend A. So you just need to remember that, while A is suspended, you really shouldn’t have B try to take any of your own locks that A might possibly own. 
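One practical way to respect this is to keep the walking thread's callback free of profiler locks altogether, for example by having it write only into a buffer that the walking thread alone owns. A rough sketch follows, with `SampleBuffer` and `LockFreeSnapshotCallback` as hypothetical names; the callback signature is the real one from corprof.idl.

```
// Hedged sketch: the walking thread records frames into a buffer that only it
// owns, so the callback never needs a profiler lock that the suspended target
// thread might already hold.
#include <cor.h>
#include <corprof.h>

struct SampleBuffer              // hypothetical, owned solely by the walking thread
{
    static const ULONG32 kMaxFrames = 512;
    FunctionID frames[kMaxFrames];
    ULONG32 count;
};

HRESULT __stdcall LockFreeSnapshotCallback(
    FunctionID funcId, UINT_PTR ip, COR_PRF_FRAME_INFO frameInfo,
    ULONG32 contextSize, BYTE context[], void *clientData)
{
    SampleBuffer *buf = static_cast<SampleBuffer *>(clientData);
    if (buf->count >= SampleBuffer::kMaxFrames)
        return S_FALSE;          // buffer full: abort this walk, no locks taken
    // A funcId of 0 marks a run of unmanaged frames; record it as-is.
    buf->frames[buf->count++] = funcId;
    return S_OK;                 // keep walking
}
```

Merging such samples into shared profiler state can then wait until after the target thread has been resumed.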
For example, thread B will execute StackSnapshotCallback during the stack walk, so you shouldn’t be trying to take any locks during that callback that could be owned by the suspended target thread (A). Problem 2: _While you suspend the target thread, the target thread tries to suspend you_ “Come on! Like that could really happen.” Believe it or not, if: - Your app runs on a multiproc box, and - Thread A runs on one proc and thread B runs on another, and - A tries to suspend B while B tries to suspend A then it’s possible that both suspensions win, and both threads end up suspended. It’s like the line from that movie: “Multiproc means never having to say, ‘I lose.’”. Since each thread is waiting for the other to wake it up, they stay suspended forever. It is the most romantic of all deadlocks. This really can happen, and it is more disconcerting than problem #1, because you can’t rely on the CLR to detect this for you when you do the suspension yourself before calling DoStackSnapshot. Once you’ve done the suspension, it’s too late! Ok, so, why is the target thread trying to suspend you anyway? Well, in a hypothetical, poorly-written profiler, you could imagine that the stack walking code (along with the suspension code) might be executed by any number of threads at arbitrary times. In other words, imagine A is trying to walk B at the same time B is trying to walk A. They both try to suspend each other simultaneously (because they’re both executing the SuspendThread() portion of the profiler’s stack walking routine), both win, and we deadlock. The rule here is obvious—don’t do that! A less obvious reason that the target thread might try to suspend your walking thread is due to the inner workings of the CLR. The CLR suspends application threads to help with things like garbage collection. So if your walker tries to walk (and thus suspend) the thread doing the GC at the same time the thread doing the GC tries to suspend your walker, you are hosed. The way out, fortunately, is quite simple. The CLR is only going to suspend threads it needs to suspend in order to do its work. Let’s label the two threads involved in your stack walk: Thread A = the current thread (the thread performing the walk), and Thread B = the target thread (the thread whose stack is walked). As long as Thread A has _never executed managed code_ (and is therefore of no use to the CLR during a garbage collection), then the CLR will never try to suspend Thread A. This means it’s safe for your profiler to have Thread A suspend Thread B, as the CLR will have no reason for B to suspend A. If you’re writing a sampling profiler, it’s quite natural to ensure all of this. You will typically have a separate thread of your own creation that responds to timer interrupts and walks the stacks of other threads. Call this your sampler thread. Since you create this sampler thread yourself and have control over what it executes, the CLR will have no reason to suspend it. And this also fixes the “poorly-written profiler” example above, since this sampler thread is the only thread of your profiler trying to walk or suspend other threads. So your profiler will never try to directly suspend the sampler thread. This is our first nontrivial rule so, for emphasis, let’s repeat with some neat formatting: Rule 1: Only a thread that has never run managed code can suspend another thread **_Nobody likes to walk a corpse_** If you are doing a cross-thread stack walk, you need to ensure your target thread remains alive for the duration of your walk. 
Just because you pass the target thread as a parameter to the DoStackSnapshot call doesn’t mean you’ve implicitly added some kind of lifetime reference to it. If the app wants the thread to go away it will. And if that happens while you’re trying to walk it, you could easily AV. Lucky for you, the CLR notifies profilers when a thread is about to be destroyed via the aptly-named ThreadDestroyed callback (ICorProfilerCallback(2)). So it’s your responsibility to implement ThreadDestroyed and have it wait until anyone walking that thread is finished. This is interesting enough to qualify as our next rule: Rule 2: Block in ThreadDestroyed callback until that thread’s stack walk is complete **_GC helps you make a cycle_** Ok, at this point you might want to take a bathroom break or get some caffeine or something. Things get a little hairy here. Let’s start with the text of the next rule, and decipher it from there: Rule 3: Do not hold a lock during a profiler call that can trigger a GC A while back I mentioned that it is clearly a bad idea for your profiler to hold one if its own locks if the owning thread might get suspended and then walked by another thread that will need the same lock. Rule 3 warns us against something more subtle. Here, I'm saying you shouldn’t hold _any_ of your own locks if the owning thread is about to call an ICorProfilerInfo(2) method that might trigger a garbage collection. A couple examples should help. Example #1: - Thread A successfully grabs and now owns one of your profiler locks - Thread B = thread doing the GC - Thread B calls profiler’s GarbageCollectionStarted callback - Thread B blocks on the same profiler lock - Thread A executes GetClassFromTokenAndTypeArgs() - GetClassFromTokenAndTypeArgs tries to trigger a GC, but notices a GC is already in progress. - Thread A blocks, waiting for GC currently in progress (Thread B) to complete - But B is waiting for A, because of your profiler lock. ![](media/gccycle.jpg) Example #2: - Thread A successfully grabs and now owns one of your profiler locks - Thread B calls profiler’s ModuleLoadStarted callback - Thread B blocks on the same profiler lock - Thread A executes GetClassFromTokenAndTypeArgs() - GetClassFromTokenAndTypeArgs triggers a GC - Thread A (now doing the GC) waits for B to be ready to be collected - But B is waiting for A, because of your profiler lock. ![](media/deadlock.jpg) Have you digested the madness? The crux of the problem is that garbage collection has its own synchronization mechanisms. Example 1 involved the fact that only one GC can occur at a time. This is admittedly a fringe case, as GCs don’t spontaneously occur quite so often that one has to wait for another, unless you’re operating under stressful conditions. Even so, if you profile long enough, this will happen, and you need to be prepared. Example 2 involved the fact that the thread doing the GC must wait for the other application threads to be ready to be collected. The problem arises when you introduce one of your own locks into the mix, thus forming a cycle. In both cases we broke the rule by allowing A to own one of your locks and then call GetClassFromTokenAndTypeArgs (though calling any method that might trigger a GC is sufficient to doom us). How’s that caffeine holding out? If it’s working, you probably have a couple questions. 
“How do I know which ICorProfilerInfo(2) methods might trigger a garbage collection?” We plan to document this on MSDN, or at the least, in my or [Jonathan Keljo’s blog](http://blogs.msdn.com/jkeljo/default.aspx). “What does this have to do with stack walking?” Yeah, if you read carefully, you’ll see that this rule never even mentions DoStackSnapshot. And no, DoStackSnapshot is not even one of those mysterious ICorProfilerInfo(2) methods that trigger a GC. The reason I'm discussing this rule here is that it’s precisely you daring cowboys—who asynchronously walk stacks at arbitrary samples—who will be most likely to implement your own profiler locks, and thus be prone to falling into this trap. Indeed, rule 2 above downright tells you to add some synchronization into your profiler. It is quite likely a sampling profiler will have other synchronization mechanisms as well, perhaps to coordinate reading / writing shared data structures at arbitrary times. Of course, it’s still quite possible for a profiler that never touches DoStackSnapshot to need to deal with this issue. So tell your friends. **Enough is enough** I’m just about tuckered out, so I’m gonna close this out with a quick summary of the highlights. Here's what's important to remember. 1. Synchronous stack walks involve walking the current thread in response to a profiler callback. These don’t require seeding, suspending, or any special rules. Enjoy! 2. Asynchronous walks require a seed if the top of the stack is unmanaged code not part of a PInvoke or COM call. You supply a seed by directly suspending the target thread and walking it yourself, until you find the top-most managed frame. If you don’t supply a seed in this case, DoStackSnapshot will just return a failure code to you. 3. If you directly suspend threads, remember that only a thread that has never run managed code can suspend another thread 4. When doing asynchronous walks, always block in your ThreadDestroyed callback until that thread’s stack walk is complete 5. Do not hold a lock while your profiler calls into a CLR function that can trigger a GC Finally, a note of thanks to the rest of the CLR Profiling API team, as the writing of these rules is truly a team effort. And special thanks to Sean Selitrennikoff who provided an earlier incarnation of much of this content.
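To make the "no locks inside StackSnapshotCallback" advice above concrete, here is a minimal sketch of a callback that is easy to keep deadlock-free: it only copies the reported FunctionIDs into a preallocated buffer and takes no locks at all. The `WalkBuffer` type and the function name are hypothetical; the signature is just the StackSnapshotCallback shape from corprof.idl.

```
// Minimal sketch: a lock-free StackSnapshotCallback. "WalkBuffer" and the
// function name are hypothetical; the signature matches corprof.idl.
#include <windows.h>
#include <corprof.h>

// Preallocated per-walk buffer; the caller zeroes 'count' before each walk,
// so the callback never allocates and never takes a lock.
struct WalkBuffer
{
    static const ULONG32 kMaxFrames = 512;
    FunctionID frames[kMaxFrames];
    ULONG32    count;
};

HRESULT __stdcall MyStackSnapshotCallback(FunctionID funcId,
                                          UINT_PTR   ip,
                                          COR_PRF_FRAME_INFO frameInfo,
                                          ULONG32    contextSize,
                                          BYTE       context[],
                                          void*      clientData)
{
    WalkBuffer* buf = static_cast<WalkBuffer*>(clientData);

    // funcId == 0 marks a run of unmanaged frames; record it as-is and decode
    // that block later, outside the walk, if you want to fill in the hole.
    if (buf->count < WalkBuffer::kMaxFrames)
    {
        buf->frames[buf->count++] = funcId;
        return S_OK;   // keep walking
    }
    return S_FALSE;    // buffer full: abort (DoStackSnapshot returns CORPROF_E_STACKSNAPSHOT_ABORTED)
}
```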
*This blog post originally appeared on David Broman's blog on 10/6/2005* **Introduction** This article is targeted toward profiler authors, and anyone interested in building a profiler to examine managed applications. I will describe how you can program your profiler to walk managed stacks in version 2.0 of the Common Language Runtime. I’ll try to keep the mood light and zany, as one might expect given the overtly wacky subject matter. The profiling API in version 2.0 of the CLR has a new method that lets your profiler walk the call stack of the application you’re profiling: DoStackSnapshot. Version 1.x of the CLR exposed similar functionality via the in-process debugging interface. But it’s easier, more accurate, and more stable with DoStackSnapshot. DoStackSnapshot uses the same stack walker used by the garbage collector, security system, exception system, etc. So you _know_ it’s got to be right. Access to a full stack trace gives customers of your profiler the ability to get the big picture of what’s going on in an application when something “interesting” happens. Depending on the nature of the application and what a user wants to profile, you can imagine a user wanting a call stack when an object is allocated, a class is loaded, an exception is thrown, etc. Even getting a call stack not in response to an application event, but say a timer event, would be interesting for a sampling profiler. Looking at hot spots in code becomes more enlightening when you can see who called the function that called the function that called the function containing the hot spot. I’m going to focus on getting stack traces via the DoStackSnapshot API. But it is worth noting that another way to get stack traces is by building shadow stacks: you can hook FunctionEnter/Leave to keep your own copy of the current thread’s managed call stack. Shadow stack building is useful if you need stack information at all times during the app’s execution, and don’t mind paying the performance cost of having your profiler’s code run on every managed call and return. DoStackSnapshot is best if you need slightly sparser reporting of stacks, such as in response to interesting events. Even a sampling profiler taking stack snapshots every few milliseconds is much sparser than building shadow stacks. So DoStackSnapshot is well-suited for sampling profilers. **Stack walk on the wild side** It’s nice to be able to get call stacks whenever you want them. But with power comes responsibility. A user of a profiler will not want stack walking to be used for evil purposes like causing an AV or deadlock in the runtime. As a profiler writer, you will have to choose how to wield your power. If you choose the side of good, that will be but your first step. I will talk about how to use DoStackSnapshot, and how to do so carefully. It turns out that the more you want to do with this method, the harder it is to get it right. Unless you actually want to be evil. In which case I’m not talking to you. So let’s take a look at the beast. Here’s what your profiler calls (you can find this in ICorProfilerInfo2, in corprof.idl): ``` HRESULT DoStackSnapshot( [in] ThreadID thread, [in] StackSnapshotCallback *callback, [in] ULONG32 infoFlags, [in] void *clientData, [in, size_is(contextSize), length_is(contextSize)] BYTE context[], [in] ULONG32 contextSize); ``` And here’s what the CLR calls on your profiler (you can also find this in corprof.idl). You’ll pass a pointer to your implementation of this function in the callback parameter above. 
``` typedef HRESULT __stdcall StackSnapshotCallback( FunctionID funcId, UINT_PTR ip, COR_PRF_FRAME_INFO frameInfo, ULONG32 contextSize, BYTE context[], void *clientData); ``` It’s like a sandwich. When your profiler wants to walk the stack, you call DoStackSnapshot. Before the CLR returns from that call, it calls your StackSnapshotCallback several times, once for each managed frame (or run of unmanaged frames) on the stack: ``` Profiler calls DoStackSnapshot. Whole wheat bread CLR calls StackSnapshotCallback. Lettuce frame (“leaf”-most frame, ha) CLR calls StackSnapshotCallback. Tomato frame CLR calls StackSnapshotCallback. Bacon frame (root or “main” frame) CLR returns back to profiler from DoStackSnapshot Whole wheat bread ``` As you can see from my hilarious notations, we notify you of the frames in the reverse order from how they were pushed onto the stack—leaf (last-pushed) frame first, main (first-pushed) frame last. So what do all these parameters mean? I'm not ready to discuss them all yet, but I guess I’m in the mood to talk about a few of them. Let's start with DoStackSnapshot. infoFlags comes from the COR\_PRF\_SNAPSHOT\_INFO enum in corprof.idl, and it allows you to control whether we’ll give you register contexts for the frames we report. You can specify any value you like for clientData and we’ll faithfully give it back to you in your StackSnapshotCallback. In StackSnapshotCallback, we’ll give you the FunctionID of the currently “walked” frame in funcId. This value will be 0 if the current frame is a run of unmanaged frames, but I’m not ready to talk about that just yet. If it’s nonzero, feel free to pass funcId and / or frameInfo to other methods like GetFunctionInfo2 and GetCodeInfo2 to get more info about the function. You can get this function info now during your stack walk, or save the funcIds and get the function info later on to reduce your impact on the running app. If you do the latter, remember that a frameInfo is only valid inside the callback that gives it to you. While it’s ok to save the funcIds for later use, you may not save the frameInfo for later use. Or, if you like, just report the raw numbers to your users; users love seeing meaningless numbers. When you return from StackSnapshotCallback, you will typically return S\_OK and we will continue walking the stack. If you like, you can return S\_FALSE, and that will cause us to abort the stack walk. Your DoStackSnapshot call will then return with CORPROF\_E\_STACKSNAPSHOT\_ABORTED. **Synchronous / Asynchronous** There are two ways you can call DoStackSnapshot. A **synchronous** call is the easiest to get right. You make a synchronous call when, in response to the CLR calling one of your profiler’s ICorProfilerCallback(2) methods, you call DoStackSnapshot to walk the stack of the current thread. This is useful when you want to see what the stack looks like at an interesting notification like ObjectAllocated. So you just call DoStackSnapshot from within your ICorProfilerCallback(2) method, passing 0 / NULL for those parameters I haven’t told you about yet. When you want to get jiggy with it, you’re kicking it **asynchronous** style. An asynchronous stack walk occurs when you walk the stack of a different thread or, heaven forbid, forcefully interrupt a thread to perform a stack walk (on itself or another thread). The latter involves hijacking the instruction pointer of a thread to force it execute your own code at arbitrary times. This is insanely dangerous for too many reasons to list here. 
Just, please, don’t do it. I’ll restrict my description of asynchronous stack walks to non-hijacking uses of DoStackSnapshot to walk a separate target thread. I call this “asynchronous” because the target thread was doing any old arbitrary thing at the time we chose to walk its stack. This technique is commonly used by sampling profilers. **_Walking all over someone else_** So let’s break down the cross-thread stack walk a little. You got two threads: the **current** thread and the **target** thread. The current thread is the thread executing DoStackSnapshot. The target thread is the thread whose stack is being walked by DoStackSnapshot. You specify the target thread by passing its thread ID in the thread parameter to DoStackSnapshot. What happens next is not for the faint of heart. Remember, the target thread was executing any old arbitrary code when you came along and asked to walk its stack. So what do we do? We suspend the target thread, and it stays suspended the whole time we walk it. Unbelievable! Have we crossed over to evil? Or can we do this safely? I’m pleased you asked. This is indeed dangerous, and I’ll talk some later about how to do this safely. But first, I'm going to get into “mixed-mode stacks”. **Multiple Personality Disorder** A managed application is likely not going to spend all of its time in managed code. PInvokes and COM interop allow managed code to call into unmanaged code, and sometimes back again via delegates. Also, if you blink, you might miss managed code calling directly into the unmanaged runtime (CLR) to do JIT compilation, deal with exceptions, do a garbage collection, etc. So when you do a stack walk you will probably encounter a mixed-mode stack: some frames are managed functions, and others are unmanaged functions. What is one to do? **_Grow up, already!_** Before I continue from this exciting cliffhanger, a brief interlude. Everyone knows that stacks on our faithful modern PCs grow (i.e., “push”) to smaller addresses. But when we visualize these addresses in our minds or on whiteboards, we disagree with how to sort them vertically. Some of us imagine the stack growing _up_ (little addresses on top); some see it growing _down_ (little addresses on the bottom). We’re divided on this issue in our team as well. I choose to side with any debugger I’ve ever used: call stack traces and memory dumps tell me the little addresses are “above” the big addresses. So stacks grow up. Main is at the bottom, the leaf callee is at the top. If you disagree, you’ll have to do some mental rearranging to get through this article. **_Waiter, there are holes in my stack_** Now that we’re speaking the same language. Let’s look at a mixed-mode stack: ``` Unmanaged D (Managed) Unmanaged C (Managed) B (Managed) Unmanaged A (Managed) Main (Managed) ``` Stepping back a bit, it’s worthwhile to understand why DoStackSnapshot exists in the first place. It’s there to help you walk _managed_ frames on the stack. If you tried to walk managed frames yourself, you would get unreliable results, particularly on 32 bits, because of some wacky calling conventions used in managed code. The CLR understands these calling conventions, and DoStackSnapshot is therefore in a uniquely suitable position to help you decode them. However, DoStackSnapshot is not a complete solution if you want to be able to walk the entire stack, including unmanaged frames. Here’s where you have a choice: 1. Do nothing and report stacks with “unmanaged holes” to your users, or 2. 
Write your own unmanaged stack walker to fill in those holes. When DoStackSnapshot comes across a block of unmanaged frames, it calls your StackSnapshotCallback with funcId=0. (I think I mentioned this before, but I’m not sure you were listening.) If you’re going with option #1 above, simply do nothing in your callback when funcId=0. We’ll call you again for the next managed frame and you can wake up at that point. Note that if this unmanaged block actually consists of more than one unmanaged frame we still only call StackSnapshotCallback once. Remember, we’re making no effort to decode the unmanaged block—we have special cheat sheets that help us skip over the block to the next managed frame, and that’s how we progress. We don’t necessarily know what’s inside the unmanaged block. That’s for you to figure out. **_That first step’s a doozy_** Unfortunately, filling in the unmanaged holes isn’t the only hard part. Just beginning the walk is a challenge. Take a look at our stack above. No, really, don’t be lazy; just scroll up and take a look. There’s unmanaged gunk at the top. Sometimes you’ll be lucky, and that unmanaged gunk will be COM or PInvoke code. If so, the CLR is smart enough to know how to skip it and will happily begin your walk at the first managed frame (D). However, you might still want to walk the top-most unmanaged block to report as complete a stack as possible. And even if you don’t, you might be forced to anyway if you’re _not_ lucky, and that unmanaged gunk represents not COM or PInvoke code, but helper code in the CLR itself (e.g., to do jitting, a GC, etc.). If that’s the case we won’t be able to find the D frame without your help. So an unseeded call to DoStackSnapshot will result in an error (CORPROF\_E\_STACKSNAPSHOT\_UNMANAGED\_CTX or CORPROF\_E\_STACKSNAPSHOT\_UNSAFE). By the way, if you haven’t visited corerror.h, you really should. It’s beautiful this time of year. If you’re still alert, you might have noticed I used the word “unseeded” without defining it. Well, now’s the time. DoStackSnapshot takes a “seed context” via the context and contextSize parameters. Context is an overused term with many meanings. In this case, I'm talking about a register context. If you peruse the architecture-dependent windows headers (e.g., nti386.h) you’ll find a struct CONTEXT. It contains values for the CPU registers, and represents the CPU’s state at a particular moment in time. This is the type of context I'm talking about here. If you pass NULL for the context parameter, the stack walk we perform is “unseeded”, and we just start at the top. However, if you pass a non-NULL value for the context parameter, presumably representing the CPU-state at some spot lower down on the stack (preferably pointing to the D frame), then we perform a stack walk “seeded” with your context. We ignore the real top of the stack and just start wherever you point us. Ok, that was a lie. The context you pass us is more of a “hint” than an outright directive. If the CLR is certain it can find the first managed frame (because the top-most unmanaged block is PInvoke or COM code), it’ll just do that and ignore your seed. Don’t take it personally, though. The CLR is trying to help you by providing the most accurate stack walk it can. Your seed is only useful if the top-most unmanaged block is helper code in the CLR itself, for which we have no cheat sheet to help us skip it. Since that’s the only situation your seed is useful, that’s the only situation your seed is used. 
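To make the seeded and unseeded call shapes concrete, here is a minimal sketch. It assumes `g_pProfilerInfo` is your cached ICorProfilerInfo2 pointer and `MyStackSnapshotCallback` is a StackSnapshotCallback you have implemented (both names are hypothetical), and it assumes the seed CONTEXT has already been captured somehow; how you capture it safely is exactly what gets choreographed below.

```
// Sketch only. g_pProfilerInfo and MyStackSnapshotCallback are assumed to be
// defined elsewhere in your profiler; they are not part of the CLR's API.
#include <windows.h>
#include <corprof.h>

extern ICorProfilerInfo2* g_pProfilerInfo;
extern HRESULT __stdcall MyStackSnapshotCallback(FunctionID, UINT_PTR,
                                                 COR_PRF_FRAME_INFO, ULONG32,
                                                 BYTE[], void*);

// Unseeded walk: fine whenever the CLR can find the top-most managed frame on
// its own (synchronous walks, or asynchronous walks whose top-most unmanaged
// code is PInvoke / COM interop code).
HRESULT WalkUnseeded(ThreadID targetThread, void* clientData)
{
    return g_pProfilerInfo->DoStackSnapshot(targetThread,
                                            MyStackSnapshotCallback,
                                            COR_PRF_SNAPSHOT_DEFAULT,
                                            clientData,
                                            NULL,   // no seed context
                                            0);
}

// Seeded walk: pass a register context pointing at the top-most managed frame,
// for the "CLR helper code on top of the stack" case described above.
HRESULT WalkSeeded(ThreadID targetThread, CONTEXT* seed, void* clientData)
{
    return g_pProfilerInfo->DoStackSnapshot(targetThread,
                                            MyStackSnapshotCallback,
                                            COR_PRF_SNAPSHOT_DEFAULT,
                                            clientData,
                                            reinterpret_cast<BYTE*>(seed),
                                            sizeof(CONTEXT));
}
```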
Now if you’re not only still alert but also astute, you will wonder how on earth you can even provide us the seed in the first place. If the target thread is still in motion, you can’t just go and walk this target thread’s stack to find the D frame (and thus calculate your seed context). And yet I’m sitting here telling you to calculate your seed context by doing your unmanaged walk _before_ calling DoStackSnapshot (and thus before DoStackSnapshot takes care of suspending the target thread for you). So what… does the target thread need to be suspended by you _and_ the CLR?! Well, yeah. I think it’s time to choreograph this ballet. But before you get too deep, note that the issue of whether and how to seed a stack walk applies only to _asynchronous_ walks. If you’re doing a synchronous walk, DoStackSnapshot will always be able to find its way to the top-most managed frame without your help. No seed necessary. **_All together now_** For the truly adventurous profiler that is doing an asynchronous, cross-thread, seeded stack walk while filling in the unmanaged holes, here’s what it would look like. Block of Unmanaged Frames 1. You suspend the target thread (target thread’s suspend count is now 1) 2. You get the target thread’s current register context 3. You determine if the register context points to unmanaged code (e.g., call ICorProfilerInfo2::GetFunctionFromIP(), and see if you get back a 0 FunctionID) 4. In this case the register context does point to unmanaged code, so you perform an unmanaged stack walk until you find the top-most managed frame (D) ``` Function D (Managed) ``` 1. You call DoStackSnapshot with your seed context. CLR suspends target thread again: its suspend count is now 2. Our sandwich begins. 1. CLR calls your StackSnapshotCallback with FunctionID for D. ``` Block of Unmanaged Frames ``` 1. CLR calls your StackSnapshotCallback with FunctionID=0. You’ll need to walk this block yourself. You can stop when you hit the first managed frame, or you can cheat: delay your unmanaged walk until sometime after your next callback, as the next callback will tell you exactly where the next managed frame begins (and thus where your unmanaged walk should end). ``` Function C (Managed) ``` 1. CLR calls your StackSnapshotCallback with FunctionID for C. ``` Function B (Managed) ``` 1. CLR calls your StackSnapshotCallback with FunctionID for B. ``` Block of Unmanaged Frames ``` 1. CLR calls your StackSnapshotCallback with FunctionID=0. Again, you’ll need to walk this block yourself. ``` Function A (Managed) ``` 1. CLR calls your StackSnapshotCallback with FunctionID for A. ``` Main (Managed) ``` 1. CLR calls your StackSnapshotCallback with FunctionID for Main. 2. DoStackSnapshot “resumes” target thread (its suspend count is now 1) and returns. Our sandwich is complete. 1. You resume target thread (its suspend count is now 0, so it’s resumed for real). **Triumph over evil** Ok, this is way too much power without some serious caution. In the most advanced case, you’re responding to timer interrupts and suspending application threads arbitrarily to walk their stacks. Yikes! Being good is hard and involves rules that are not obvious at first. So let's dive in. **_The bad seed_** Let’s start with an easy one. If your profiler supplies a bogus (non-null) seed when you call DoStackSnapshot, we’ll give you bogus results. We’ll look at the stack where you point us, and make assumptions about what the values on the stack are supposed to represent. 
That will cause us to dereference what we expect are addresses on the stack. So if you screw up, we’ll dereference values off into la la land. We do everything we can to avoid an all-out 2nd-chance AV (which would tear down your customer’s process). But you really should make an effort to get your seed right and not take any chances. **_Woes of suspension_** The second you decide to do cross-thread walking, you’ve decided, at a minimum, to ask the CLR to start suspending threads on your behalf. And, even worse, if you want to walk the unmanaged block at the top of the stack, you’ve decided to start suspending threads all by yourself without invoking the CLR’s wisdom on whether this might be a good idea at the current time. We all remember the dining philosophers from CS training, right? Everyone picks up his right fork, and no one can grab his left fork because each dude is waiting on the dude to his left to put down the needed fork. And if they’re all seated in a circle, you’ve got a cycle of waiting and a lot of empty stomachs. The reason these idiots starve to death is, well, for one thing, they think they each need two forks to eat, which is pretty dumb. But that’s not the point. They break a simple rule of deadlock avoidance: if multiple locks are to be taken, always take them in the same order. Following this rule would avoid the cycle where A waits on B, B waits on C, C waits on A. So here’s where it gets interesting. Suppose an app follows the rule and always takes locks in the same order. But now imagine someone comes along and starts arbitrarily suspending threads (that would be your profiler doing the suspending, by the way). The complexity has leaped substantially. What if the suspender now needs to take a lock held by the suspendee? Or more insidious, maybe the suspender needs a lock held by a dude who’s waiting for a lock held by another dude who’s waiting for a lock held by the suspendee? Suspension adds a new edge to our thread-dependency graph, which can introduce cycles. Let’s take a look at some specific problems: Problem 1: _Suspendee owns locks needed by suspender, or needed by threads the suspender depends on._ Problem 1a: _Those locks are CLR locks._ As you might imagine, the CLR has a bit of thread synchronization to do here and there, and therefore has several locks that are used internally. When you call DoStackSnapshot, the CLR detects the condition that the target thread owns a CLR lock that will be needed by the current thread (in order to perform the stack walk). When the condition arises, the CLR refuses to do the suspension, and DoStackSnapshot immediately returns with an error (CORPROF\_E\_STACKSNAPSHOT\_UNSAFE). At this point, if you’ve suspended the thread yourself before your call to DoStackSnapshot, then you will resume the thread yourself, and the pickle remains empty of you. Problem 1b: _Those locks are your own profiler’s locks_ This is more common-sense than anything, really. You may have your own thread synchronization to do here and there, so you can imagine an application thread (Thread A) hits a profiler callback, and runs some of your profiler code that involves taking one of your own locks. Then poof, another thread (Thread B) decides to walk A. This means B will suspend A. So you just need to remember that, while A is suspended, you really shouldn’t have B try to take any of your own locks that A might possibly own. 
For example, thread B will execute StackSnapshotCallback during the stack walk, so you shouldn’t be trying to take any locks during that callback that could be owned by the suspended target thread (A). Problem 2: _While you suspend the target thread, the target thread tries to suspend you_ “Come on! Like that could really happen.” Believe it or not, if: - Your app runs on a multiproc box, and - Thread A runs on one proc and thread B runs on another, and - A tries to suspend B while B tries to suspend A then it’s possible that both suspensions win, and both threads end up suspended. It’s like the line from that movie: “Multiproc means never having to say, ‘I lose.’”. Since each thread is waiting for the other to wake it up, they stay suspended forever. It is the most romantic of all deadlocks. This really can happen, and it is more disconcerting than problem #1, because you can’t rely on the CLR to detect this for you when you do the suspension yourself before calling DoStackSnapshot. Once you’ve done the suspension, it’s too late! Ok, so, why is the target thread trying to suspend you anyway? Well, in a hypothetical, poorly-written profiler, you could imagine that the stack walking code (along with the suspension code) might be executed by any number of threads at arbitrary times. In other words, imagine A is trying to walk B at the same time B is trying to walk A. They both try to suspend each other simultaneously (because they’re both executing the SuspendThread() portion of the profiler’s stack walking routine), both win, and we deadlock. The rule here is obvious—don’t do that! A less obvious reason that the target thread might try to suspend your walking thread is due to the inner workings of the CLR. The CLR suspends application threads to help with things like garbage collection. So if your walker tries to walk (and thus suspend) the thread doing the GC at the same time the thread doing the GC tries to suspend your walker, you are hosed. The way out, fortunately, is quite simple. The CLR is only going to suspend threads it needs to suspend in order to do its work. Let’s label the two threads involved in your stack walk: Thread A = the current thread (the thread performing the walk), and Thread B = the target thread (the thread whose stack is walked). As long as Thread A has _never executed managed code_ (and is therefore of no use to the CLR during a garbage collection), then the CLR will never try to suspend Thread A. This means it’s safe for your profiler to have Thread A suspend Thread B, as the CLR will have no reason for B to suspend A. If you’re writing a sampling profiler, it’s quite natural to ensure all of this. You will typically have a separate thread of your own creation that responds to timer interrupts and walks the stacks of other threads. Call this your sampler thread. Since you create this sampler thread yourself and have control over what it executes, the CLR will have no reason to suspend it. And this also fixes the “poorly-written profiler” example above, since this sampler thread is the only thread of your profiler trying to walk or suspend other threads. So your profiler will never try to directly suspend the sampler thread. This is our first nontrivial rule so, for emphasis, let’s repeat with some neat formatting: Rule 1: Only a thread that has never run managed code can suspend another thread **_Nobody likes to walk a corpse_** If you are doing a cross-thread stack walk, you need to ensure your target thread remains alive for the duration of your walk. 
Just because you pass the target thread as a parameter to the DoStackSnapshot call doesn’t mean you’ve implicitly added some kind of lifetime reference to it. If the app wants the thread to go away it will. And if that happens while you’re trying to walk it, you could easily AV. Lucky for you, the CLR notifies profilers when a thread is about to be destroyed via the aptly-named ThreadDestroyed callback (ICorProfilerCallback(2)). So it’s your responsibility to implement ThreadDestroyed and have it wait until anyone walking that thread is finished. This is interesting enough to qualify as our next rule: Rule 2: Block in ThreadDestroyed callback until that thread’s stack walk is complete **_GC helps you make a cycle_** Ok, at this point you might want to take a bathroom break or get some caffeine or something. Things get a little hairy here. Let’s start with the text of the next rule, and decipher it from there: Rule 3: Do not hold a lock during a profiler call that can trigger a GC A while back I mentioned that it is clearly a bad idea for your profiler to hold one if its own locks if the owning thread might get suspended and then walked by another thread that will need the same lock. Rule 3 warns us against something more subtle. Here, I'm saying you shouldn’t hold _any_ of your own locks if the owning thread is about to call an ICorProfilerInfo(2) method that might trigger a garbage collection. A couple examples should help. Example #1: - Thread A successfully grabs and now owns one of your profiler locks - Thread B = thread doing the GC - Thread B calls profiler’s GarbageCollectionStarted callback - Thread B blocks on the same profiler lock - Thread A executes GetClassFromTokenAndTypeArgs() - GetClassFromTokenAndTypeArgs tries to trigger a GC, but notices a GC is already in progress. - Thread A blocks, waiting for GC currently in progress (Thread B) to complete - But B is waiting for A, because of your profiler lock. ![](media/gccycle.jpg) Example #2: - Thread A successfully grabs and now owns one of your profiler locks - Thread B calls profiler’s ModuleLoadStarted callback - Thread B blocks on the same profiler lock - Thread A executes GetClassFromTokenAndTypeArgs() - GetClassFromTokenAndTypeArgs triggers a GC - Thread A (now doing the GC) waits for B to be ready to be collected - But B is waiting for A, because of your profiler lock. ![](media/deadlock.jpg) Have you digested the madness? The crux of the problem is that garbage collection has its own synchronization mechanisms. Example 1 involved the fact that only one GC can occur at a time. This is admittedly a fringe case, as GCs don’t spontaneously occur quite so often that one has to wait for another, unless you’re operating under stressful conditions. Even so, if you profile long enough, this will happen, and you need to be prepared. Example 2 involved the fact that the thread doing the GC must wait for the other application threads to be ready to be collected. The problem arises when you introduce one of your own locks into the mix, thus forming a cycle. In both cases we broke the rule by allowing A to own one of your locks and then call GetClassFromTokenAndTypeArgs (though calling any method that might trigger a GC is sufficient to doom us). How’s that caffeine holding out? If it’s working, you probably have a couple questions. 
“How do I know which ICorProfilerInfo(2) methods might trigger a garbage collection?” We plan to document this on MSDN, or at the least, in my or [Jonathan Keljo’s blog](http://blogs.msdn.com/jkeljo/default.aspx). “What does this have to do with stack walking?” Yeah, if you read carefully, you’ll see that this rule never even mentions DoStackSnapshot. And no, DoStackSnapshot is not even one of those mysterious ICorProfilerInfo(2) methods that trigger a GC. The reason I'm discussing this rule here is that it’s precisely you daring cowboys—who asynchronously walk stacks at arbitrary samples—who will be most likely to implement your own profiler locks, and thus be prone to falling into this trap. Indeed, rule 2 above downright tells you to add some synchronization into your profiler. It is quite likely a sampling profiler will have other synchronization mechanisms as well, perhaps to coordinate reading / writing shared data structures at arbitrary times. Of course, it’s still quite possible for a profiler that never touches DoStackSnapshot to need to deal with this issue. So tell your friends. **Enough is enough** I’m just about tuckered out, so I’m gonna close this out with a quick summary of the highlights. Here's what's important to remember. 1. Synchronous stack walks involve walking the current thread in response to a profiler callback. These don’t require seeding, suspending, or any special rules. Enjoy! 2. Asynchronous walks require a seed if the top of the stack is unmanaged code not part of a PInvoke or COM call. You supply a seed by directly suspending the target thread and walking it yourself, until you find the top-most managed frame. If you don’t supply a seed in this case, DoStackSnapshot will just return a failure code to you. 3. If you directly suspend threads, remember that only a thread that has never run managed code can suspend another thread 4. When doing asynchronous walks, always block in your ThreadDestroyed callback until that thread’s stack walk is complete 5. Do not hold a lock while your profiler calls into a CLR function that can trigger a GC Finally, a note of thanks to the rest of the CLR Profiling API team, as the writing of these rules is truly a team effort. And special thanks to Sean Selitrennikoff who provided an earlier incarnation of much of this content.
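To close with something concrete, here is one minimal way a sampling profiler might arrange the synchronization behind points 3 and 4 above: the walking happens on a dedicated native sampler thread that never executes managed code, and ThreadDestroyed blocks on the same lock the walker holds. All names are hypothetical, and a real profiler would probably want finer-grained, per-thread locking; this sketch only shows the shape of the idea.

```
// Sketch only: a single, coarse walk lock shared by the sampler thread and the
// ThreadDestroyed handler. Initialize g_walkLock with InitializeCriticalSection
// at profiler startup. g_pProfilerInfo and MyStackSnapshotCallback are assumed
// to be defined elsewhere in your profiler.
#include <windows.h>
#include <corprof.h>

extern ICorProfilerInfo2* g_pProfilerInfo;
extern HRESULT __stdcall MyStackSnapshotCallback(FunctionID, UINT_PTR,
                                                 COR_PRF_FRAME_INFO, ULONG32,
                                                 BYTE[], void*);

CRITICAL_SECTION g_walkLock;

// Runs on the profiler's own native sampler thread, which never executes
// managed code, so the CLR never has a reason to suspend it (point 3).
HRESULT SampleOneThread(ThreadID target, void* clientData)
{
    EnterCriticalSection(&g_walkLock);
    HRESULT hr = g_pProfilerInfo->DoStackSnapshot(target,
                                                  MyStackSnapshotCallback,
                                                  COR_PRF_SNAPSHOT_DEFAULT,
                                                  clientData,
                                                  NULL, 0);   // unseeded for brevity
    LeaveCriticalSection(&g_walkLock);
    return hr;
}

// Body of your ICorProfilerCallback2::ThreadDestroyed implementation (point 4):
// taking the walk lock blocks here until any in-flight walk has finished.
HRESULT ProfilerThreadDestroyed(ThreadID threadId)
{
    EnterCriticalSection(&g_walkLock);
    // ... drop any per-thread bookkeeping for threadId ...
    LeaveCriticalSection(&g_walkLock);
    return S_OK;
}
```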
-1
dotnet/runtime
66,422
Fix JsonSerializer src-gen issues with reference handler feature
Fixes https://github.com/dotnet/runtime/issues/64813.
layomia
2022-03-09T23:28:28Z
2022-03-14T17:40:01Z
9bc400f576a004b6af1c6b31d15e6bb356212dad
c2fa3ec056464f9f7081af56c252e481a355ec62
Fix JsonSerializer src-gen issues with reference handler feature. Fixes https://github.com/dotnet/runtime/issues/64813.
./src/tests/JIT/Directed/RVAInit/extended.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } .assembly extern xunit.core {} .assembly extern mscorlib { } .assembly extended { } .custom instance void [mscorlib]System.Security.UnverifiableCodeAttribute::.ctor() = ( 01 00 00 00 ) .file alignment 512 .namespace JitTest { .class private auto ansi beforefieldinit Test extends [mscorlib]System.Object { .field private static unsigned int8 buffer at D_1 .class explicit ansi sealed nested private '__StaticArrayInitTypeSize=1024' extends [mscorlib]System.ValueType { .pack 1 .size 1024 } .field private static valuetype JitTest.Test/'__StaticArrayInitTypeSize=1024' buffer2 at D_1 .method public hidebysig static void fillmem(unsigned int8& a) cil managed { .maxstack 2 .locals (unsigned int8& pinned V_0, int32 V_1) IL_0000: ldarg.0 IL_0001: stloc.0 IL_0002: ldloc.0 IL_0003: conv.i IL_0004: ldc.i4 0x3ff IL_0009: add IL_000a: ldind.u1 IL_000b: ldc.i4.s 123 IL_000d: beq.s IL_0015 IL_000f: newobj instance void [mscorlib]System.Exception::.ctor() IL_0014: throw IL_0015: ldc.i4.0 IL_0016: stloc.1 IL_0017: br.s IL_0024 IL_0019: ldloc.0 IL_001a: conv.i IL_001b: ldloc.1 IL_001c: add IL_001d: ldloc.1 IL_001e: conv.u1 IL_001f: stind.i1 IL_0020: ldloc.1 IL_0021: ldc.i4.1 IL_0022: add IL_0023: stloc.1 IL_0024: ldloc.1 IL_0025: ldc.i4 0x400 IL_002a: blt.s IL_0019 IL_002c: ldc.i4.0 IL_002d: conv.u IL_002e: stloc.0 IL_002f: ret } .method public hidebysig static void chekmem(unsigned int8& a) cil managed { .maxstack 2 .locals (unsigned int8& pinned V_0, int32 V_1) IL_0000: ldarg.0 IL_0001: stloc.0 IL_0002: ldc.i4.0 IL_0003: stloc.1 IL_0004: br.s IL_0019 IL_0006: ldloc.0 IL_0007: conv.i IL_0008: ldloc.1 IL_0009: add IL_000a: ldind.u1 IL_000b: ldloc.1 IL_000c: conv.u1 IL_000d: beq.s IL_0015 IL_000f: newobj instance void [mscorlib]System.Exception::.ctor() IL_0014: throw IL_0015: ldloc.1 IL_0016: ldc.i4.1 IL_0017: add IL_0018: stloc.1 IL_0019: ldloc.1 IL_001a: ldc.i4 0x400 IL_001f: blt.s IL_0006 IL_0021: ldc.i4.0 IL_0022: conv.u IL_0023: stloc.0 IL_0024: ret } .method private hidebysig static int32 Main() cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 2 .locals (int32 V_0) IL_0000: ldsfld unsigned int8 JitTest.Test::buffer IL_0005: ldc.i4.s 11 IL_0007: beq.s IL_000f IL_0009: newobj instance void [mscorlib]System.Exception::.ctor() IL_000e: throw IL_000f: ldsflda unsigned int8 JitTest.Test::buffer IL_0014: call void JitTest.Test::fillmem(unsigned int8&) IL_0019: ldsflda unsigned int8 JitTest.Test::buffer IL_001e: call void JitTest.Test::chekmem(unsigned int8&) IL_0023: ldstr "Passed => 100" IL_0028: call void [System.Console]System.Console::WriteLine(string) IL_002d: ldc.i4.s 100 IL_002f: stloc.0 IL_0030: br.s IL_0032 IL_0032: ldloc.0 IL_0033: ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { .maxstack 8 IL_0000: ldarg.0 IL_0001: call instance void [mscorlib]System.Object::.ctor() IL_0006: ret } } } .data D_1 = bytearray( 0B 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 7B )
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } .assembly extern xunit.core {} .assembly extern mscorlib { } .assembly extended { } .custom instance void [mscorlib]System.Security.UnverifiableCodeAttribute::.ctor() = ( 01 00 00 00 ) .file alignment 512 .namespace JitTest { .class private auto ansi beforefieldinit Test extends [mscorlib]System.Object { .field private static unsigned int8 buffer at D_1 .class explicit ansi sealed nested private '__StaticArrayInitTypeSize=1024' extends [mscorlib]System.ValueType { .pack 1 .size 1024 } .field private static valuetype JitTest.Test/'__StaticArrayInitTypeSize=1024' buffer2 at D_1 .method public hidebysig static void fillmem(unsigned int8& a) cil managed { .maxstack 2 .locals (unsigned int8& pinned V_0, int32 V_1) IL_0000: ldarg.0 IL_0001: stloc.0 IL_0002: ldloc.0 IL_0003: conv.i IL_0004: ldc.i4 0x3ff IL_0009: add IL_000a: ldind.u1 IL_000b: ldc.i4.s 123 IL_000d: beq.s IL_0015 IL_000f: newobj instance void [mscorlib]System.Exception::.ctor() IL_0014: throw IL_0015: ldc.i4.0 IL_0016: stloc.1 IL_0017: br.s IL_0024 IL_0019: ldloc.0 IL_001a: conv.i IL_001b: ldloc.1 IL_001c: add IL_001d: ldloc.1 IL_001e: conv.u1 IL_001f: stind.i1 IL_0020: ldloc.1 IL_0021: ldc.i4.1 IL_0022: add IL_0023: stloc.1 IL_0024: ldloc.1 IL_0025: ldc.i4 0x400 IL_002a: blt.s IL_0019 IL_002c: ldc.i4.0 IL_002d: conv.u IL_002e: stloc.0 IL_002f: ret } .method public hidebysig static void chekmem(unsigned int8& a) cil managed { .maxstack 2 .locals (unsigned int8& pinned V_0, int32 V_1) IL_0000: ldarg.0 IL_0001: stloc.0 IL_0002: ldc.i4.0 IL_0003: stloc.1 IL_0004: br.s IL_0019 IL_0006: ldloc.0 IL_0007: conv.i IL_0008: ldloc.1 IL_0009: add IL_000a: ldind.u1 IL_000b: ldloc.1 IL_000c: conv.u1 IL_000d: beq.s IL_0015 IL_000f: newobj instance void [mscorlib]System.Exception::.ctor() IL_0014: throw IL_0015: ldloc.1 IL_0016: ldc.i4.1 IL_0017: add IL_0018: stloc.1 IL_0019: ldloc.1 IL_001a: ldc.i4 0x400 IL_001f: blt.s IL_0006 IL_0021: ldc.i4.0 IL_0022: conv.u IL_0023: stloc.0 IL_0024: ret } .method private hidebysig static int32 Main() cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 2 .locals (int32 V_0) IL_0000: ldsfld unsigned int8 JitTest.Test::buffer IL_0005: ldc.i4.s 11 IL_0007: beq.s IL_000f IL_0009: newobj instance void [mscorlib]System.Exception::.ctor() IL_000e: throw IL_000f: ldsflda unsigned int8 JitTest.Test::buffer IL_0014: call void JitTest.Test::fillmem(unsigned int8&) IL_0019: ldsflda unsigned int8 JitTest.Test::buffer IL_001e: call void JitTest.Test::chekmem(unsigned int8&) IL_0023: ldstr "Passed => 100" IL_0028: call void [System.Console]System.Console::WriteLine(string) IL_002d: ldc.i4.s 100 IL_002f: stloc.0 IL_0030: br.s IL_0032 IL_0032: ldloc.0 IL_0033: ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { .maxstack 8 IL_0000: ldarg.0 IL_0001: call instance void [mscorlib]System.Object::.ctor() IL_0006: ret } } } .data D_1 = bytearray( 0B 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 7B )
-1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and so doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
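To illustrate the distinction this description draws, the toy model below (not the real JIT sources and not the PR's diff) shows why a query such as `supportSIMDTypes()` is the right gate for ABI-related decisions on Arm64, while the raw `featureSIMD` flag can be switched off by configuration:

```
// Toy model only -- hypothetical types, not the actual dotnet/runtime code.
#include <cstdio>

struct ToyCompiler
{
    bool featureSIMD;   // stands in for the flag that COMPlus_FeatureSIMD=0 clears
    bool isTargetArm64; // stands in for the real target checks

    // The query ABI-related code should ask: SIMD support cannot be turned off
    // on a target (Arm64) whose ABI requires it.
    bool supportSIMDTypes() const
    {
        return isTargetArm64 || featureSIMD;
    }
};

int main()
{
    ToyCompiler c = { /* featureSIMD */ false, /* isTargetArm64 */ true };

    std::printf("featureSIMD        = %d\n", c.featureSIMD);        // 0 -- wrong gate for ABI decisions
    std::printf("supportSIMDTypes() = %d\n", c.supportSIMDTypes()); // 1 -- stays true on Arm64
    return 0;
}
```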
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and so doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/jit/compiler.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Compiler XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif // _MSC_VER #include "hostallocator.h" #include "emit.h" #include "ssabuilder.h" #include "valuenum.h" #include "rangecheck.h" #include "lower.h" #include "stacklevelsetter.h" #include "jittelemetry.h" #include "patchpointinfo.h" #include "jitstd/algorithm.h" extern ICorJitHost* g_jitHost; #if defined(DEBUG) // Column settings for COMPlus_JitDumpIR. We could(should) make these programmable. #define COLUMN_OPCODE 30 #define COLUMN_OPERANDS (COLUMN_OPCODE + 25) #define COLUMN_KINDS 110 #define COLUMN_FLAGS (COLUMN_KINDS + 32) #endif #if defined(DEBUG) unsigned Compiler::jitTotalMethodCompiled = 0; #endif // defined(DEBUG) #if defined(DEBUG) LONG Compiler::jitNestingLevel = 0; #endif // defined(DEBUG) // static bool Compiler::s_pAltJitExcludeAssembliesListInitialized = false; AssemblyNamesList2* Compiler::s_pAltJitExcludeAssembliesList = nullptr; #ifdef DEBUG // static bool Compiler::s_pJitDisasmIncludeAssembliesListInitialized = false; AssemblyNamesList2* Compiler::s_pJitDisasmIncludeAssembliesList = nullptr; // static bool Compiler::s_pJitFunctionFileInitialized = false; MethodSet* Compiler::s_pJitMethodSet = nullptr; #endif // DEBUG #ifdef CONFIGURABLE_ARM_ABI // static bool GlobalJitOptions::compFeatureHfa = false; LONG GlobalJitOptions::compUseSoftFPConfigured = 0; #endif // CONFIGURABLE_ARM_ABI /***************************************************************************** * * Little helpers to grab the current cycle counter value; this is done * differently based on target architecture, host toolchain, etc. The * main thing is to keep the overhead absolutely minimal; in fact, on * x86/x64 we use RDTSC even though it's not thread-safe; GetThreadCycles * (which is monotonous) is just too expensive. */ #ifdef FEATURE_JIT_METHOD_PERF #if defined(HOST_X86) || defined(HOST_AMD64) #if defined(_MSC_VER) #include <intrin.h> inline bool _our_GetThreadCycles(unsigned __int64* cycleOut) { *cycleOut = __rdtsc(); return true; } #elif defined(__GNUC__) inline bool _our_GetThreadCycles(unsigned __int64* cycleOut) { uint32_t hi, lo; __asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi)); *cycleOut = (static_cast<unsigned __int64>(hi) << 32) | static_cast<unsigned __int64>(lo); return true; } #else // neither _MSC_VER nor __GNUC__ // The following *might* work - might as well try. #define _our_GetThreadCycles(cp) GetThreadCycles(cp) #endif #elif defined(HOST_ARM) || defined(HOST_ARM64) // If this doesn't work please see ../gc/gc.cpp for additional ARM // info (and possible solutions). #define _our_GetThreadCycles(cp) GetThreadCycles(cp) #else // not x86/x64 and not ARM // Don't know what this target is, but let's give it a try; if // someone really wants to make this work, please add the right // code here. 
#define _our_GetThreadCycles(cp) GetThreadCycles(cp) #endif // which host OS const BYTE genTypeSizes[] = { #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) sz, #include "typelist.h" #undef DEF_TP }; const BYTE genTypeAlignments[] = { #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) al, #include "typelist.h" #undef DEF_TP }; const BYTE genTypeStSzs[] = { #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) st, #include "typelist.h" #undef DEF_TP }; const BYTE genActualTypes[] = { #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) jitType, #include "typelist.h" #undef DEF_TP }; #endif // FEATURE_JIT_METHOD_PERF /*****************************************************************************/ inline unsigned getCurTime() { SYSTEMTIME tim; GetSystemTime(&tim); return (((tim.wHour * 60) + tim.wMinute) * 60 + tim.wSecond) * 1000 + tim.wMilliseconds; } /*****************************************************************************/ #ifdef DEBUG /*****************************************************************************/ static FILE* jitSrcFilePtr; static unsigned jitCurSrcLine; void Compiler::JitLogEE(unsigned level, const char* fmt, ...) { va_list args; if (verbose) { va_start(args, fmt); vflogf(jitstdout, fmt, args); va_end(args); } va_start(args, fmt); vlogf(level, fmt, args); va_end(args); } #endif // DEBUG /*****************************************************************************/ #if defined(DEBUG) || MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || DISPLAY_SIZES || CALL_ARG_STATS static unsigned genMethodCnt; // total number of methods JIT'ted unsigned genMethodICnt; // number of interruptible methods unsigned genMethodNCnt; // number of non-interruptible methods static unsigned genSmallMethodsNeedingExtraMemoryCnt = 0; #endif /*****************************************************************************/ #if MEASURE_NODE_SIZE NodeSizeStats genNodeSizeStats; NodeSizeStats genNodeSizeStatsPerFunc; unsigned genTreeNcntHistBuckets[] = {10, 20, 30, 40, 50, 100, 200, 300, 400, 500, 1000, 5000, 10000, 0}; Histogram genTreeNcntHist(genTreeNcntHistBuckets); unsigned genTreeNsizHistBuckets[] = {1000, 5000, 10000, 50000, 100000, 500000, 1000000, 0}; Histogram genTreeNsizHist(genTreeNsizHistBuckets); #endif // MEASURE_NODE_SIZE /*****************************************************************************/ #if MEASURE_MEM_ALLOC unsigned memAllocHistBuckets[] = {64, 128, 192, 256, 512, 1024, 4096, 8192, 0}; Histogram memAllocHist(memAllocHistBuckets); unsigned memUsedHistBuckets[] = {16, 32, 64, 128, 192, 256, 512, 1024, 4096, 8192, 0}; Histogram memUsedHist(memUsedHistBuckets); #endif // MEASURE_MEM_ALLOC /***************************************************************************** * * Variables to keep track of total code amounts. */ #if DISPLAY_SIZES size_t grossVMsize; // Total IL code size size_t grossNCsize; // Native code + data size size_t totalNCsize; // Native code + data + GC info size (TODO-Cleanup: GC info size only accurate for JIT32_GCENCODER) size_t gcHeaderISize; // GC header size: interruptible methods size_t gcPtrMapISize; // GC pointer map size: interruptible methods size_t gcHeaderNSize; // GC header size: non-interruptible methods size_t gcPtrMapNSize; // GC pointer map size: non-interruptible methods #endif // DISPLAY_SIZES /***************************************************************************** * * Variables to keep track of argument counts. 
*/ #if CALL_ARG_STATS unsigned argTotalCalls; unsigned argHelperCalls; unsigned argStaticCalls; unsigned argNonVirtualCalls; unsigned argVirtualCalls; unsigned argTotalArgs; // total number of args for all calls (including objectPtr) unsigned argTotalDWordArgs; unsigned argTotalLongArgs; unsigned argTotalFloatArgs; unsigned argTotalDoubleArgs; unsigned argTotalRegArgs; unsigned argTotalTemps; unsigned argTotalLclVar; unsigned argTotalDeferred; unsigned argTotalConst; unsigned argTotalObjPtr; unsigned argTotalGTF_ASGinArgs; unsigned argMaxTempsPerMethod; unsigned argCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; Histogram argCntTable(argCntBuckets); unsigned argDWordCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; Histogram argDWordCntTable(argDWordCntBuckets); unsigned argDWordLngCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; Histogram argDWordLngCntTable(argDWordLngCntBuckets); unsigned argTempsCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; Histogram argTempsCntTable(argTempsCntBuckets); #endif // CALL_ARG_STATS /***************************************************************************** * * Variables to keep track of basic block counts. */ #if COUNT_BASIC_BLOCKS // -------------------------------------------------- // Basic block count frequency table: // -------------------------------------------------- // <= 1 ===> 26872 count ( 56% of total) // 2 .. 2 ===> 669 count ( 58% of total) // 3 .. 3 ===> 4687 count ( 68% of total) // 4 .. 5 ===> 5101 count ( 78% of total) // 6 .. 10 ===> 5575 count ( 90% of total) // 11 .. 20 ===> 3028 count ( 97% of total) // 21 .. 50 ===> 1108 count ( 99% of total) // 51 .. 100 ===> 182 count ( 99% of total) // 101 .. 1000 ===> 34 count (100% of total) // 1001 .. 10000 ===> 0 count (100% of total) // -------------------------------------------------- unsigned bbCntBuckets[] = {1, 2, 3, 5, 10, 20, 50, 100, 1000, 10000, 0}; Histogram bbCntTable(bbCntBuckets); /* Histogram for the IL opcode size of methods with a single basic block */ unsigned bbSizeBuckets[] = {1, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 0}; Histogram bbOneBBSizeTable(bbSizeBuckets); #endif // COUNT_BASIC_BLOCKS /***************************************************************************** * * Used by optFindNaturalLoops to gather statistical information such as * - total number of natural loops * - number of loops with 1, 2, ... 
exit conditions * - number of loops that have an iterator (for like) * - number of loops that have a constant iterator */ #if COUNT_LOOPS unsigned totalLoopMethods; // counts the total number of methods that have natural loops unsigned maxLoopsPerMethod; // counts the maximum number of loops a method has unsigned totalLoopOverflows; // # of methods that identified more loops than we can represent unsigned totalLoopCount; // counts the total number of natural loops unsigned totalUnnatLoopCount; // counts the total number of (not-necessarily natural) loops unsigned totalUnnatLoopOverflows; // # of methods that identified more unnatural loops than we can represent unsigned iterLoopCount; // counts the # of loops with an iterator (for like) unsigned simpleTestLoopCount; // counts the # of loops with an iterator and a simple loop condition (iter < const) unsigned constIterLoopCount; // counts the # of loops with a constant iterator (for like) bool hasMethodLoops; // flag to keep track if we already counted a method as having loops unsigned loopsThisMethod; // counts the number of loops in the current method bool loopOverflowThisMethod; // True if we exceeded the max # of loops in the method. /* Histogram for number of loops in a method */ unsigned loopCountBuckets[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0}; Histogram loopCountTable(loopCountBuckets); /* Histogram for number of loop exits */ unsigned loopExitCountBuckets[] = {0, 1, 2, 3, 4, 5, 6, 0}; Histogram loopExitCountTable(loopExitCountBuckets); #endif // COUNT_LOOPS //------------------------------------------------------------------------ // getJitGCType: Given the VM's CorInfoGCType convert it to the JIT's var_types // // Arguments: // gcType - an enum value that originally came from an element // of the BYTE[] returned from getClassGClayout() // // Return Value: // The corresponsing enum value from the JIT's var_types // // Notes: // The gcLayout of each field of a struct is returned from getClassGClayout() // as a BYTE[] but each BYTE element is actually a CorInfoGCType value // Note when we 'know' that there is only one element in theis array // the JIT will often pass the address of a single BYTE, instead of a BYTE[] // var_types Compiler::getJitGCType(BYTE gcType) { var_types result = TYP_UNKNOWN; CorInfoGCType corInfoType = (CorInfoGCType)gcType; if (corInfoType == TYPE_GC_NONE) { result = TYP_I_IMPL; } else if (corInfoType == TYPE_GC_REF) { result = TYP_REF; } else if (corInfoType == TYPE_GC_BYREF) { result = TYP_BYREF; } else { noway_assert(!"Bad value of 'gcType'"); } return result; } #ifdef TARGET_X86 //--------------------------------------------------------------------------- // isTrivialPointerSizedStruct: // Check if the given struct type contains only one pointer-sized integer value type // // Arguments: // clsHnd - the handle for the struct type. // // Return Value: // true if the given struct type contains only one pointer-sized integer value type, // false otherwise. 
// bool Compiler::isTrivialPointerSizedStruct(CORINFO_CLASS_HANDLE clsHnd) const { assert(info.compCompHnd->isValueClass(clsHnd)); if (info.compCompHnd->getClassSize(clsHnd) != TARGET_POINTER_SIZE) { return false; } for (;;) { // all of class chain must be of value type and must have only one field if (!info.compCompHnd->isValueClass(clsHnd) || info.compCompHnd->getClassNumInstanceFields(clsHnd) != 1) { return false; } CORINFO_CLASS_HANDLE* pClsHnd = &clsHnd; CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); CorInfoType fieldType = info.compCompHnd->getFieldType(fldHnd, pClsHnd); var_types vt = JITtype2varType(fieldType); if (fieldType == CORINFO_TYPE_VALUECLASS) { clsHnd = *pClsHnd; } else if (varTypeIsI(vt) && !varTypeIsGC(vt)) { return true; } else { return false; } } } #endif // TARGET_X86 //--------------------------------------------------------------------------- // isNativePrimitiveStructType: // Check if the given struct type is an intrinsic type that should be treated as though // it is not a struct at the unmanaged ABI boundary. // // Arguments: // clsHnd - the handle for the struct type. // // Return Value: // true if the given struct type should be treated as a primitive for unmanaged calls, // false otherwise. // bool Compiler::isNativePrimitiveStructType(CORINFO_CLASS_HANDLE clsHnd) { if (!isIntrinsicType(clsHnd)) { return false; } const char* namespaceName = nullptr; const char* typeName = getClassNameFromMetadata(clsHnd, &namespaceName); if (strcmp(namespaceName, "System.Runtime.InteropServices") != 0) { return false; } return strcmp(typeName, "CLong") == 0 || strcmp(typeName, "CULong") == 0 || strcmp(typeName, "NFloat") == 0; } //----------------------------------------------------------------------------- // getPrimitiveTypeForStruct: // Get the "primitive" type that is is used for a struct // of size 'structSize'. // We examine 'clsHnd' to check the GC layout of the struct and // return TYP_REF for structs that simply wrap an object. // If the struct is a one element HFA/HVA, we will return the // proper floating point or vector type. // // Arguments: // structSize - the size of the struct type, cannot be zero // clsHnd - the handle for the struct type, used when may have // an HFA or if we need the GC layout for an object ref. // // Return Value: // The primitive type (i.e. byte, short, int, long, ref, float, double) // used to pass or return structs of this size. // If we shouldn't use a "primitive" type then TYP_UNKNOWN is returned. // Notes: // For 32-bit targets (X86/ARM32) the 64-bit TYP_LONG type is not // considered a primitive type by this method. // So a struct that wraps a 'long' is passed and returned in the // same way as any other 8-byte struct // For ARM32 if we have an HFA struct that wraps a 64-bit double // we will return TYP_DOUBLE. // For vector calling conventions, a vector is considered a "primitive" // type, as it is passed in a single register. // var_types Compiler::getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS_HANDLE clsHnd, bool isVarArg) { assert(structSize != 0); var_types useType = TYP_UNKNOWN; // Start by determining if we have an HFA/HVA with a single element. if (GlobalJitOptions::compFeatureHfa) { // Arm64 Windows VarArg methods arguments will not classify HFA types, they will need to be treated // as if they are not HFA types. 
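        // Illustrative examples (hypothetical layouts, not types from this file): a struct wrapping
        // a single 'double' (size 8) is an HFA of count one and is returned below as TYP_DOUBLE,
        // while { float x; float y; } (also size 8) is an HFA of count two whose element size (4)
        // does not match the struct size, so it falls through as TYP_UNKNOWN and is handled by the
        // multi-register paths in getArgTypeForStruct/getReturnTypeForStruct instead.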
if (!(TargetArchitecture::IsArm64 && TargetOS::IsWindows && isVarArg)) { switch (structSize) { case 4: case 8: #ifdef TARGET_ARM64 case 16: #endif // TARGET_ARM64 { var_types hfaType = GetHfaType(clsHnd); // We're only interested in the case where the struct size is equal to the size of the hfaType. if (varTypeIsValidHfaType(hfaType)) { if (genTypeSize(hfaType) == structSize) { useType = hfaType; } else { return TYP_UNKNOWN; } } } } if (useType != TYP_UNKNOWN) { return useType; } } } // Now deal with non-HFA/HVA structs. switch (structSize) { case 1: useType = TYP_BYTE; break; case 2: useType = TYP_SHORT; break; #if !defined(TARGET_XARCH) || defined(UNIX_AMD64_ABI) case 3: useType = TYP_INT; break; #endif // !TARGET_XARCH || UNIX_AMD64_ABI #ifdef TARGET_64BIT case 4: // We dealt with the one-float HFA above. All other 4-byte structs are handled as INT. useType = TYP_INT; break; #if !defined(TARGET_XARCH) || defined(UNIX_AMD64_ABI) case 5: case 6: case 7: useType = TYP_I_IMPL; break; #endif // !TARGET_XARCH || UNIX_AMD64_ABI #endif // TARGET_64BIT case TARGET_POINTER_SIZE: { BYTE gcPtr = 0; // Check if this pointer-sized struct is wrapping a GC object info.compCompHnd->getClassGClayout(clsHnd, &gcPtr); useType = getJitGCType(gcPtr); } break; default: useType = TYP_UNKNOWN; break; } return useType; } //----------------------------------------------------------------------------- // getArgTypeForStruct: // Get the type that is used to pass values of the given struct type. // If you have already retrieved the struct size then it should be // passed as the optional fourth argument, as this allows us to avoid // an extra call to getClassSize(clsHnd) // // Arguments: // clsHnd - the handle for the struct type // wbPassStruct - An "out" argument with information about how // the struct is to be passed // isVarArg - is vararg, used to ignore HFA types for Arm64 windows varargs // structSize - the size of the struct type, // or zero if we should call getClassSize(clsHnd) // // Return Value: // For wbPassStruct you can pass a 'nullptr' and nothing will be written // or returned for that out parameter. // When *wbPassStruct is SPK_PrimitiveType this method's return value // is the primitive type used to pass the struct. // When *wbPassStruct is SPK_ByReference this method's return value // is always TYP_UNKNOWN and the struct type is passed by reference to a copy // When *wbPassStruct is SPK_ByValue or SPK_ByValueAsHfa this method's return value // is always TYP_STRUCT and the struct type is passed by value either // using multiple registers or on the stack. // // Assumptions: // The size must be the size of the given type. // The given class handle must be for a value type (struct). // // Notes: // About HFA types: // When the clsHnd is a one element HFA type we return the appropriate // floating point primitive type and *wbPassStruct is SPK_PrimitiveType // If there are two or more elements in the HFA type then the this method's // return value is TYP_STRUCT and *wbPassStruct is SPK_ByValueAsHfa // var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, structPassingKind* wbPassStruct, bool isVarArg, unsigned structSize) { var_types useType = TYP_UNKNOWN; structPassingKind howToPassStruct = SPK_Unknown; // We must change this before we return assert(structSize != 0); // Determine if we can pass the struct as a primitive type. // Note that on x86 we only pass specific pointer-sized structs that satisfy isTrivialPointerSizedStruct checks. 
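    // A minimal usage sketch (illustrative only; 'clsHnd' is assumed to be the handle of a
    // 4-byte struct wrapping an 'int'):
    //
    //     structPassingKind kind;
    //     var_types argType = getArgTypeForStruct(clsHnd, &kind, /* isVarArg */ false, 4);
    //     // expected on 64-bit targets: argType == TYP_INT, kind == SPK_PrimitiveType
    //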
#ifndef TARGET_X86 #ifdef UNIX_AMD64_ABI // An 8-byte struct may need to be passed in a floating point register // So we always consult the struct "Classifier" routine // SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; eeGetSystemVAmd64PassStructInRegisterDescriptor(clsHnd, &structDesc); if (structDesc.passedInRegisters && (structDesc.eightByteCount != 1)) { // We can't pass this as a primitive type. } else if (structDesc.eightByteClassifications[0] == SystemVClassificationTypeSSE) { // If this is passed as a floating type, use that. // Otherwise, we'll use the general case - we don't want to use the "EightByteType" // directly, because it returns `TYP_INT` for any integral type <= 4 bytes, and // we need to preserve small types. useType = GetEightByteType(structDesc, 0); } else #endif // UNIX_AMD64_ABI // The largest arg passed in a single register is MAX_PASS_SINGLEREG_BYTES, // so we can skip calling getPrimitiveTypeForStruct when we // have a struct that is larger than that. // if (structSize <= MAX_PASS_SINGLEREG_BYTES) { // We set the "primitive" useType based upon the structSize // and also examine the clsHnd to see if it is an HFA of count one useType = getPrimitiveTypeForStruct(structSize, clsHnd, isVarArg); } #else if (isTrivialPointerSizedStruct(clsHnd)) { useType = TYP_I_IMPL; } #endif // !TARGET_X86 // Did we change this struct type into a simple "primitive" type? // if (useType != TYP_UNKNOWN) { // Yes, we should use the "primitive" type in 'useType' howToPassStruct = SPK_PrimitiveType; } else // We can't replace the struct with a "primitive" type { // See if we can pass this struct by value, possibly in multiple registers // or if we should pass it by reference to a copy // if (structSize <= MAX_PASS_MULTIREG_BYTES) { // Structs that are HFA/HVA's are passed by value in multiple registers. // Arm64 Windows VarArg methods arguments will not classify HFA/HVA types, they will need to be treated // as if they are not HFA/HVA types. 
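            // For example (sketch, ARM64, non-varargs): a 16-byte struct of two longs that is not
            // an HFA is passed by value in two integer registers (SPK_ByValue below), while a
            // 24-byte non-HFA struct no longer fits in two pointers and is instead passed by
            // reference to a copy (SPK_ByReference).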
var_types hfaType; if (TargetArchitecture::IsArm64 && TargetOS::IsWindows && isVarArg) { hfaType = TYP_UNDEF; } else { hfaType = GetHfaType(clsHnd); } if (varTypeIsValidHfaType(hfaType)) { // HFA's of count one should have been handled by getPrimitiveTypeForStruct assert(GetHfaCount(clsHnd) >= 2); // setup wbPassType and useType indicate that this is passed by value as an HFA // using multiple registers // (when all of the parameters registers are used, then the stack will be used) howToPassStruct = SPK_ByValueAsHfa; useType = TYP_STRUCT; } else // Not an HFA struct type { #ifdef UNIX_AMD64_ABI // The case of (structDesc.eightByteCount == 1) should have already been handled if ((structDesc.eightByteCount > 1) || !structDesc.passedInRegisters) { // setup wbPassType and useType indicate that this is passed by value in multiple registers // (when all of the parameters registers are used, then the stack will be used) howToPassStruct = SPK_ByValue; useType = TYP_STRUCT; } else { assert(structDesc.eightByteCount == 0); // Otherwise we pass this struct by reference to a copy // setup wbPassType and useType indicate that this is passed using one register // (by reference to a copy) howToPassStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #elif defined(TARGET_ARM64) // Structs that are pointer sized or smaller should have been handled by getPrimitiveTypeForStruct assert(structSize > TARGET_POINTER_SIZE); // On ARM64 structs that are 9-16 bytes are passed by value in multiple registers // if (structSize <= (TARGET_POINTER_SIZE * 2)) { // setup wbPassType and useType indicate that this is passed by value in multiple registers // (when all of the parameters registers are used, then the stack will be used) howToPassStruct = SPK_ByValue; useType = TYP_STRUCT; } else // a structSize that is 17-32 bytes in size { // Otherwise we pass this struct by reference to a copy // setup wbPassType and useType indicate that this is passed using one register // (by reference to a copy) howToPassStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #elif defined(TARGET_X86) || defined(TARGET_ARM) // Otherwise we pass this struct by value on the stack // setup wbPassType and useType indicate that this is passed by value according to the X86/ARM32 ABI howToPassStruct = SPK_ByValue; useType = TYP_STRUCT; #else // TARGET_XXX noway_assert(!"Unhandled TARGET in getArgTypeForStruct (with FEATURE_MULTIREG_ARGS=1)"); #endif // TARGET_XXX } } else // (structSize > MAX_PASS_MULTIREG_BYTES) { // We have a (large) struct that can't be replaced with a "primitive" type // and can't be passed in multiple registers CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) || defined(TARGET_ARM) || defined(UNIX_AMD64_ABI) // Otherwise we pass this struct by value on the stack // setup wbPassType and useType indicate that this is passed by value according to the X86/ARM32 ABI howToPassStruct = SPK_ByValue; useType = TYP_STRUCT; #elif defined(TARGET_AMD64) || defined(TARGET_ARM64) // Otherwise we pass this struct by reference to a copy // setup wbPassType and useType indicate that this is passed using one register (by reference to a copy) howToPassStruct = SPK_ByReference; useType = TYP_UNKNOWN; #else // TARGET_XXX noway_assert(!"Unhandled TARGET in getArgTypeForStruct"); #endif // TARGET_XXX } } // 'howToPassStruct' must be set to one of the valid values before we return assert(howToPassStruct != SPK_Unknown); if (wbPassStruct != nullptr) { *wbPassStruct = howToPassStruct; } return useType; } 
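//-----------------------------------------------------------------------------
// Summary sketch for getArgTypeForStruct (the struct layouts below are hypothetical examples and
// the outcomes illustrative rather than exhaustive):
//
//     struct S4  { int32_t i; };             // 4 bytes  -> SPK_PrimitiveType
//     struct S16 { int64_t a; int64_t b; };  // 16 bytes -> SPK_ByValue on ARM64/SysV x64,
//                                            //             SPK_ByReference on Windows x64
//     struct HFA { double d0; double d1; };  // 16 bytes -> SPK_ByValueAsHfa where HFAs apply
//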
//----------------------------------------------------------------------------- // getReturnTypeForStruct: // Get the type that is used to return values of the given struct type. // If you have already retrieved the struct size then it should be // passed as the optional third argument, as this allows us to avoid // an extra call to getClassSize(clsHnd) // // Arguments: // clsHnd - the handle for the struct type // callConv - the calling convention of the function // that returns this struct. // wbReturnStruct - An "out" argument with information about how // the struct is to be returned // structSize - the size of the struct type, // or zero if we should call getClassSize(clsHnd) // // Return Value: // For wbReturnStruct you can pass a 'nullptr' and nothing will be written // or returned for that out parameter. // When *wbReturnStruct is SPK_PrimitiveType this method's return value // is the primitive type used to return the struct. // When *wbReturnStruct is SPK_ByReference this method's return value // is always TYP_UNKNOWN and the struct type is returned using a return buffer // When *wbReturnStruct is SPK_ByValue or SPK_ByValueAsHfa this method's return value // is always TYP_STRUCT and the struct type is returned using multiple registers. // // Assumptions: // The size must be the size of the given type. // The given class handle must be for a value type (struct). // // Notes: // About HFA types: // When the clsHnd is a one element HFA type then this method's return // value is the appropriate floating point primitive type and // *wbReturnStruct is SPK_PrimitiveType. // If there are two or more elements in the HFA type and the target supports // multireg return types then the return value is TYP_STRUCT and // *wbReturnStruct is SPK_ByValueAsHfa. // Additionally if there are two or more elements in the HFA type and // the target doesn't support multreg return types then it is treated // as if it wasn't an HFA type. // About returning TYP_STRUCT: // Whenever this method's return value is TYP_STRUCT it always means // that multiple registers are used to return this struct. // var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, CorInfoCallConvExtension callConv, structPassingKind* wbReturnStruct /* = nullptr */, unsigned structSize /* = 0 */) { var_types useType = TYP_UNKNOWN; structPassingKind howToReturnStruct = SPK_Unknown; // We must change this before we return bool canReturnInRegister = true; assert(clsHnd != NO_CLASS_HANDLE); if (structSize == 0) { structSize = info.compCompHnd->getClassSize(clsHnd); } assert(structSize > 0); #ifdef UNIX_AMD64_ABI // An 8-byte struct may need to be returned in a floating point register // So we always consult the struct "Classifier" routine // SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; eeGetSystemVAmd64PassStructInRegisterDescriptor(clsHnd, &structDesc); if (structDesc.eightByteCount == 1) { assert(structSize <= sizeof(double)); assert(structDesc.passedInRegisters); if (structDesc.eightByteClassifications[0] == SystemVClassificationTypeSSE) { // If this is returned as a floating type, use that. // Otherwise, leave as TYP_UNKONWN and we'll sort things out below. useType = GetEightByteType(structDesc, 0); howToReturnStruct = SPK_PrimitiveType; } } else { // Return classification is not always size based... 
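        // For example, the SysV classifier can place even a small struct in the MEMORY class
        // (so passedInRegisters is false), in which case it is returned through a hidden return
        // buffer below rather than in RAX/RDX or XMM registers.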
canReturnInRegister = structDesc.passedInRegisters; if (!canReturnInRegister) { assert(structDesc.eightByteCount == 0); howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } } #elif UNIX_X86_ABI if (callConv != CorInfoCallConvExtension::Managed && !isNativePrimitiveStructType(clsHnd)) { canReturnInRegister = false; howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #endif if (TargetOS::IsWindows && !TargetArchitecture::IsArm32 && callConvIsInstanceMethodCallConv(callConv) && !isNativePrimitiveStructType(clsHnd)) { canReturnInRegister = false; howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } // Check for cases where a small struct is returned in a register // via a primitive type. // // The largest "primitive type" is MAX_PASS_SINGLEREG_BYTES // so we can skip calling getPrimitiveTypeForStruct when we // have a struct that is larger than that. if (canReturnInRegister && (useType == TYP_UNKNOWN) && (structSize <= MAX_PASS_SINGLEREG_BYTES)) { // We set the "primitive" useType based upon the structSize // and also examine the clsHnd to see if it is an HFA of count one // // The ABI for struct returns in varArg methods, is same as the normal case, // so pass false for isVararg useType = getPrimitiveTypeForStruct(structSize, clsHnd, /*isVararg=*/false); if (useType != TYP_UNKNOWN) { if (structSize == genTypeSize(useType)) { // Currently: 1, 2, 4, or 8 byte structs howToReturnStruct = SPK_PrimitiveType; } else { // Currently: 3, 5, 6, or 7 byte structs assert(structSize < genTypeSize(useType)); howToReturnStruct = SPK_EnclosingType; } } } #ifdef TARGET_64BIT // Note this handles an odd case when FEATURE_MULTIREG_RET is disabled and HFAs are enabled // // getPrimitiveTypeForStruct will return TYP_UNKNOWN for a struct that is an HFA of two floats // because when HFA are enabled, normally we would use two FP registers to pass or return it // // But if we don't have support for multiple register return types, we have to change this. // Since what we have is an 8-byte struct (float + float) we change useType to TYP_I_IMPL // so that the struct is returned instead using an 8-byte integer register. // if ((FEATURE_MULTIREG_RET == 0) && (useType == TYP_UNKNOWN) && (structSize == (2 * sizeof(float))) && IsHfa(clsHnd)) { useType = TYP_I_IMPL; howToReturnStruct = SPK_PrimitiveType; } #endif // Did we change this struct type into a simple "primitive" type? if (useType != TYP_UNKNOWN) { // If so, we should have already set howToReturnStruct, too. 
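        // (Either SPK_PrimitiveType for structs whose size exactly matches the primitive, or
        // SPK_EnclosingType for the 3/5/6/7 byte cases handled above.)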
assert(howToReturnStruct != SPK_Unknown); } else if (canReturnInRegister) // We can't replace the struct with a "primitive" type { // See if we can return this struct by value, possibly in multiple registers // or if we should return it using a return buffer register // if ((FEATURE_MULTIREG_RET == 1) && (structSize <= MAX_RET_MULTIREG_BYTES)) { // Structs that are HFA's are returned in multiple registers if (IsHfa(clsHnd)) { // HFA's of count one should have been handled by getPrimitiveTypeForStruct assert(GetHfaCount(clsHnd) >= 2); // setup wbPassType and useType indicate that this is returned by value as an HFA // using multiple registers howToReturnStruct = SPK_ByValueAsHfa; useType = TYP_STRUCT; } else // Not an HFA struct type { #ifdef UNIX_AMD64_ABI // The cases of (structDesc.eightByteCount == 1) and (structDesc.eightByteCount == 0) // should have already been handled assert(structDesc.eightByteCount > 1); // setup wbPassType and useType indicate that this is returned by value in multiple registers howToReturnStruct = SPK_ByValue; useType = TYP_STRUCT; assert(structDesc.passedInRegisters == true); #elif defined(TARGET_ARM64) // Structs that are pointer sized or smaller should have been handled by getPrimitiveTypeForStruct assert(structSize > TARGET_POINTER_SIZE); // On ARM64 structs that are 9-16 bytes are returned by value in multiple registers // if (structSize <= (TARGET_POINTER_SIZE * 2)) { // setup wbPassType and useType indicate that this is return by value in multiple registers howToReturnStruct = SPK_ByValue; useType = TYP_STRUCT; } else // a structSize that is 17-32 bytes in size { // Otherwise we return this struct using a return buffer // setup wbPassType and useType indicate that this is returned using a return buffer register // (reference to a return buffer) howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #elif defined(TARGET_X86) // Only 8-byte structs are return in multiple registers. // We also only support multireg struct returns on x86 to match the native calling convention. // So return 8-byte structs only when the calling convention is a native calling convention. 
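            // (For example, an 8-byte struct returned from an unmanaged cdecl function comes back
            // in EAX:EDX, which we model as the two-register SPK_ByValue case below.)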
if (structSize == MAX_RET_MULTIREG_BYTES && callConv != CorInfoCallConvExtension::Managed) { // setup wbPassType and useType indicate that this is return by value in multiple registers howToReturnStruct = SPK_ByValue; useType = TYP_STRUCT; } else { // Otherwise we return this struct using a return buffer // setup wbPassType and useType indicate that this is returned using a return buffer register // (reference to a return buffer) howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #elif defined(TARGET_ARM) // Otherwise we return this struct using a return buffer // setup wbPassType and useType indicate that this is returned using a return buffer register // (reference to a return buffer) howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; #else // TARGET_XXX noway_assert(!"Unhandled TARGET in getReturnTypeForStruct (with FEATURE_MULTIREG_ARGS=1)"); #endif // TARGET_XXX } } else // (structSize > MAX_RET_MULTIREG_BYTES) || (FEATURE_MULTIREG_RET == 0) { // We have a (large) struct that can't be replaced with a "primitive" type // and can't be returned in multiple registers // We return this struct using a return buffer register // setup wbPassType and useType indicate that this is returned using a return buffer register // (reference to a return buffer) howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } } // 'howToReturnStruct' must be set to one of the valid values before we return assert(howToReturnStruct != SPK_Unknown); if (wbReturnStruct != nullptr) { *wbReturnStruct = howToReturnStruct; } return useType; } /////////////////////////////////////////////////////////////////////////////// // // MEASURE_NOWAY: code to measure and rank dynamic occurrences of noway_assert. // (Just the appearances of noway_assert, whether the assert is true or false.) // This might help characterize the cost of noway_assert in non-DEBUG builds, // or determine which noway_assert should be simple DEBUG-only asserts. // /////////////////////////////////////////////////////////////////////////////// #if MEASURE_NOWAY struct FileLine { char* m_file; unsigned m_line; char* m_condStr; FileLine() : m_file(nullptr), m_line(0), m_condStr(nullptr) { } FileLine(const char* file, unsigned line, const char* condStr) : m_line(line) { size_t newSize = (strlen(file) + 1) * sizeof(char); m_file = HostAllocator::getHostAllocator().allocate<char>(newSize); strcpy_s(m_file, newSize, file); newSize = (strlen(condStr) + 1) * sizeof(char); m_condStr = HostAllocator::getHostAllocator().allocate<char>(newSize); strcpy_s(m_condStr, newSize, condStr); } FileLine(const FileLine& other) { m_file = other.m_file; m_line = other.m_line; m_condStr = other.m_condStr; } // GetHashCode() and Equals() are needed by JitHashTable static unsigned GetHashCode(FileLine fl) { assert(fl.m_file != nullptr); unsigned code = fl.m_line; for (const char* p = fl.m_file; *p != '\0'; p++) { code += *p; } // Could also add condStr. 
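        // Note that Equals() below likewise keys only on file and line, so all dynamic hits of a
        // given noway_assert aggregate into a single map entry.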
return code; } static bool Equals(FileLine fl1, FileLine fl2) { return (fl1.m_line == fl2.m_line) && (0 == strcmp(fl1.m_file, fl2.m_file)); } }; typedef JitHashTable<FileLine, FileLine, size_t, HostAllocator> FileLineToCountMap; FileLineToCountMap* NowayAssertMap; void Compiler::RecordNowayAssert(const char* filename, unsigned line, const char* condStr) { if (NowayAssertMap == nullptr) { NowayAssertMap = new (HostAllocator::getHostAllocator()) FileLineToCountMap(HostAllocator::getHostAllocator()); } FileLine fl(filename, line, condStr); size_t* pCount = NowayAssertMap->LookupPointer(fl); if (pCount == nullptr) { NowayAssertMap->Set(fl, 1); } else { ++(*pCount); } } void RecordNowayAssertGlobal(const char* filename, unsigned line, const char* condStr) { if ((JitConfig.JitMeasureNowayAssert() == 1) && (JitTls::GetCompiler() != nullptr)) { JitTls::GetCompiler()->RecordNowayAssert(filename, line, condStr); } } struct NowayAssertCountMap { size_t count; FileLine fl; NowayAssertCountMap() : count(0) { } struct compare { bool operator()(const NowayAssertCountMap& elem1, const NowayAssertCountMap& elem2) { return (ssize_t)elem2.count < (ssize_t)elem1.count; // sort in descending order } }; }; void DisplayNowayAssertMap() { if (NowayAssertMap != nullptr) { FILE* fout; LPCWSTR strJitMeasureNowayAssertFile = JitConfig.JitMeasureNowayAssertFile(); if (strJitMeasureNowayAssertFile != nullptr) { fout = _wfopen(strJitMeasureNowayAssertFile, W("a")); if (fout == nullptr) { fprintf(jitstdout, "Failed to open JitMeasureNowayAssertFile \"%ws\"\n", strJitMeasureNowayAssertFile); return; } } else { fout = jitstdout; } // Iterate noway assert map, create sorted table by occurrence, dump it. unsigned count = NowayAssertMap->GetCount(); NowayAssertCountMap* nacp = new NowayAssertCountMap[count]; unsigned i = 0; for (FileLineToCountMap::KeyIterator iter = NowayAssertMap->Begin(), end = NowayAssertMap->End(); !iter.Equal(end); ++iter) { nacp[i].count = iter.GetValue(); nacp[i].fl = iter.Get(); ++i; } jitstd::sort(nacp, nacp + count, NowayAssertCountMap::compare()); if (fout == jitstdout) { // Don't output the header if writing to a file, since we'll be appending to existing dumps in that case. fprintf(fout, "\nnoway_assert counts:\n"); fprintf(fout, "count, file, line, text\n"); } for (i = 0; i < count; i++) { fprintf(fout, "%u, %s, %u, \"%s\"\n", nacp[i].count, nacp[i].fl.m_file, nacp[i].fl.m_line, nacp[i].fl.m_condStr); } if (fout != jitstdout) { fclose(fout); fout = nullptr; } } } #endif // MEASURE_NOWAY /***************************************************************************** * variables to keep track of how many iterations we go in a dataflow pass */ #if DATAFLOW_ITER unsigned CSEiterCount; // counts the # of iteration for the CSE dataflow unsigned CFiterCount; // counts the # of iteration for the Const Folding dataflow #endif // DATAFLOW_ITER #if MEASURE_BLOCK_SIZE size_t genFlowNodeSize; size_t genFlowNodeCnt; #endif // MEASURE_BLOCK_SIZE /*****************************************************************************/ // We keep track of methods we've already compiled. 
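/*****************************************************************************
 * Usage note for the MEASURE_NOWAY machinery earlier in this file (a summary, not new behavior):
 * recording only happens when JitConfig.JitMeasureNowayAssert() is 1, and DisplayNowayAssertMap()
 * appends the sorted counts to JitConfig.JitMeasureNowayAssertFile() when that is set, falling
 * back to jitstdout otherwise.
 */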
/***************************************************************************** * Declare the statics */ #ifdef DEBUG /* static */ LONG Compiler::s_compMethodsCount = 0; // to produce unique label names #endif #if MEASURE_MEM_ALLOC /* static */ bool Compiler::s_dspMemStats = false; #endif #ifndef PROFILING_SUPPORTED const bool Compiler::Options::compNoPInvokeInlineCB = false; #endif /***************************************************************************** * * One time initialization code */ /* static */ void Compiler::compStartup() { #if DISPLAY_SIZES grossVMsize = grossNCsize = totalNCsize = 0; #endif // DISPLAY_SIZES /* Initialize the table of tree node sizes */ GenTree::InitNodeSize(); #ifdef JIT32_GCENCODER // Initialize the GC encoder lookup table GCInfo::gcInitEncoderLookupTable(); #endif /* Initialize the emitter */ emitter::emitInit(); // Static vars of ValueNumStore ValueNumStore::InitValueNumStoreStatics(); compDisplayStaticSizes(jitstdout); } /***************************************************************************** * * One time finalization code */ /* static */ void Compiler::compShutdown() { if (s_pAltJitExcludeAssembliesList != nullptr) { s_pAltJitExcludeAssembliesList->~AssemblyNamesList2(); // call the destructor s_pAltJitExcludeAssembliesList = nullptr; } #ifdef DEBUG if (s_pJitDisasmIncludeAssembliesList != nullptr) { s_pJitDisasmIncludeAssembliesList->~AssemblyNamesList2(); // call the destructor s_pJitDisasmIncludeAssembliesList = nullptr; } #endif // DEBUG #if MEASURE_NOWAY DisplayNowayAssertMap(); #endif // MEASURE_NOWAY /* Shut down the emitter */ emitter::emitDone(); #if defined(DEBUG) || defined(INLINE_DATA) // Finish reading and/or writing inline xml if (JitConfig.JitInlineDumpXmlFile() != nullptr) { FILE* file = _wfopen(JitConfig.JitInlineDumpXmlFile(), W("a")); if (file != nullptr) { InlineStrategy::FinalizeXml(file); fclose(file); } else { InlineStrategy::FinalizeXml(); } } #endif // defined(DEBUG) || defined(INLINE_DATA) #if defined(DEBUG) || MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || DISPLAY_SIZES || CALL_ARG_STATS if (genMethodCnt == 0) { return; } #endif #if NODEBASH_STATS GenTree::ReportOperBashing(jitstdout); #endif // Where should we write our statistics output? FILE* fout = jitstdout; #ifdef FEATURE_JIT_METHOD_PERF if (compJitTimeLogFilename != nullptr) { FILE* jitTimeLogFile = _wfopen(compJitTimeLogFilename, W("a")); if (jitTimeLogFile != nullptr) { CompTimeSummaryInfo::s_compTimeSummary.Print(jitTimeLogFile); fclose(jitTimeLogFile); } } JitTimer::Shutdown(); #endif // FEATURE_JIT_METHOD_PERF #if COUNT_AST_OPERS // Add up all the counts so that we can show percentages of total unsigned totalCount = 0; for (unsigned op = 0; op < GT_COUNT; op++) { totalCount += GenTree::s_gtNodeCounts[op]; } if (totalCount > 0) { struct OperInfo { unsigned Count; unsigned Size; genTreeOps Oper; }; OperInfo opers[GT_COUNT]; for (unsigned op = 0; op < GT_COUNT; op++) { opers[op] = {GenTree::s_gtNodeCounts[op], GenTree::s_gtTrueSizes[op], static_cast<genTreeOps>(op)}; } jitstd::sort(opers, opers + ArrLen(opers), [](const OperInfo& l, const OperInfo& r) { // We'll be sorting in descending order. 
return l.Count >= r.Count; }); unsigned remainingCount = totalCount; unsigned remainingCountLarge = 0; unsigned remainingCountSmall = 0; unsigned countLarge = 0; unsigned countSmall = 0; fprintf(fout, "\nGenTree operator counts (approximate):\n\n"); for (OperInfo oper : opers) { unsigned size = oper.Size; unsigned count = oper.Count; double percentage = 100.0 * count / totalCount; if (size > TREE_NODE_SZ_SMALL) { countLarge += count; } else { countSmall += count; } // Let's not show anything below a threshold if (percentage >= 0.5) { fprintf(fout, " GT_%-17s %7u (%4.1lf%%) %3u bytes each\n", GenTree::OpName(oper.Oper), count, percentage, size); remainingCount -= count; } else { if (size > TREE_NODE_SZ_SMALL) { remainingCountLarge += count; } else { remainingCountSmall += count; } } } if (remainingCount > 0) { fprintf(fout, " All other GT_xxx ... %7u (%4.1lf%%) ... %4.1lf%% small + %4.1lf%% large\n", remainingCount, 100.0 * remainingCount / totalCount, 100.0 * remainingCountSmall / totalCount, 100.0 * remainingCountLarge / totalCount); } fprintf(fout, " -----------------------------------------------------\n"); fprintf(fout, " Total ....... %11u --ALL-- ... %4.1lf%% small + %4.1lf%% large\n", totalCount, 100.0 * countSmall / totalCount, 100.0 * countLarge / totalCount); fprintf(fout, "\n"); } #endif // COUNT_AST_OPERS #if DISPLAY_SIZES if (grossVMsize && grossNCsize) { fprintf(fout, "\n"); fprintf(fout, "--------------------------------------\n"); fprintf(fout, "Function and GC info size stats\n"); fprintf(fout, "--------------------------------------\n"); fprintf(fout, "[%7u VM, %8u %6s %4u%%] %s\n", grossVMsize, grossNCsize, Target::g_tgtCPUName, 100 * grossNCsize / grossVMsize, "Total (excluding GC info)"); fprintf(fout, "[%7u VM, %8u %6s %4u%%] %s\n", grossVMsize, totalNCsize, Target::g_tgtCPUName, 100 * totalNCsize / grossVMsize, "Total (including GC info)"); if (gcHeaderISize || gcHeaderNSize) { fprintf(fout, "\n"); fprintf(fout, "GC tables : [%7uI,%7uN] %7u byt (%u%% of IL, %u%% of %s).\n", gcHeaderISize + gcPtrMapISize, gcHeaderNSize + gcPtrMapNSize, totalNCsize - grossNCsize, 100 * (totalNCsize - grossNCsize) / grossVMsize, 100 * (totalNCsize - grossNCsize) / grossNCsize, Target::g_tgtCPUName); fprintf(fout, "GC headers : [%7uI,%7uN] %7u byt, [%4.1fI,%4.1fN] %4.1f byt/meth\n", gcHeaderISize, gcHeaderNSize, gcHeaderISize + gcHeaderNSize, (float)gcHeaderISize / (genMethodICnt + 0.001), (float)gcHeaderNSize / (genMethodNCnt + 0.001), (float)(gcHeaderISize + gcHeaderNSize) / genMethodCnt); fprintf(fout, "GC ptr maps : [%7uI,%7uN] %7u byt, [%4.1fI,%4.1fN] %4.1f byt/meth\n", gcPtrMapISize, gcPtrMapNSize, gcPtrMapISize + gcPtrMapNSize, (float)gcPtrMapISize / (genMethodICnt + 0.001), (float)gcPtrMapNSize / (genMethodNCnt + 0.001), (float)(gcPtrMapISize + gcPtrMapNSize) / genMethodCnt); } else { fprintf(fout, "\n"); fprintf(fout, "GC tables take up %u bytes (%u%% of instr, %u%% of %6s code).\n", totalNCsize - grossNCsize, 100 * (totalNCsize - grossNCsize) / grossVMsize, 100 * (totalNCsize - grossNCsize) / grossNCsize, Target::g_tgtCPUName); } #ifdef DEBUG #if DOUBLE_ALIGN fprintf(fout, "%u out of %u methods generated with double-aligned stack\n", Compiler::s_lvaDoubleAlignedProcsCount, genMethodCnt); #endif #endif } #endif // DISPLAY_SIZES #if CALL_ARG_STATS compDispCallArgStats(fout); #endif #if COUNT_BASIC_BLOCKS fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Basic block count frequency table:\n"); fprintf(fout, 
"--------------------------------------------------\n"); bbCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "IL method size frequency table for methods with a single basic block:\n"); fprintf(fout, "--------------------------------------------------\n"); bbOneBBSizeTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); #endif // COUNT_BASIC_BLOCKS #if COUNT_LOOPS fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Loop stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Total number of methods with loops is %5u\n", totalLoopMethods); fprintf(fout, "Total number of loops is %5u\n", totalLoopCount); fprintf(fout, "Maximum number of loops per method is %5u\n", maxLoopsPerMethod); fprintf(fout, "# of methods overflowing nat loop table is %5u\n", totalLoopOverflows); fprintf(fout, "Total number of 'unnatural' loops is %5u\n", totalUnnatLoopCount); fprintf(fout, "# of methods overflowing unnat loop limit is %5u\n", totalUnnatLoopOverflows); fprintf(fout, "Total number of loops with an iterator is %5u\n", iterLoopCount); fprintf(fout, "Total number of loops with a simple iterator is %5u\n", simpleTestLoopCount); fprintf(fout, "Total number of loops with a constant iterator is %5u\n", constIterLoopCount); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Loop count frequency table:\n"); fprintf(fout, "--------------------------------------------------\n"); loopCountTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Loop exit count frequency table:\n"); fprintf(fout, "--------------------------------------------------\n"); loopExitCountTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); #endif // COUNT_LOOPS #if DATAFLOW_ITER fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Total number of iterations in the CSE dataflow loop is %5u\n", CSEiterCount); fprintf(fout, "Total number of iterations in the CF dataflow loop is %5u\n", CFiterCount); #endif // DATAFLOW_ITER #if MEASURE_NODE_SIZE fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "GenTree node allocation stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Allocated %6I64u tree nodes (%7I64u bytes total, avg %4I64u bytes per method)\n", genNodeSizeStats.genTreeNodeCnt, genNodeSizeStats.genTreeNodeSize, genNodeSizeStats.genTreeNodeSize / genMethodCnt); fprintf(fout, "Allocated %7I64u bytes of unused tree node space (%3.2f%%)\n", genNodeSizeStats.genTreeNodeSize - genNodeSizeStats.genTreeNodeActualSize, (float)(100 * (genNodeSizeStats.genTreeNodeSize - genNodeSizeStats.genTreeNodeActualSize)) / genNodeSizeStats.genTreeNodeSize); fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Distribution of per-method GenTree node counts:\n"); genTreeNcntHist.dump(fout); fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Distribution of per-method GenTree node allocations (in bytes):\n"); genTreeNsizHist.dump(fout); #endif // MEASURE_NODE_SIZE #if MEASURE_BLOCK_SIZE fprintf(fout, "\n"); fprintf(fout, 
"---------------------------------------------------\n"); fprintf(fout, "BasicBlock and flowList/BasicBlockList allocation stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Allocated %6u basic blocks (%7u bytes total, avg %4u bytes per method)\n", BasicBlock::s_Count, BasicBlock::s_Size, BasicBlock::s_Size / genMethodCnt); fprintf(fout, "Allocated %6u flow nodes (%7u bytes total, avg %4u bytes per method)\n", genFlowNodeCnt, genFlowNodeSize, genFlowNodeSize / genMethodCnt); #endif // MEASURE_BLOCK_SIZE #if MEASURE_MEM_ALLOC if (s_dspMemStats) { fprintf(fout, "\nAll allocations:\n"); ArenaAllocator::dumpAggregateMemStats(jitstdout); fprintf(fout, "\nLargest method:\n"); ArenaAllocator::dumpMaxMemStats(jitstdout); fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Distribution of total memory allocated per method (in KB):\n"); memAllocHist.dump(fout); fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Distribution of total memory used per method (in KB):\n"); memUsedHist.dump(fout); } #endif // MEASURE_MEM_ALLOC #if LOOP_HOIST_STATS #ifdef DEBUG // Always display loop stats in retail if (JitConfig.DisplayLoopHoistStats() != 0) #endif // DEBUG { PrintAggregateLoopHoistStats(jitstdout); } #endif // LOOP_HOIST_STATS #if TRACK_ENREG_STATS if (JitConfig.JitEnregStats() != 0) { s_enregisterStats.Dump(fout); } #endif // TRACK_ENREG_STATS #if MEASURE_PTRTAB_SIZE fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "GC pointer table stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Reg pointer descriptor size (internal): %8u (avg %4u per method)\n", GCInfo::s_gcRegPtrDscSize, GCInfo::s_gcRegPtrDscSize / genMethodCnt); fprintf(fout, "Total pointer table size: %8u (avg %4u per method)\n", GCInfo::s_gcTotalPtrTabSize, GCInfo::s_gcTotalPtrTabSize / genMethodCnt); #endif // MEASURE_PTRTAB_SIZE #if MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || MEASURE_PTRTAB_SIZE || DISPLAY_SIZES if (genMethodCnt != 0) { fprintf(fout, "\n"); fprintf(fout, "A total of %6u methods compiled", genMethodCnt); #if DISPLAY_SIZES if (genMethodICnt || genMethodNCnt) { fprintf(fout, " (%u interruptible, %u non-interruptible)", genMethodICnt, genMethodNCnt); } #endif // DISPLAY_SIZES fprintf(fout, ".\n"); } #endif // MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || MEASURE_PTRTAB_SIZE || DISPLAY_SIZES #if EMITTER_STATS emitterStats(fout); #endif #if MEASURE_FATAL fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Fatal errors stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, " badCode: %u\n", fatal_badCode); fprintf(fout, " noWay: %u\n", fatal_noWay); fprintf(fout, " implLimitation: %u\n", fatal_implLimitation); fprintf(fout, " NOMEM: %u\n", fatal_NOMEM); fprintf(fout, " noWayAssertBody: %u\n", fatal_noWayAssertBody); #ifdef DEBUG fprintf(fout, " noWayAssertBodyArgs: %u\n", fatal_noWayAssertBodyArgs); #endif // DEBUG fprintf(fout, " NYI: %u\n", fatal_NYI); #endif // MEASURE_FATAL } /***************************************************************************** * Display static data structure sizes. 
*/ /* static */ void Compiler::compDisplayStaticSizes(FILE* fout) { #if MEASURE_NODE_SIZE GenTree::DumpNodeSizes(fout); #endif #if EMITTER_STATS emitterStaticStats(fout); #endif } /***************************************************************************** * * Constructor */ void Compiler::compInit(ArenaAllocator* pAlloc, CORINFO_METHOD_HANDLE methodHnd, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, InlineInfo* inlineInfo) { assert(pAlloc); compArenaAllocator = pAlloc; // Inlinee Compile object will only be allocated when needed for the 1st time. InlineeCompiler = nullptr; // Set the inline info. impInlineInfo = inlineInfo; info.compCompHnd = compHnd; info.compMethodHnd = methodHnd; info.compMethodInfo = methodInfo; #ifdef DEBUG bRangeAllowStress = false; #endif #if defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS // Initialize the method name and related info, as it is used early in determining whether to // apply stress modes, and which ones to apply. // Note that even allocating memory can invoke the stress mechanism, so ensure that both // 'compMethodName' and 'compFullName' are either null or valid before we allocate. // (The stress mode checks references these prior to checking bRangeAllowStress.) // info.compMethodName = nullptr; info.compClassName = nullptr; info.compFullName = nullptr; const char* classNamePtr; const char* methodName; methodName = eeGetMethodName(methodHnd, &classNamePtr); unsigned len = (unsigned)roundUp(strlen(classNamePtr) + 1); info.compClassName = getAllocator(CMK_DebugOnly).allocate<char>(len); info.compMethodName = methodName; strcpy_s((char*)info.compClassName, len, classNamePtr); info.compFullName = eeGetMethodFullName(methodHnd); info.compPerfScore = 0.0; info.compMethodSuperPMIIndex = g_jitHost->getIntConfigValue(W("SuperPMIMethodContextNumber"), -1); #endif // defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS #if defined(DEBUG) || defined(INLINE_DATA) info.compMethodHashPrivate = 0; #endif // defined(DEBUG) || defined(INLINE_DATA) #ifdef DEBUG // Opt-in to jit stress based on method hash ranges. // // Note the default (with JitStressRange not set) is that all // methods will be subject to stress. static ConfigMethodRange fJitStressRange; fJitStressRange.EnsureInit(JitConfig.JitStressRange()); assert(!fJitStressRange.Error()); bRangeAllowStress = fJitStressRange.Contains(info.compMethodHash()); #endif // DEBUG eeInfoInitialized = false; compDoAggressiveInlining = false; if (compIsForInlining()) { m_inlineStrategy = nullptr; compInlineResult = inlineInfo->inlineResult; } else { m_inlineStrategy = new (this, CMK_Inlining) InlineStrategy(this); compInlineResult = nullptr; } // Initialize this to the first phase to run. mostRecentlyActivePhase = PHASE_PRE_IMPORT; // Initially, no phase checks are active. activePhaseChecks = PhaseChecks::CHECK_NONE; #ifdef FEATURE_TRACELOGGING // Make sure JIT telemetry is initialized as soon as allocations can be made // but no later than a point where noway_asserts can be thrown. // 1. JIT telemetry could allocate some objects internally. // 2. NowayAsserts are tracked through telemetry. // Note: JIT telemetry could gather data when compiler is not fully initialized. // So you have to initialize the compiler variables you use for telemetry. 
assert((unsigned)PHASE_PRE_IMPORT == 0); info.compILCodeSize = 0; info.compMethodHnd = nullptr; compJitTelemetry.Initialize(this); #endif fgInit(); lvaInit(); if (!compIsForInlining()) { codeGen = getCodeGenerator(this); optInit(); hashBv::Init(this); compVarScopeMap = nullptr; // If this method were a real constructor for Compiler, these would // become method initializations. impPendingBlockMembers = JitExpandArray<BYTE>(getAllocator()); impSpillCliquePredMembers = JitExpandArray<BYTE>(getAllocator()); impSpillCliqueSuccMembers = JitExpandArray<BYTE>(getAllocator()); new (&genIPmappings, jitstd::placement_t()) jitstd::list<IPmappingDsc>(getAllocator(CMK_DebugInfo)); #ifdef DEBUG new (&genPreciseIPmappings, jitstd::placement_t()) jitstd::list<PreciseIPMapping>(getAllocator(CMK_DebugOnly)); #endif lvMemoryPerSsaData = SsaDefArray<SsaMemDef>(); // // Initialize all the per-method statistics gathering data structures. // optLoopsCloned = 0; #if LOOP_HOIST_STATS m_loopsConsidered = 0; m_curLoopHasHoistedExpression = false; m_loopsWithHoistedExpressions = 0; m_totalHoistedExpressions = 0; #endif // LOOP_HOIST_STATS #if MEASURE_NODE_SIZE genNodeSizeStatsPerFunc.Init(); #endif // MEASURE_NODE_SIZE } else { codeGen = nullptr; } compJmpOpUsed = false; compLongUsed = false; compTailCallUsed = false; compTailPrefixSeen = false; compLocallocSeen = false; compLocallocUsed = false; compLocallocOptimized = false; compQmarkRationalized = false; compQmarkUsed = false; compFloatingPointUsed = false; compSuppressedZeroInit = false; compNeedsGSSecurityCookie = false; compGSReorderStackLayout = false; compGeneratingProlog = false; compGeneratingEpilog = false; compLSRADone = false; compRationalIRForm = false; #ifdef DEBUG compCodeGenDone = false; opts.compMinOptsIsUsed = false; #endif opts.compMinOptsIsSet = false; // Used by fgFindJumpTargets for inlining heuristics. opts.instrCount = 0; // Used to track when we should consider running EarlyProp optMethodFlags = 0; optNoReturnCallCount = 0; #ifdef DEBUG m_nodeTestData = nullptr; m_loopHoistCSEClass = FIRST_LOOP_HOIST_CSE_CLASS; #endif m_switchDescMap = nullptr; m_blockToEHPreds = nullptr; m_fieldSeqStore = nullptr; m_zeroOffsetFieldMap = nullptr; m_arrayInfoMap = nullptr; m_refAnyClass = nullptr; for (MemoryKind memoryKind : allMemoryKinds()) { m_memorySsaMap[memoryKind] = nullptr; } #ifdef DEBUG if (!compIsForInlining()) { compDoComponentUnitTestsOnce(); } #endif // DEBUG vnStore = nullptr; m_opAsgnVarDefSsaNums = nullptr; m_nodeToLoopMemoryBlockMap = nullptr; fgSsaPassesCompleted = 0; fgVNPassesCompleted = 0; // check that HelperCallProperties are initialized assert(s_helperCallProperties.IsPure(CORINFO_HELP_GETSHARED_GCSTATIC_BASE)); assert(!s_helperCallProperties.IsPure(CORINFO_HELP_GETFIELDOBJ)); // quick sanity check // We start with the flow graph in tree-order fgOrder = FGOrderTree; m_classLayoutTable = nullptr; #ifdef FEATURE_SIMD m_simdHandleCache = nullptr; #endif // FEATURE_SIMD compUsesThrowHelper = false; } /***************************************************************************** * * Destructor */ void Compiler::compDone() { } void* Compiler::compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */ void** ppIndirection) /* OUT */ { void* addr; if (info.compMatchedVM) { addr = info.compCompHnd->getHelperFtn(ftnNum, ppIndirection); } else { // If we don't have a matched VM, we won't get valid results when asking for a helper function. 
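        // This is the cross-targeting altjit case, where the host VM cannot resolve helpers for
        // the target; the placeholder below is never meant to be executed, it just needs to be a
        // recognizable non-null constant.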
addr = UlongToPtr(0xCA11CA11); // "callcall" } return addr; } unsigned Compiler::compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd) { var_types sigType = genActualType(JITtype2varType(cit)); unsigned sigSize; sigSize = genTypeSize(sigType); if (cit == CORINFO_TYPE_VALUECLASS) { sigSize = info.compCompHnd->getClassSize(clsHnd); } else if (cit == CORINFO_TYPE_REFANY) { sigSize = 2 * TARGET_POINTER_SIZE; } return sigSize; } #ifdef DEBUG static bool DidComponentUnitTests = false; void Compiler::compDoComponentUnitTestsOnce() { if (!JitConfig.RunComponentUnitTests()) { return; } if (!DidComponentUnitTests) { DidComponentUnitTests = true; ValueNumStore::RunTests(this); BitSetSupport::TestSuite(getAllocatorDebugOnly()); } } //------------------------------------------------------------------------ // compGetJitDefaultFill: // // Return Value: // An unsigned char value used to initizalize memory allocated by the JIT. // The default value is taken from COMPLUS_JitDefaultFill, if is not set // the value will be 0xdd. When JitStress is active a random value based // on the method hash is used. // // Notes: // Note that we can't use small values like zero, because we have some // asserts that can fire for such values. // // static unsigned char Compiler::compGetJitDefaultFill(Compiler* comp) { unsigned char defaultFill = (unsigned char)JitConfig.JitDefaultFill(); if (comp != nullptr && comp->compStressCompile(STRESS_GENERIC_VARN, 50)) { unsigned temp; temp = comp->info.compMethodHash(); temp = (temp >> 16) ^ temp; temp = (temp >> 8) ^ temp; temp = temp & 0xff; // asserts like this: assert(!IsUninitialized(stkLvl)); // mean that small values for defaultFill are problematic // so we make the value larger in that case. if (temp < 0x20) { temp |= 0x80; } // Make a misaligned pointer value to reduce probability of getting a valid value and firing // assert(!IsUninitialized(pointer)). temp |= 0x1; defaultFill = (unsigned char)temp; } return defaultFill; } #endif // DEBUG /*****************************************************************************/ #ifdef DEBUG /*****************************************************************************/ VarName Compiler::compVarName(regNumber reg, bool isFloatReg) { if (isFloatReg) { assert(genIsValidFloatReg(reg)); } else { assert(genIsValidReg(reg)); } if ((info.compVarScopesCount > 0) && compCurBB && opts.varNames) { unsigned lclNum; LclVarDsc* varDsc; /* Look for the matching register */ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { /* If the variable is not in a register, or not in the register we're looking for, quit. */ /* Also, if it is a compiler generated variable (i.e. slot# > info.compVarScopesCount), don't bother. 
*/ if ((varDsc->lvRegister != 0) && (varDsc->GetRegNum() == reg) && (varDsc->lvSlotNum < info.compVarScopesCount)) { /* check if variable in that register is live */ if (VarSetOps::IsMember(this, compCurLife, varDsc->lvVarIndex)) { /* variable is live - find the corresponding slot */ VarScopeDsc* varScope = compFindLocalVar(varDsc->lvSlotNum, compCurBB->bbCodeOffs, compCurBB->bbCodeOffsEnd); if (varScope) { return varScope->vsdName; } } } } } return nullptr; } const char* Compiler::compRegVarName(regNumber reg, bool displayVar, bool isFloatReg) { #ifdef TARGET_ARM isFloatReg = genIsValidFloatReg(reg); #endif if (displayVar && (reg != REG_NA)) { VarName varName = compVarName(reg, isFloatReg); if (varName) { const int NAME_VAR_REG_BUFFER_LEN = 4 + 256 + 1; static char nameVarReg[2][NAME_VAR_REG_BUFFER_LEN]; // to avoid overwriting the buffer when have 2 // consecutive calls before printing static int index = 0; // for circular index into the name array index = (index + 1) % 2; // circular reuse of index sprintf_s(nameVarReg[index], NAME_VAR_REG_BUFFER_LEN, "%s'%s'", getRegName(reg), VarNameToStr(varName)); return nameVarReg[index]; } } /* no debug info required or no variable in that register -> return standard name */ return getRegName(reg); } const char* Compiler::compRegNameForSize(regNumber reg, size_t size) { if (size == 0 || size >= 4) { return compRegVarName(reg, true); } // clang-format off static const char * sizeNames[][2] = { { "al", "ax" }, { "cl", "cx" }, { "dl", "dx" }, { "bl", "bx" }, #ifdef TARGET_AMD64 { "spl", "sp" }, // ESP { "bpl", "bp" }, // EBP { "sil", "si" }, // ESI { "dil", "di" }, // EDI { "r8b", "r8w" }, { "r9b", "r9w" }, { "r10b", "r10w" }, { "r11b", "r11w" }, { "r12b", "r12w" }, { "r13b", "r13w" }, { "r14b", "r14w" }, { "r15b", "r15w" }, #endif // TARGET_AMD64 }; // clang-format on assert(isByteReg(reg)); assert(genRegMask(reg) & RBM_BYTE_REGS); assert(size == 1 || size == 2); return sizeNames[reg][size - 1]; } const char* Compiler::compLocalVarName(unsigned varNum, unsigned offs) { unsigned i; VarScopeDsc* t; for (i = 0, t = info.compVarScopes; i < info.compVarScopesCount; i++, t++) { if (t->vsdVarNum != varNum) { continue; } if (offs >= t->vsdLifeBeg && offs < t->vsdLifeEnd) { return VarNameToStr(t->vsdName); } } return nullptr; } /*****************************************************************************/ #endif // DEBUG /*****************************************************************************/ void Compiler::compSetProcessor() { // // NOTE: This function needs to be kept in sync with EEJitManager::SetCpuInfo() in vm\codeman.cpp // const JitFlags& jitFlags = *opts.jitFlags; #if defined(TARGET_ARM) info.genCPU = CPU_ARM; #elif defined(TARGET_ARM64) info.genCPU = CPU_ARM64; #elif defined(TARGET_AMD64) info.genCPU = CPU_X64; #elif defined(TARGET_X86) if (jitFlags.IsSet(JitFlags::JIT_FLAG_TARGET_P4)) info.genCPU = CPU_X86_PENTIUM_4; else info.genCPU = CPU_X86; #endif // // Processor specific optimizations // CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_AMD64 opts.compUseCMOV = true; #elif defined(TARGET_X86) opts.compUseCMOV = jitFlags.IsSet(JitFlags::JIT_FLAG_USE_CMOV); #ifdef DEBUG if (opts.compUseCMOV) opts.compUseCMOV = !compStressCompile(STRESS_USE_CMOV, 50); #endif // DEBUG #endif // TARGET_X86 // The VM will set the ISA flags depending on actual hardware support // and any specified config switches specified by the user. The exception // here is for certain "artificial ISAs" such as Vector64/128/256 where they // don't actually exist. 
The JIT is in charge of adding those and ensuring // the total sum of flags is still valid. CORINFO_InstructionSetFlags instructionSetFlags = jitFlags.GetInstructionSetFlags(); opts.compSupportsISA = 0; opts.compSupportsISAReported = 0; opts.compSupportsISAExactly = 0; #if defined(TARGET_XARCH) instructionSetFlags.AddInstructionSet(InstructionSet_Vector128); instructionSetFlags.AddInstructionSet(InstructionSet_Vector256); #endif // TARGET_XARCH #if defined(TARGET_ARM64) instructionSetFlags.AddInstructionSet(InstructionSet_Vector64); instructionSetFlags.AddInstructionSet(InstructionSet_Vector128); #endif // TARGET_ARM64 instructionSetFlags = EnsureInstructionSetFlagsAreValid(instructionSetFlags); opts.setSupportedISAs(instructionSetFlags); #ifdef TARGET_XARCH if (!compIsForInlining()) { if (canUseVexEncoding()) { codeGen->GetEmitter()->SetUseVEXEncoding(true); // Assume each JITted method does not contain AVX instruction at first codeGen->GetEmitter()->SetContainsAVX(false); codeGen->GetEmitter()->SetContains256bitAVX(false); } } #endif // TARGET_XARCH } bool Compiler::notifyInstructionSetUsage(CORINFO_InstructionSet isa, bool supported) const { const char* isaString = InstructionSetToString(isa); JITDUMP("Notify VM instruction set (%s) %s be supported.\n", isaString, supported ? "must" : "must not"); return info.compCompHnd->notifyInstructionSetUsage(isa, supported); } #ifdef PROFILING_SUPPORTED // A Dummy routine to receive Enter/Leave/Tailcall profiler callbacks. // These are used when complus_JitEltHookEnabled=1 #ifdef TARGET_AMD64 void DummyProfilerELTStub(UINT_PTR ProfilerHandle, UINT_PTR callerSP) { return; } #else //! TARGET_AMD64 void DummyProfilerELTStub(UINT_PTR ProfilerHandle) { return; } #endif //! TARGET_AMD64 #endif // PROFILING_SUPPORTED bool Compiler::compShouldThrowOnNoway( #ifdef FEATURE_TRACELOGGING const char* filename, unsigned line #endif ) { #ifdef FEATURE_TRACELOGGING compJitTelemetry.NotifyNowayAssert(filename, line); #endif // In min opts, we don't want the noway assert to go through the exception // path. Instead we want it to just silently go through codegen for // compat reasons. return !opts.MinOpts(); } // ConfigInteger does not offer an option for decimal flags. Any numbers are interpreted as hex. // I could add the decimal option to ConfigInteger or I could write a function to reinterpret this // value as the user intended. unsigned ReinterpretHexAsDecimal(unsigned in) { // ex: in: 0x100 returns: 100 unsigned result = 0; unsigned index = 1; // default value if (in == INT_MAX) { return in; } while (in) { unsigned digit = in % 16; in >>= 4; assert(digit < 10); result += digit * index; index *= 10; } return result; } void Compiler::compInitOptions(JitFlags* jitFlags) { #ifdef UNIX_AMD64_ABI opts.compNeedToAlignFrame = false; #endif // UNIX_AMD64_ABI memset(&opts, 0, sizeof(opts)); if (compIsForInlining()) { // The following flags are lost when inlining. (They are removed in // Compiler::fgInvokeInlineeCompiler().) 
assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)); assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_PROF_ENTERLEAVE)); assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_EnC)); assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE)); assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_TRACK_TRANSITIONS)); } opts.jitFlags = jitFlags; opts.compFlags = CLFLG_MAXOPT; // Default value is for full optimization if (jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_CODE) || jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT) || jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)) { opts.compFlags = CLFLG_MINOPT; } // Don't optimize .cctors (except prejit) or if we're an inlinee else if (!jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && ((info.compFlags & FLG_CCTOR) == FLG_CCTOR) && !compIsForInlining()) { opts.compFlags = CLFLG_MINOPT; } // Default value is to generate a blend of size and speed optimizations // opts.compCodeOpt = BLENDED_CODE; // If the EE sets SIZE_OPT or if we are compiling a Class constructor // we will optimize for code size at the expense of speed // if (jitFlags->IsSet(JitFlags::JIT_FLAG_SIZE_OPT) || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR)) { opts.compCodeOpt = SMALL_CODE; } // // If the EE sets SPEED_OPT we will optimize for speed at the expense of code size // else if (jitFlags->IsSet(JitFlags::JIT_FLAG_SPEED_OPT) || (jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1) && !jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT))) { opts.compCodeOpt = FAST_CODE; assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_SIZE_OPT)); } //------------------------------------------------------------------------- opts.compDbgCode = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_CODE); opts.compDbgInfo = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_INFO); opts.compDbgEnC = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_EnC); #ifdef DEBUG opts.compJitAlignLoopAdaptive = JitConfig.JitAlignLoopAdaptive() == 1; opts.compJitAlignLoopBoundary = (unsigned short)JitConfig.JitAlignLoopBoundary(); opts.compJitAlignLoopMinBlockWeight = (unsigned short)JitConfig.JitAlignLoopMinBlockWeight(); opts.compJitAlignLoopForJcc = JitConfig.JitAlignLoopForJcc() == 1; opts.compJitAlignLoopMaxCodeSize = (unsigned short)JitConfig.JitAlignLoopMaxCodeSize(); opts.compJitHideAlignBehindJmp = JitConfig.JitHideAlignBehindJmp() == 1; #else opts.compJitAlignLoopAdaptive = true; opts.compJitAlignLoopBoundary = DEFAULT_ALIGN_LOOP_BOUNDARY; opts.compJitAlignLoopMinBlockWeight = DEFAULT_ALIGN_LOOP_MIN_BLOCK_WEIGHT; opts.compJitAlignLoopMaxCodeSize = DEFAULT_MAX_LOOPSIZE_FOR_ALIGN; opts.compJitHideAlignBehindJmp = true; #endif #ifdef TARGET_XARCH if (opts.compJitAlignLoopAdaptive) { // For adaptive alignment, padding limit is equal to the max instruction encoding // size which is 15 bytes. Hence (32 >> 1) - 1 = 15 bytes. opts.compJitAlignPaddingLimit = (opts.compJitAlignLoopBoundary >> 1) - 1; } else { // For non-adaptive alignment, padding limit is 1 less than the alignment boundary // specified. opts.compJitAlignPaddingLimit = opts.compJitAlignLoopBoundary - 1; } #elif TARGET_ARM64 if (opts.compJitAlignLoopAdaptive) { // For adaptive alignment, padding limit is same as specified by the alignment // boundary because all instructions are 4 bytes long. Hence (32 >> 1) = 16 bytes. opts.compJitAlignPaddingLimit = (opts.compJitAlignLoopBoundary >> 1); } else { // For non-adaptive, padding limit is same as specified by the alignment. 
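        // (e.g. a 32-byte boundary permits up to 32 bytes of padding, i.e. eight 4-byte A64
        // instructions, ahead of an aligned loop)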
opts.compJitAlignPaddingLimit = opts.compJitAlignLoopBoundary; } #endif assert(isPow2(opts.compJitAlignLoopBoundary)); #ifdef TARGET_ARM64 // The minimum encoding size for Arm64 is 4 bytes. assert(opts.compJitAlignLoopBoundary >= 4); #endif #if REGEN_SHORTCUTS || REGEN_CALLPAT // We never want to have debugging enabled when regenerating GC encoding patterns opts.compDbgCode = false; opts.compDbgInfo = false; opts.compDbgEnC = false; #endif compSetProcessor(); #ifdef DEBUG opts.dspOrder = false; // Optionally suppress inliner compiler instance dumping. // if (compIsForInlining()) { if (JitConfig.JitDumpInlinePhases() > 0) { verbose = impInlineInfo->InlinerCompiler->verbose; } else { verbose = false; } } else { verbose = false; codeGen->setVerbose(false); } verboseTrees = verbose && shouldUseVerboseTrees(); verboseSsa = verbose && shouldUseVerboseSsa(); asciiTrees = shouldDumpASCIITrees(); opts.dspDiffable = compIsForInlining() ? impInlineInfo->InlinerCompiler->opts.dspDiffable : false; #endif opts.altJit = false; #if defined(LATE_DISASM) && !defined(DEBUG) // For non-debug builds with the late disassembler built in, we currently always do late disassembly // (we have no way to determine when not to, since we don't have class/method names). // In the DEBUG case, this is initialized to false, below. opts.doLateDisasm = true; #endif #ifdef DEBUG const JitConfigValues::MethodSet* pfAltJit; if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { pfAltJit = &JitConfig.AltJitNgen(); } else { pfAltJit = &JitConfig.AltJit(); } if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT)) { if (pfAltJit->contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.altJit = true; } unsigned altJitLimit = ReinterpretHexAsDecimal(JitConfig.AltJitLimit()); if (altJitLimit > 0 && Compiler::jitTotalMethodCompiled >= altJitLimit) { opts.altJit = false; } } #else // !DEBUG const char* altJitVal; if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { altJitVal = JitConfig.AltJitNgen().list(); } else { altJitVal = JitConfig.AltJit().list(); } if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT)) { // In release mode, you either get all methods or no methods. You must use "*" as the parameter, or we ignore // it. You don't get to give a regular expression of methods to match. // (Partially, this is because we haven't computed and stored the method and class name except in debug, and it // might be expensive to do so.) if ((altJitVal != nullptr) && (strcmp(altJitVal, "*") == 0)) { opts.altJit = true; } } #endif // !DEBUG // Take care of COMPlus_AltJitExcludeAssemblies. if (opts.altJit) { // First, initialize the AltJitExcludeAssemblies list, but only do it once. if (!s_pAltJitExcludeAssembliesListInitialized) { const WCHAR* wszAltJitExcludeAssemblyList = JitConfig.AltJitExcludeAssemblies(); if (wszAltJitExcludeAssemblyList != nullptr) { // NOTE: The Assembly name list is allocated in the process heap, not in the no-release heap, which is // reclaimed // for every compilation. This is ok because we only allocate once, due to the static. s_pAltJitExcludeAssembliesList = new (HostAllocator::getHostAllocator()) AssemblyNamesList2(wszAltJitExcludeAssemblyList, HostAllocator::getHostAllocator()); } s_pAltJitExcludeAssembliesListInitialized = true; } if (s_pAltJitExcludeAssembliesList != nullptr) { // We have an exclusion list. See if this method is in an assembly that is on the list. 
// Note that we check this for every method, since we might inline across modules, and // if the inlinee module is on the list, we don't want to use the altjit for it. const char* methodAssemblyName = info.compCompHnd->getAssemblyName( info.compCompHnd->getModuleAssembly(info.compCompHnd->getClassModule(info.compClassHnd))); if (s_pAltJitExcludeAssembliesList->IsInList(methodAssemblyName)) { opts.altJit = false; } } } #ifdef DEBUG bool altJitConfig = !pfAltJit->isEmpty(); // If we have a non-empty AltJit config then we change all of these other // config values to refer only to the AltJit. Otherwise, a lot of COMPlus_* variables // would apply to both the altjit and the normal JIT, but we only care about // debugging the altjit if the COMPlus_AltJit configuration is set. // if (compIsForImportOnly() && (!altJitConfig || opts.altJit)) { if (JitConfig.JitImportBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { assert(!"JitImportBreak reached"); } } bool verboseDump = false; if (!altJitConfig || opts.altJit) { // We should only enable 'verboseDump' when we are actually compiling a matching method // and not enable it when we are just considering inlining a matching method. // if (!compIsForInlining()) { if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { if (JitConfig.NgenDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { verboseDump = true; } unsigned ngenHashDumpVal = (unsigned)JitConfig.NgenHashDump(); if ((ngenHashDumpVal != (DWORD)-1) && (ngenHashDumpVal == info.compMethodHash())) { verboseDump = true; } } else { if (JitConfig.JitDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { verboseDump = true; } unsigned jitHashDumpVal = (unsigned)JitConfig.JitHashDump(); if ((jitHashDumpVal != (DWORD)-1) && (jitHashDumpVal == info.compMethodHash())) { verboseDump = true; } } } } // Optionally suppress dumping Tier0 jit requests. // if (verboseDump && jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)) { verboseDump = (JitConfig.JitDumpTier0() > 0); } // Optionally suppress dumping except for a specific OSR jit request. // const int dumpAtOSROffset = JitConfig.JitDumpAtOSROffset(); if (verboseDump && (dumpAtOSROffset != -1)) { if (jitFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { verboseDump = (((IL_OFFSET)dumpAtOSROffset) == info.compILEntry); } else { verboseDump = false; } } if (verboseDump) { verbose = true; } #endif // DEBUG #ifdef FEATURE_SIMD // Minimum bar for availing SIMD benefits is SSE2 on AMD64/x86. featureSIMD = jitFlags->IsSet(JitFlags::JIT_FLAG_FEATURE_SIMD); setUsesSIMDTypes(false); #endif // FEATURE_SIMD lvaEnregEHVars = (compEnregLocals() && JitConfig.EnableEHWriteThru()); lvaEnregMultiRegVars = (compEnregLocals() && JitConfig.EnableMultiRegLocals()); if (compIsForImportOnly()) { return; } #if FEATURE_TAILCALL_OPT // By default opportunistic tail call optimization is enabled. // Recognition is done in the importer so this must be set for // inlinees as well. opts.compTailCallOpt = true; #endif // FEATURE_TAILCALL_OPT #if FEATURE_FASTTAILCALL // By default fast tail calls are enabled. 
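// (Both tail call defaults can still be overridden for non-inlinees by the TailCallOpt, TailCallLoopOpt and FastTailCalls config values that are read further below.)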
opts.compFastTailCalls = true; #endif // FEATURE_FASTTAILCALL // Profile data // fgPgoSchema = nullptr; fgPgoData = nullptr; fgPgoSchemaCount = 0; fgPgoQueryResult = E_FAIL; fgPgoFailReason = nullptr; fgPgoSource = ICorJitInfo::PgoSource::Unknown; if (jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT)) { fgPgoQueryResult = info.compCompHnd->getPgoInstrumentationResults(info.compMethodHnd, &fgPgoSchema, &fgPgoSchemaCount, &fgPgoData, &fgPgoSource); // a failed result that also has a non-NULL fgPgoSchema // indicates that the ILSize for the method no longer matches // the ILSize for the method when profile data was collected. // // We will discard the IBC data in this case // if (FAILED(fgPgoQueryResult)) { fgPgoFailReason = (fgPgoSchema != nullptr) ? "No matching PGO data" : "No PGO data"; fgPgoData = nullptr; fgPgoSchema = nullptr; } // Optionally, disable use of profile data. // else if (JitConfig.JitDisablePgo() > 0) { fgPgoFailReason = "PGO data available, but JitDisablePgo > 0"; fgPgoQueryResult = E_FAIL; fgPgoData = nullptr; fgPgoSchema = nullptr; fgPgoDisabled = true; } #ifdef DEBUG // Optionally, enable use of profile data for only some methods. // else { static ConfigMethodRange JitEnablePgoRange; JitEnablePgoRange.EnsureInit(JitConfig.JitEnablePgoRange()); // Base this decision on the root method hash, so a method either sees all available // profile data (including that for inlinees), or none of it. // const unsigned hash = impInlineRoot()->info.compMethodHash(); if (!JitEnablePgoRange.Contains(hash)) { fgPgoFailReason = "PGO data available, but method hash NOT within JitEnablePgoRange"; fgPgoQueryResult = E_FAIL; fgPgoData = nullptr; fgPgoSchema = nullptr; fgPgoDisabled = true; } } // A successful result implies a non-NULL fgPgoSchema // if (SUCCEEDED(fgPgoQueryResult)) { assert(fgPgoSchema != nullptr); } // A failed result implies a NULL fgPgoSchema // see implementation of Compiler::fgHaveProfileData() // if (FAILED(fgPgoQueryResult)) { assert(fgPgoSchema == nullptr); } #endif } if (compIsForInlining()) { return; } // The rest of the opts fields that we initialize here // should only be used when we generate code for the method // They should not be used when importing or inlining CLANG_FORMAT_COMMENT_ANCHOR; #if FEATURE_TAILCALL_OPT opts.compTailCallLoopOpt = true; #endif // FEATURE_TAILCALL_OPT opts.genFPorder = true; opts.genFPopt = true; opts.instrCount = 0; opts.lvRefCount = 0; #ifdef PROFILING_SUPPORTED opts.compJitELTHookEnabled = false; #endif // PROFILING_SUPPORTED #if defined(TARGET_ARM64) // 0 is default: use the appropriate frame type based on the function. opts.compJitSaveFpLrWithCalleeSavedRegisters = 0; #endif // defined(TARGET_ARM64) #ifdef DEBUG opts.dspInstrs = false; opts.dspLines = false; opts.varNames = false; opts.dmpHex = false; opts.disAsm = false; opts.disAsmSpilled = false; opts.disDiffable = false; opts.disAddr = false; opts.disAlignment = false; opts.dspCode = false; opts.dspEHTable = false; opts.dspDebugInfo = false; opts.dspGCtbls = false; opts.disAsm2 = false; opts.dspUnwind = false; opts.compLongAddress = false; opts.optRepeat = false; #ifdef LATE_DISASM opts.doLateDisasm = false; #endif // LATE_DISASM compDebugBreak = false; // If we have a non-empty AltJit config then we change all of these other // config values to refer only to the AltJit. 
// if (!altJitConfig || opts.altJit) { if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { if ((JitConfig.NgenOrder() & 1) == 1) { opts.dspOrder = true; } if (JitConfig.NgenGCDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspGCtbls = true; } if (JitConfig.NgenDisasm().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.disAsm = true; } if (JitConfig.NgenDisasm().contains("SPILLED", nullptr, nullptr)) { opts.disAsmSpilled = true; } if (JitConfig.NgenUnwindDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspUnwind = true; } if (JitConfig.NgenEHDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspEHTable = true; } if (JitConfig.NgenDebugDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspDebugInfo = true; } } else { bool disEnabled = true; // Setup assembly name list for disassembly, if not already set up. if (!s_pJitDisasmIncludeAssembliesListInitialized) { const WCHAR* assemblyNameList = JitConfig.JitDisasmAssemblies(); if (assemblyNameList != nullptr) { s_pJitDisasmIncludeAssembliesList = new (HostAllocator::getHostAllocator()) AssemblyNamesList2(assemblyNameList, HostAllocator::getHostAllocator()); } s_pJitDisasmIncludeAssembliesListInitialized = true; } // If we have an assembly name list for disassembly, also check this method's assembly. if (s_pJitDisasmIncludeAssembliesList != nullptr && !s_pJitDisasmIncludeAssembliesList->IsEmpty()) { const char* assemblyName = info.compCompHnd->getAssemblyName( info.compCompHnd->getModuleAssembly(info.compCompHnd->getClassModule(info.compClassHnd))); if (!s_pJitDisasmIncludeAssembliesList->IsInList(assemblyName)) { disEnabled = false; } } if (disEnabled) { if ((JitConfig.JitOrder() & 1) == 1) { opts.dspOrder = true; } if (JitConfig.JitGCDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspGCtbls = true; } if (JitConfig.JitDisasm().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.disAsm = true; } if (JitConfig.JitDisasm().contains("SPILLED", nullptr, nullptr)) { opts.disAsmSpilled = true; } if (JitConfig.JitUnwindDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspUnwind = true; } if (JitConfig.JitEHDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspEHTable = true; } if (JitConfig.JitDebugDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspDebugInfo = true; } } } if (opts.disAsm && JitConfig.JitDisasmWithGC()) { opts.disasmWithGC = true; } #ifdef LATE_DISASM if (JitConfig.JitLateDisasm().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) opts.doLateDisasm = true; #endif // LATE_DISASM // This one applies to both Ngen/Jit Disasm output: COMPlus_JitDiffableDasm=1 if (JitConfig.DiffableDasm() != 0) { opts.disDiffable = true; opts.dspDiffable = true; } // This one applies to both Ngen/Jit Disasm output: COMPlus_JitDasmWithAddress=1 if (JitConfig.JitDasmWithAddress() != 0) { opts.disAddr = true; } if (JitConfig.JitDasmWithAlignmentBoundaries() != 0) { opts.disAlignment = true; } if (JitConfig.JitLongAddress() != 0) { opts.compLongAddress = true; } if (JitConfig.JitOptRepeat().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.optRepeat = true; } } if (verboseDump) { opts.dspCode 
= true; opts.dspEHTable = true; opts.dspGCtbls = true; opts.disAsm2 = true; opts.dspUnwind = true; verbose = true; verboseTrees = shouldUseVerboseTrees(); verboseSsa = shouldUseVerboseSsa(); codeGen->setVerbose(true); } treesBeforeAfterMorph = (JitConfig.TreesBeforeAfterMorph() == 1); morphNum = 0; // Initialize the morphed-trees counting. expensiveDebugCheckLevel = JitConfig.JitExpensiveDebugCheckLevel(); if (expensiveDebugCheckLevel == 0) { // If we're in a stress mode that modifies the flowgraph, make 1 the default. if (fgStressBBProf() || compStressCompile(STRESS_DO_WHILE_LOOPS, 30)) { expensiveDebugCheckLevel = 1; } } if (verbose) { printf("****** START compiling %s (MethodHash=%08x)\n", info.compFullName, info.compMethodHash()); printf("Generating code for %s %s\n", Target::g_tgtPlatformName(), Target::g_tgtCPUName); printf(""); // in our logic this causes a flush } if (JitConfig.JitBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { assert(!"JitBreak reached"); } unsigned jitHashBreakVal = (unsigned)JitConfig.JitHashBreak(); if ((jitHashBreakVal != (DWORD)-1) && (jitHashBreakVal == info.compMethodHash())) { assert(!"JitHashBreak reached"); } if (verbose || JitConfig.JitDebugBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args) || JitConfig.JitBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { compDebugBreak = true; } memset(compActiveStressModes, 0, sizeof(compActiveStressModes)); // Read function list, if not already read, and there exists such a list. if (!s_pJitFunctionFileInitialized) { const WCHAR* functionFileName = JitConfig.JitFunctionFile(); if (functionFileName != nullptr) { s_pJitMethodSet = new (HostAllocator::getHostAllocator()) MethodSet(functionFileName, HostAllocator::getHostAllocator()); } s_pJitFunctionFileInitialized = true; } #endif // DEBUG //------------------------------------------------------------------------- #ifdef DEBUG assert(!codeGen->isGCTypeFixed()); opts.compGcChecks = (JitConfig.JitGCChecks() != 0) || compStressCompile(STRESS_GENERIC_VARN, 5); #endif #if defined(DEBUG) && defined(TARGET_XARCH) enum { STACK_CHECK_ON_RETURN = 0x1, STACK_CHECK_ON_CALL = 0x2, STACK_CHECK_ALL = 0x3 }; DWORD dwJitStackChecks = JitConfig.JitStackChecks(); if (compStressCompile(STRESS_GENERIC_VARN, 5)) { dwJitStackChecks = STACK_CHECK_ALL; } opts.compStackCheckOnRet = (dwJitStackChecks & DWORD(STACK_CHECK_ON_RETURN)) != 0; #if defined(TARGET_X86) opts.compStackCheckOnCall = (dwJitStackChecks & DWORD(STACK_CHECK_ON_CALL)) != 0; #endif // defined(TARGET_X86) #endif // defined(DEBUG) && defined(TARGET_XARCH) #if MEASURE_MEM_ALLOC s_dspMemStats = (JitConfig.DisplayMemStats() != 0); #endif #ifdef PROFILING_SUPPORTED opts.compNoPInvokeInlineCB = jitFlags->IsSet(JitFlags::JIT_FLAG_PROF_NO_PINVOKE_INLINE); // Cache the profiler handle if (jitFlags->IsSet(JitFlags::JIT_FLAG_PROF_ENTERLEAVE)) { bool hookNeeded; bool indirected; info.compCompHnd->GetProfilingHandle(&hookNeeded, &compProfilerMethHnd, &indirected); compProfilerHookNeeded = !!hookNeeded; compProfilerMethHndIndirected = !!indirected; } else { compProfilerHookNeeded = false; compProfilerMethHnd = nullptr; compProfilerMethHndIndirected = false; } // Honour COMPlus_JitELTHookEnabled or STRESS_PROFILER_CALLBACKS stress mode // only if VM has not asked us to generate profiler hooks in the first place. // That is, override VM only if it hasn't asked for a profiler callback for this method. 
// Don't run this stress mode when pre-JITing, as we would need to emit a relocation // for the call to the fake ELT hook, which wouldn't make sense, as we can't store that // in the pre-JIT image. if (!compProfilerHookNeeded) { if ((JitConfig.JitELTHookEnabled() != 0) || (!jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && compStressCompile(STRESS_PROFILER_CALLBACKS, 5))) { opts.compJitELTHookEnabled = true; } } // TBD: Exclude PInvoke stubs if (opts.compJitELTHookEnabled) { compProfilerMethHnd = (void*)DummyProfilerELTStub; compProfilerMethHndIndirected = false; } #endif // PROFILING_SUPPORTED #if FEATURE_TAILCALL_OPT const WCHAR* strTailCallOpt = JitConfig.TailCallOpt(); if (strTailCallOpt != nullptr) { opts.compTailCallOpt = (UINT)_wtoi(strTailCallOpt) != 0; } if (JitConfig.TailCallLoopOpt() == 0) { opts.compTailCallLoopOpt = false; } #endif #if FEATURE_FASTTAILCALL if (JitConfig.FastTailCalls() == 0) { opts.compFastTailCalls = false; } #endif // FEATURE_FASTTAILCALL #ifdef CONFIGURABLE_ARM_ABI opts.compUseSoftFP = jitFlags->IsSet(JitFlags::JIT_FLAG_SOFTFP_ABI); unsigned int softFPConfig = opts.compUseSoftFP ? 2 : 1; unsigned int oldSoftFPConfig = InterlockedCompareExchange(&GlobalJitOptions::compUseSoftFPConfigured, softFPConfig, 0); if (oldSoftFPConfig != softFPConfig && oldSoftFPConfig != 0) { // There are no current scenarios where the abi can change during the lifetime of a process // that uses the JIT. If such a change occurs, either compFeatureHfa will need to change to a TLS static // or we will need to have some means to reset the flag safely. NO_WAY("SoftFP ABI setting changed during lifetime of process"); } GlobalJitOptions::compFeatureHfa = !opts.compUseSoftFP; #elif defined(ARM_SOFTFP) && defined(TARGET_ARM) // Armel is unconditionally enabled in the JIT. Verify that the VM side agrees. assert(jitFlags->IsSet(JitFlags::JIT_FLAG_SOFTFP_ABI)); #elif defined(TARGET_ARM) assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_SOFTFP_ABI)); #endif // CONFIGURABLE_ARM_ABI opts.compScopeInfo = opts.compDbgInfo; #ifdef LATE_DISASM codeGen->getDisAssembler().disOpenForLateDisAsm(info.compMethodName, info.compClassName, info.compMethodInfo->args.pSig); #endif //------------------------------------------------------------------------- opts.compReloc = jitFlags->IsSet(JitFlags::JIT_FLAG_RELOC); #ifdef DEBUG #if defined(TARGET_XARCH) // Whether encoding of absolute addr as PC-rel offset is enabled opts.compEnablePCRelAddr = (JitConfig.EnablePCRelAddr() != 0); #endif #endif // DEBUG opts.compProcedureSplitting = jitFlags->IsSet(JitFlags::JIT_FLAG_PROCSPLIT); #ifdef TARGET_ARM64 // TODO-ARM64-NYI: enable hot/cold splitting opts.compProcedureSplitting = false; #endif // TARGET_ARM64 #ifdef DEBUG opts.compProcedureSplittingEH = opts.compProcedureSplitting; #endif // DEBUG if (opts.compProcedureSplitting) { // Note that opts.compdbgCode is true under ngen for checked assemblies! opts.compProcedureSplitting = !opts.compDbgCode; #ifdef DEBUG // JitForceProcedureSplitting is used to force procedure splitting on checked assemblies. // This is useful for debugging on a checked build. Note that we still only do procedure // splitting in the zapper. if (JitConfig.JitForceProcedureSplitting().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.compProcedureSplitting = true; } // JitNoProcedureSplitting will always disable procedure splitting. 
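// (It is checked after JitForceProcedureSplitting, so when a method matches both lists the 'no' setting wins.)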
if (JitConfig.JitNoProcedureSplitting().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.compProcedureSplitting = false; } // // JitNoProcedureSplittingEH will disable procedure splitting in functions with EH. if (JitConfig.JitNoProcedureSplittingEH().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.compProcedureSplittingEH = false; } #endif } #ifdef DEBUG // Now, set compMaxUncheckedOffsetForNullObject for STRESS_NULL_OBJECT_CHECK if (compStressCompile(STRESS_NULL_OBJECT_CHECK, 30)) { compMaxUncheckedOffsetForNullObject = (size_t)JitConfig.JitMaxUncheckedOffset(); if (verbose) { printf("STRESS_NULL_OBJECT_CHECK: compMaxUncheckedOffsetForNullObject=0x%X\n", compMaxUncheckedOffsetForNullObject); } } if (verbose) { // If we are compiling for a specific tier, make that very obvious in the output. // Note that we don't expect multiple TIER flags to be set at one time, but there // is nothing preventing that. if (jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)) { printf("OPTIONS: Tier-0 compilation (set COMPlus_TieredCompilation=0 to disable)\n"); } if (jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1)) { printf("OPTIONS: Tier-1 compilation\n"); } if (compSwitchedToOptimized) { printf("OPTIONS: Tier-0 compilation, switched to FullOpts\n"); } if (compSwitchedToMinOpts) { printf("OPTIONS: Tier-1/FullOpts compilation, switched to MinOpts\n"); } if (jitFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { printf("OPTIONS: OSR variant with entry point 0x%x\n", info.compILEntry); } printf("OPTIONS: compCodeOpt = %s\n", (opts.compCodeOpt == BLENDED_CODE) ? "BLENDED_CODE" : (opts.compCodeOpt == SMALL_CODE) ? "SMALL_CODE" : (opts.compCodeOpt == FAST_CODE) ? "FAST_CODE" : "UNKNOWN_CODE"); printf("OPTIONS: compDbgCode = %s\n", dspBool(opts.compDbgCode)); printf("OPTIONS: compDbgInfo = %s\n", dspBool(opts.compDbgInfo)); printf("OPTIONS: compDbgEnC = %s\n", dspBool(opts.compDbgEnC)); printf("OPTIONS: compProcedureSplitting = %s\n", dspBool(opts.compProcedureSplitting)); printf("OPTIONS: compProcedureSplittingEH = %s\n", dspBool(opts.compProcedureSplittingEH)); if (jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT) && fgHaveProfileData()) { printf("OPTIONS: optimized using %s profile data\n", pgoSourceToString(fgPgoSource)); } if (fgPgoFailReason != nullptr) { printf("OPTIONS: %s\n", fgPgoFailReason); } if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { printf("OPTIONS: Jit invoked for ngen\n"); } } #endif #ifdef PROFILING_SUPPORTED #ifdef UNIX_AMD64_ABI if (compIsProfilerHookNeeded()) { opts.compNeedToAlignFrame = true; } #endif // UNIX_AMD64_ABI #endif #if defined(DEBUG) && defined(TARGET_ARM64) if ((s_pJitMethodSet == nullptr) || s_pJitMethodSet->IsActiveMethod(info.compFullName, info.compMethodHash())) { opts.compJitSaveFpLrWithCalleeSavedRegisters = JitConfig.JitSaveFpLrWithCalleeSavedRegisters(); } #endif // defined(DEBUG) && defined(TARGET_ARM64) } #ifdef DEBUG bool Compiler::compJitHaltMethod() { /* This method returns true when we use an INS_BREAKPOINT to allow us to step into the generated native code */ /* Note that these two "Jit" environment variables also work for ngen images */ if (JitConfig.JitHalt().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { return true; } /* Use this Hash variant when there are a lot of methods with the same name and different signatures */ unsigned fJitHashHaltVal = (unsigned)JitConfig.JitHashHalt(); if ((fJitHashHaltVal != (unsigned)-1) && (fJitHashHaltVal == info.compMethodHash())) { return
true; } return false; } /***************************************************************************** * Should we use a "stress-mode" for the given stressArea. We have different * areas to allow the areas to be mixed in different combinations in * different methods. * 'weight' indicates how often (as a percentage) the area should be stressed. * It should reflect the usefulness:overhead ratio. */ const LPCWSTR Compiler::s_compStressModeNames[STRESS_COUNT + 1] = { #define STRESS_MODE(mode) W("STRESS_") W(#mode), STRESS_MODES #undef STRESS_MODE }; //------------------------------------------------------------------------ // compStressCompile: determine if a stress mode should be enabled // // Arguments: // stressArea - stress mode to possibly enable // weight - percent of time this mode should be turned on // (range 0 to 100); weight 0 effectively disables // // Returns: // true if this stress mode is enabled // // Notes: // Methods may be excluded from stress via name or hash. // // Particular stress modes may be disabled or forcibly enabled. // // With JitStress=2, some stress modes are enabled regardless of weight; // these modes are the ones after COUNT_VARN in the enumeration. // // For other modes or for nonzero JitStress values, stress will be // enabled selectively for roughly weight% of methods. // bool Compiler::compStressCompile(compStressArea stressArea, unsigned weight) { // This can be called early, before info is fully set up. if ((info.compMethodName == nullptr) || (info.compFullName == nullptr)) { return false; } // Inlinees defer to the root method for stress, so that we can // more easily isolate methods that cause stress failures. if (compIsForInlining()) { return impInlineRoot()->compStressCompile(stressArea, weight); } const bool doStress = compStressCompileHelper(stressArea, weight); if (doStress && !compActiveStressModes[stressArea]) { if (verbose) { printf("\n\n*** JitStress: %ws ***\n\n", s_compStressModeNames[stressArea]); } compActiveStressModes[stressArea] = 1; } return doStress; } //------------------------------------------------------------------------ // compStressCompileHelper: helper to determine if a stress mode should be enabled // // Arguments: // stressArea - stress mode to possibly enable // weight - percent of time this mode should be turned on // (range 0 to 100); weight 0 effectively disables // // Returns: // true if this stress mode is enabled // // Notes: // See compStressCompile // bool Compiler::compStressCompileHelper(compStressArea stressArea, unsigned weight) { if (!bRangeAllowStress) { return false; } if (!JitConfig.JitStressOnly().isEmpty() && !JitConfig.JitStressOnly().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { return false; } // Does user explicitly prevent using this STRESS_MODE through the command line? const WCHAR* strStressModeNamesNot = JitConfig.JitStressModeNamesNot(); if ((strStressModeNamesNot != nullptr) && (wcsstr(strStressModeNamesNot, s_compStressModeNames[stressArea]) != nullptr)) { return false; } // Does user explicitly set this STRESS_MODE through the command line? const WCHAR* strStressModeNames = JitConfig.JitStressModeNames(); if (strStressModeNames != nullptr) { if (wcsstr(strStressModeNames, s_compStressModeNames[stressArea]) != nullptr) { return true; } // This stress mode name did not match anything in the stress // mode allowlist. If user has requested only enable mode, // don't allow this stress mode to turn on. 
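// (In effect, JitStressModeNamesOnly turns the names list from 'extra modes on top of level-based stress' into an exclusive allowlist.)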
const bool onlyEnableMode = JitConfig.JitStressModeNamesOnly() != 0; if (onlyEnableMode) { return false; } } // 0: No stress (Except when explicitly set in complus_JitStressModeNames) // !=2: Vary stress. Performance will be slightly/moderately degraded // 2: Check-all stress. Performance will be REALLY horrible const int stressLevel = getJitStressLevel(); assert(weight <= MAX_STRESS_WEIGHT); // Check for boundary conditions if (stressLevel == 0 || weight == 0) { return false; } // Should we allow unlimited stress ? if ((stressArea > STRESS_COUNT_VARN) && (stressLevel == 2)) { return true; } if (weight == MAX_STRESS_WEIGHT) { return true; } // Get a hash which can be compared with 'weight' assert(stressArea != 0); const unsigned hash = (info.compMethodHash() ^ stressArea ^ stressLevel) % MAX_STRESS_WEIGHT; assert(hash < MAX_STRESS_WEIGHT && weight <= MAX_STRESS_WEIGHT); return (hash < weight); } //------------------------------------------------------------------------ // compPromoteFewerStructs: helper to determine if the local // should not be promoted under a stress mode. // // Arguments: // lclNum - local number to test // // Returns: // true if this local should not be promoted. // // Notes: // Reject ~50% of the potential promotions if STRESS_PROMOTE_FEWER_STRUCTS is active. // bool Compiler::compPromoteFewerStructs(unsigned lclNum) { bool rejectThisPromo = false; const bool promoteLess = compStressCompile(STRESS_PROMOTE_FEWER_STRUCTS, 50); if (promoteLess) { rejectThisPromo = (((info.compMethodHash() ^ lclNum) & 1) == 0); } return rejectThisPromo; } #endif // DEBUG void Compiler::compInitDebuggingInfo() { #ifdef DEBUG if (verbose) { printf("*************** In compInitDebuggingInfo() for %s\n", info.compFullName); } #endif /*------------------------------------------------------------------------- * * Get hold of the local variable records, if there are any */ info.compVarScopesCount = 0; if (opts.compScopeInfo) { eeGetVars(); } compInitVarScopeMap(); if (opts.compScopeInfo || opts.compDbgCode) { compInitScopeLists(); } if (opts.compDbgCode && (info.compVarScopesCount > 0)) { /* Create a new empty basic block. fgExtendDbgLifetimes() may add initialization of variables which are in scope right from the start of the (real) first BB (and therefore artificially marked as alive) into this block. */ fgEnsureFirstBBisScratch(); fgNewStmtAtEnd(fgFirstBB, gtNewNothingNode()); JITDUMP("Debuggable code - Add new %s to perform initialization of variables\n", fgFirstBB->dspToString()); } /*------------------------------------------------------------------------- * * Read the stmt-offsets table and the line-number table */ info.compStmtOffsetsImplicit = ICorDebugInfo::NO_BOUNDARIES; // We can only report debug info for EnC at places where the stack is empty. // Actually, at places where there are not live temps. Else, we won't be able // to map between the old and the new versions correctly as we won't have // any info for the live temps. 
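// The assert below encodes that requirement: with both EnC and debug info enabled, the only implicit boundaries accepted are STACK_EMPTY_BOUNDARIES.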
assert(!opts.compDbgEnC || !opts.compDbgInfo || 0 == (info.compStmtOffsetsImplicit & ~ICorDebugInfo::STACK_EMPTY_BOUNDARIES)); info.compStmtOffsetsCount = 0; if (opts.compDbgInfo) { /* Get hold of the line# records, if there are any */ eeGetStmtOffsets(); #ifdef DEBUG if (verbose) { printf("info.compStmtOffsetsCount = %d\n", info.compStmtOffsetsCount); printf("info.compStmtOffsetsImplicit = %04Xh", info.compStmtOffsetsImplicit); if (info.compStmtOffsetsImplicit) { printf(" ( "); if (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) { printf("STACK_EMPTY "); } if (info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) { printf("NOP "); } if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) { printf("CALL_SITE "); } printf(")"); } printf("\n"); IL_OFFSET* pOffs = info.compStmtOffsets; for (unsigned i = 0; i < info.compStmtOffsetsCount; i++, pOffs++) { printf("%02d) IL_%04Xh\n", i, *pOffs); } } #endif } } void Compiler::compSetOptimizationLevel() { bool theMinOptsValue; #pragma warning(suppress : 4101) unsigned jitMinOpts; if (compIsForInlining()) { theMinOptsValue = impInlineInfo->InlinerCompiler->opts.MinOpts(); goto _SetMinOpts; } theMinOptsValue = false; if (opts.compFlags == CLFLG_MINOPT) { JITLOG((LL_INFO100, "CLFLG_MINOPT set for method %s\n", info.compFullName)); theMinOptsValue = true; } #ifdef DEBUG jitMinOpts = JitConfig.JitMinOpts(); if (!theMinOptsValue && (jitMinOpts > 0)) { // jitTotalMethodCompiled does not include the method that is being compiled now, so make +1. unsigned methodCount = Compiler::jitTotalMethodCompiled + 1; unsigned methodCountMask = methodCount & 0xFFF; unsigned kind = (jitMinOpts & 0xF000000) >> 24; switch (kind) { default: if (jitMinOpts <= methodCount) { if (verbose) { printf(" Optimizations disabled by JitMinOpts and methodCount\n"); } theMinOptsValue = true; } break; case 0xD: { unsigned firstMinopts = (jitMinOpts >> 12) & 0xFFF; unsigned secondMinopts = (jitMinOpts >> 0) & 0xFFF; if ((firstMinopts == methodCountMask) || (secondMinopts == methodCountMask)) { if (verbose) { printf("0xD: Optimizations disabled by JitMinOpts and methodCountMask\n"); } theMinOptsValue = true; } } break; case 0xE: { unsigned startMinopts = (jitMinOpts >> 12) & 0xFFF; unsigned endMinopts = (jitMinOpts >> 0) & 0xFFF; if ((startMinopts <= methodCountMask) && (endMinopts >= methodCountMask)) { if (verbose) { printf("0xE: Optimizations disabled by JitMinOpts and methodCountMask\n"); } theMinOptsValue = true; } } break; case 0xF: { unsigned bitsZero = (jitMinOpts >> 12) & 0xFFF; unsigned bitsOne = (jitMinOpts >> 0) & 0xFFF; if (((methodCountMask & bitsOne) == bitsOne) && ((~methodCountMask & bitsZero) == bitsZero)) { if (verbose) { printf("0xF: Optimizations disabled by JitMinOpts and methodCountMask\n"); } theMinOptsValue = true; } } break; } } if (!theMinOptsValue) { if (JitConfig.JitMinOptsName().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { theMinOptsValue = true; } } #if 0 // The code in this #if can be used to debug optimization issues according to method hash. // To use, uncomment, rebuild and set environment variables minoptshashlo and minoptshashhi. 
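// Illustrative usage (example values only): minoptshashlo=0 with minoptshashhi=ffffffff would force MinOpts for every method while this block is enabled; narrow the range to isolate a single method hash.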
#ifdef DEBUG unsigned methHash = info.compMethodHash(); char* lostr = getenv("minoptshashlo"); unsigned methHashLo = 0; if (lostr != nullptr) { sscanf_s(lostr, "%x", &methHashLo); char* histr = getenv("minoptshashhi"); unsigned methHashHi = UINT32_MAX; if (histr != nullptr) { sscanf_s(histr, "%x", &methHashHi); if (methHash >= methHashLo && methHash <= methHashHi) { printf("MinOpts for method %s, hash = %08x.\n", info.compFullName, methHash); printf(""); // in our logic this causes a flush theMinOptsValue = true; } } } #endif #endif if (compStressCompile(STRESS_MIN_OPTS, 5)) { theMinOptsValue = true; } // For PREJIT we never drop down to MinOpts // unless CLFLG_MINOPT is set else if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { if ((unsigned)JitConfig.JitMinOptsCodeSize() < info.compILCodeSize) { JITLOG((LL_INFO10, "IL Code Size exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsInstrCount() < opts.instrCount) { JITLOG((LL_INFO10, "IL instruction count exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsBbCount() < fgBBcount) { JITLOG((LL_INFO10, "Basic Block count exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsLvNumCount() < lvaCount) { JITLOG((LL_INFO10, "Local Variable Num count exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsLvRefCount() < opts.lvRefCount) { JITLOG((LL_INFO10, "Local Variable Ref count exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } if (theMinOptsValue == true) { JITLOG((LL_INFO10000, "IL Code Size,Instr %4d,%4d, Basic Block count %3d, Local Variable Num,Ref count " "%3d,%3d for method %s\n", info.compILCodeSize, opts.instrCount, fgBBcount, lvaCount, opts.lvRefCount, info.compFullName)); if (JitConfig.JitBreakOnMinOpts() != 0) { assert(!"MinOpts enabled"); } } } #else // !DEBUG // Retail check if we should force Minopts due to the complexity of the method // For PREJIT we never drop down to MinOpts // unless CLFLG_MINOPT is set if (!theMinOptsValue && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && ((DEFAULT_MIN_OPTS_CODE_SIZE < info.compILCodeSize) || (DEFAULT_MIN_OPTS_INSTR_COUNT < opts.instrCount) || (DEFAULT_MIN_OPTS_BB_COUNT < fgBBcount) || (DEFAULT_MIN_OPTS_LV_NUM_COUNT < lvaCount) || (DEFAULT_MIN_OPTS_LV_REF_COUNT < opts.lvRefCount))) { theMinOptsValue = true; } #endif // DEBUG JITLOG((LL_INFO10000, "IL Code Size,Instr %4d,%4d, Basic Block count %3d, Local Variable Num,Ref count %3d,%3d for method %s\n", info.compILCodeSize, opts.instrCount, fgBBcount, lvaCount, opts.lvRefCount, info.compFullName)); #if 0 // The code in this #if has been useful in debugging loop cloning issues, by // selectively enabling the loop cloning optimization according to // method hash. #ifdef DEBUG if (!theMinOptsValue) { unsigned methHash = info.compMethodHash(); char* lostr = getenv("opthashlo"); unsigned methHashLo = 0; if (lostr != NULL) { sscanf_s(lostr, "%x", &methHashLo); // methHashLo = (unsigned(atoi(lostr)) << 2); // So we don't have to use negative numbers. } char* histr = getenv("opthashhi"); unsigned methHashHi = UINT32_MAX; if (histr != NULL) { sscanf_s(histr, "%x", &methHashHi); // methHashHi = (unsigned(atoi(histr)) << 2); // So we don't have to use negative numbers.
} if (methHash < methHashLo || methHash > methHashHi) { theMinOptsValue = true; } else { printf("Doing optimization in %s (0x%x).\n", info.compFullName, methHash); } } #endif #endif _SetMinOpts: // Set the MinOpts value opts.SetMinOpts(theMinOptsValue); // Notify the VM if MinOpts is being used when not requested if (theMinOptsValue && !compIsForInlining() && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT) && !opts.compDbgCode) { info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_SWITCHED_TO_MIN_OPT); opts.jitFlags->Clear(JitFlags::JIT_FLAG_TIER1); compSwitchedToMinOpts = true; } #ifdef DEBUG if (verbose && !compIsForInlining()) { printf("OPTIONS: opts.MinOpts() == %s\n", opts.MinOpts() ? "true" : "false"); } #endif /* Control the optimizations */ if (opts.OptimizationDisabled()) { opts.compFlags &= ~CLFLG_MAXOPT; opts.compFlags |= CLFLG_MINOPT; } if (!compIsForInlining()) { codeGen->setFramePointerRequired(false); codeGen->setFrameRequired(false); if (opts.OptimizationDisabled()) { codeGen->setFrameRequired(true); } #if !defined(TARGET_AMD64) // The VM sets JitFlags::JIT_FLAG_FRAMED for two reasons: (1) the COMPlus_JitFramed variable is set, or // (2) the function is marked "noinline". The reason for #2 is that people mark functions // noinline to ensure they show up in a stack walk. But for AMD64, we don't need a frame // pointer for the frame to show up in a stack walk. if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_FRAMED)) codeGen->setFrameRequired(true); #endif if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { // The JIT doesn't currently support loop alignment for prejitted images. // (The JIT doesn't know the final address of the code, hence // it can't align code based on unknown addresses.) codeGen->SetAlignLoops(false); // loop alignment not supported for prejitted code } else { codeGen->SetAlignLoops(JitConfig.JitAlignLoops() == 1); } } #if TARGET_ARM // A single JitStress=1 Linux ARM32 test fails when we expand virtual calls early // JIT\HardwareIntrinsics\General\Vector128_1\Vector128_1_ro // opts.compExpandCallsEarly = (JitConfig.JitExpandCallsEarly() == 2); #else opts.compExpandCallsEarly = (JitConfig.JitExpandCallsEarly() != 0); #endif fgCanRelocateEHRegions = true; } #ifdef TARGET_ARMARCH // Function compRsvdRegCheck: // given a curState to use for calculating the total frame size // it will return true if the REG_OPT_RSVD should be reserved so // that it can be used to form large offsets when accessing stack // based LclVars including both incoming and outgoing argument areas. // // The method advances the frame layout state to curState by calling // lvaFrameSize(curState). // bool Compiler::compRsvdRegCheck(FrameLayoutState curState) { // Always do the layout even if returning early. Callers might // depend on us to do the layout. unsigned frameSize = lvaFrameSize(curState); JITDUMP("\n" "compRsvdRegCheck\n" " frame size = %6d\n" " compArgSize = %6d\n", frameSize, compArgSize); if (opts.MinOpts()) { // Have a recovery path in case we fail to reserve REG_OPT_RSVD and go // over the limit of SP and FP offset ranges due to large // temps. JITDUMP(" Returning true (MinOpts)\n\n"); return true; } unsigned calleeSavedRegMaxSz = CALLEE_SAVED_REG_MAXSZ; if (compFloatingPointUsed) { calleeSavedRegMaxSz += CALLEE_SAVED_FLOAT_MAXSZ; } calleeSavedRegMaxSz += REGSIZE_BYTES; // we always push LR.
See genPushCalleeSavedRegisters noway_assert(frameSize >= calleeSavedRegMaxSz); #if defined(TARGET_ARM64) // TODO-ARM64-CQ: update this! JITDUMP(" Returning true (ARM64)\n\n"); return true; // just always assume we'll need it, for now #else // TARGET_ARM // frame layout: // // ... high addresses ... // frame contents size // ------------------- ------------------------ // inArgs compArgSize (includes prespill) // caller SP ---> // prespill // LR REGSIZE_BYTES // R11 ---> R11 REGSIZE_BYTES // callee saved regs CALLEE_SAVED_REG_MAXSZ (32 bytes) // optional saved fp regs CALLEE_SAVED_FLOAT_MAXSZ (64 bytes) // lclSize // incl. TEMPS MAX_SPILL_TEMP_SIZE // incl. outArgs // SP ---> // ... low addresses ... // // When codeGen->isFramePointerRequired is true, R11 will be established as a frame pointer. // We can then use R11 to access incoming args with positive offsets, and LclVars with // negative offsets. // // In functions with EH, in the non-funclet (or main) region, even though we will have a // frame pointer, we can use SP with positive offsets to access any or all locals or arguments // that we can reach with SP-relative encodings. The funclet region might require the reserved // register, since it must use offsets from R11 to access the parent frame. unsigned maxR11PositiveEncodingOffset = compFloatingPointUsed ? 0x03FC : 0x0FFF; JITDUMP(" maxR11PositiveEncodingOffset = %6d\n", maxR11PositiveEncodingOffset); // Floating point load/store instructions (VLDR/VSTR) can address up to -0x3FC from R11, but we // don't know if there are either no integer locals, or if we don't need large negative offsets // for the integer locals, so we must use the integer max negative offset, which is a // smaller (absolute value) number. unsigned maxR11NegativeEncodingOffset = 0x00FF; // This is a negative offset from R11. JITDUMP(" maxR11NegativeEncodingOffset = %6d\n", maxR11NegativeEncodingOffset); // -1 because otherwise we are computing the address just beyond the last argument, which we don't need to do. unsigned maxR11PositiveOffset = compArgSize + (2 * REGSIZE_BYTES) - 1; JITDUMP(" maxR11PositiveOffset = %6d\n", maxR11PositiveOffset); // The value is positive, but represents a negative offset from R11. // frameSize includes callee-saved space for R11 and LR, which are at non-negative offsets from R11 // (+0 and +4, respectively), so don't include those in the max possible negative offset. assert(frameSize >= (2 * REGSIZE_BYTES)); unsigned maxR11NegativeOffset = frameSize - (2 * REGSIZE_BYTES); JITDUMP(" maxR11NegativeOffset = %6d\n", maxR11NegativeOffset); if (codeGen->isFramePointerRequired()) { if (maxR11NegativeOffset > maxR11NegativeEncodingOffset) { JITDUMP(" Returning true (frame required and maxR11NegativeOffset)\n\n"); return true; } if (maxR11PositiveOffset > maxR11PositiveEncodingOffset) { JITDUMP(" Returning true (frame required and maxR11PositiveOffset)\n\n"); return true; } } // Now consider the SP based frame case. Note that we will use SP based offsets to access the stack in R11 based // frames in the non-funclet main code area. unsigned maxSPPositiveEncodingOffset = compFloatingPointUsed ? 0x03FC : 0x0FFF; JITDUMP(" maxSPPositiveEncodingOffset = %6d\n", maxSPPositiveEncodingOffset); // -1 because otherwise we are computing the address just beyond the last argument, which we don't need to do. 
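// Illustrative numbers (for exposition only): compArgSize = 0x20 and frameSize = 0x1000 give maxSPPositiveOffset = 0x101F, which exceeds the 0x0FFF integer encoding limit, so the SP-only case below would need the reserved register.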
assert(compArgSize + frameSize > 0); unsigned maxSPPositiveOffset = compArgSize + frameSize - 1; if (codeGen->isFramePointerUsed()) { // We have a frame pointer, so we can use it to access part of the stack, even if SP can't reach those parts. // We will still generate SP-relative offsets if SP can reach. // First, check that the stack between R11 and SP can be fully reached, either via negative offset from FP // or positive offset from SP. Don't count stored R11 or LR, which are reached from positive offsets from FP. unsigned maxSPLocalsCombinedOffset = frameSize - (2 * REGSIZE_BYTES) - 1; JITDUMP(" maxSPLocalsCombinedOffset = %6d\n", maxSPLocalsCombinedOffset); if (maxSPLocalsCombinedOffset > maxSPPositiveEncodingOffset) { // Can R11 help? unsigned maxRemainingLocalsCombinedOffset = maxSPLocalsCombinedOffset - maxSPPositiveEncodingOffset; JITDUMP(" maxRemainingLocalsCombinedOffset = %6d\n", maxRemainingLocalsCombinedOffset); if (maxRemainingLocalsCombinedOffset > maxR11NegativeEncodingOffset) { JITDUMP(" Returning true (frame pointer exists; R11 and SP can't reach entire stack between them)\n\n"); return true; } // Otherwise, yes, we can address the remaining parts of the locals frame with negative offsets from R11. } // Check whether either R11 or SP can access the arguments. if ((maxR11PositiveOffset > maxR11PositiveEncodingOffset) && (maxSPPositiveOffset > maxSPPositiveEncodingOffset)) { JITDUMP(" Returning true (frame pointer exists; R11 and SP can't reach all arguments)\n\n"); return true; } } else { if (maxSPPositiveOffset > maxSPPositiveEncodingOffset) { JITDUMP(" Returning true (no frame pointer exists; SP can't reach all of frame)\n\n"); return true; } } // We won't need to reserve REG_OPT_RSVD. // JITDUMP(" Returning false\n\n"); return false; #endif // TARGET_ARM } #endif // TARGET_ARMARCH //------------------------------------------------------------------------ // compGetTieringName: get a string describing tiered compilation settings // for this method // // Arguments: // wantShortName - true if a short name is ok (say for using in file names) // // Returns: // String describing tiering decisions for this method, including cases // where the jit codegen will differ from what the runtime requested. // const char* Compiler::compGetTieringName(bool wantShortName) const { const bool tier0 = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0); const bool tier1 = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1); assert(!tier0 || !tier1); // We don't expect multiple TIER flags to be set at one time. if (tier0) { return "Tier0"; } else if (tier1) { if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { return "Tier1-OSR"; } else { return "Tier1"; } } else if (opts.OptimizationEnabled()) { if (compSwitchedToOptimized) { return wantShortName ? "Tier0-FullOpts" : "Tier-0 switched to FullOpts"; } else { return "FullOpts"; } } else if (opts.MinOpts()) { if (compSwitchedToMinOpts) { if (compSwitchedToOptimized) { return wantShortName ? "Tier0-FullOpts-MinOpts" : "Tier-0 switched to FullOpts, then to MinOpts"; } else { return wantShortName ? "Tier0-MinOpts" : "Tier-0 switched MinOpts"; } } else { return "MinOpts"; } } else if (opts.compDbgCode) { return "Debug"; } else { return wantShortName ? 
"Unknown" : "Unknown optimization level"; } } //------------------------------------------------------------------------ // compGetStressMessage: get a string describing jitstress capability // for this method // // Returns: // An empty string if stress is not enabled, else a string describing // if this method is subject to stress or is excluded by name or hash. // const char* Compiler::compGetStressMessage() const { // Add note about stress where appropriate const char* stressMessage = ""; #ifdef DEBUG // Is stress enabled via mode name or level? if ((JitConfig.JitStressModeNames() != nullptr) || (getJitStressLevel() > 0)) { // Is the method being jitted excluded from stress via range? if (bRangeAllowStress) { // Or is it excluded via name? if (!JitConfig.JitStressOnly().isEmpty() || !JitConfig.JitStressOnly().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { // Not excluded -- stress can happen stressMessage = " JitStress"; } else { stressMessage = " NoJitStress(Only)"; } } else { stressMessage = " NoJitStress(Range)"; } } #endif // DEBUG return stressMessage; } void Compiler::compFunctionTraceStart() { #ifdef DEBUG if (compIsForInlining()) { return; } if ((JitConfig.JitFunctionTrace() != 0) && !opts.disDiffable) { LONG newJitNestingLevel = InterlockedIncrement(&Compiler::jitNestingLevel); if (newJitNestingLevel <= 0) { printf("{ Illegal nesting level %d }\n", newJitNestingLevel); } for (LONG i = 0; i < newJitNestingLevel - 1; i++) { printf(" "); } printf("{ Start Jitting Method %4d %s (MethodHash=%08x) %s\n", Compiler::jitTotalMethodCompiled, info.compFullName, info.compMethodHash(), compGetTieringName()); /* } editor brace matching workaround for this printf */ } #endif // DEBUG } void Compiler::compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI) { #ifdef DEBUG assert(!compIsForInlining()); if ((JitConfig.JitFunctionTrace() != 0) && !opts.disDiffable) { LONG newJitNestingLevel = InterlockedDecrement(&Compiler::jitNestingLevel); if (newJitNestingLevel < 0) { printf("{ Illegal nesting level %d }\n", newJitNestingLevel); } for (LONG i = 0; i < newJitNestingLevel; i++) { printf(" "); } // Note: that is incorrect if we are compiling several methods at the same time. unsigned methodNumber = Compiler::jitTotalMethodCompiled - 1; /* { editor brace-matching workaround for following printf */ printf("} Jitted Method %4d at" FMT_ADDR "method %s size %08x%s%s\n", methodNumber, DBG_ADDR(methodCodePtr), info.compFullName, methodCodeSize, isNYI ? " NYI" : (compIsForImportOnly() ? " import only" : ""), opts.altJit ? 
" altjit" : ""); } #endif // DEBUG } //------------------------------------------------------------------------ // BeginPhase: begin execution of a phase // // Arguments: // phase - the phase that is about to begin // void Compiler::BeginPhase(Phases phase) { mostRecentlyActivePhase = phase; } //------------------------------------------------------------------------ // EndPhase: finish execution of a phase // // Arguments: // phase - the phase that has just finished // void Compiler::EndPhase(Phases phase) { #if defined(FEATURE_JIT_METHOD_PERF) if (pCompJitTimer != nullptr) { pCompJitTimer->EndPhase(this, phase); } #endif mostRecentlyActivePhase = phase; } //------------------------------------------------------------------------ // compCompile: run phases needed for compilation // // Arguments: // methodCodePtr [OUT] - address of generated code // methodCodeSize [OUT] - size of the generated code (hot + cold setions) // compileFlags [IN] - flags controlling jit behavior // // Notes: // This is the most interesting 'toplevel' function in the JIT. It goes through the operations of // importing, morphing, optimizations and code generation. This is called from the EE through the // code:CILJit::compileMethod function. // // For an overview of the structure of the JIT, see: // https://github.com/dotnet/runtime/blob/main/docs/design/coreclr/jit/ryujit-overview.md // // Also called for inlinees, though they will only be run through the first few phases. // void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags) { // Prepare for importation // auto preImportPhase = [this]() { if (compIsForInlining()) { // Notify root instance that an inline attempt is about to import IL impInlineRoot()->m_inlineStrategy->NoteImport(); } hashBv::Init(this); VarSetOps::AssignAllowUninitRhs(this, compCurLife, VarSetOps::UninitVal()); // The temp holding the secret stub argument is used by fgImport() when importing the intrinsic. if (info.compPublishStubParam) { assert(lvaStubArgumentVar == BAD_VAR_NUM); lvaStubArgumentVar = lvaGrabTempWithImplicitUse(false DEBUGARG("stub argument")); lvaGetDesc(lvaStubArgumentVar)->lvType = TYP_I_IMPL; // TODO-CQ: there is no need to mark it as doNotEnreg. There are no stores for this local // before codegen so liveness and LSRA mark it as "liveIn" and always allocate a stack slot for it. // However, it would be better to process it like other argument locals and keep it in // a reg for the whole method without spilling to the stack when possible. lvaSetVarDoNotEnregister(lvaStubArgumentVar DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr)); } }; DoPhase(this, PHASE_PRE_IMPORT, preImportPhase); compFunctionTraceStart(); // Incorporate profile data. // // Note: the importer is sensitive to block weights, so this has // to happen before importation. // DoPhase(this, PHASE_INCPROFILE, &Compiler::fgIncorporateProfileData); // If we're going to instrument code, we may need to prepare before // we import. // if (compileFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { DoPhase(this, PHASE_IBCPREP, &Compiler::fgPrepareToInstrumentMethod); } // Import: convert the instrs in each basic block to a tree based intermediate representation // DoPhase(this, PHASE_IMPORTATION, &Compiler::fgImport); // Expand any patchpoints // DoPhase(this, PHASE_PATCHPOINTS, &Compiler::fgTransformPatchpoints); // If instrumenting, add block and class probes. 
// if (compileFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { DoPhase(this, PHASE_IBCINSTR, &Compiler::fgInstrumentMethod); } // Transform indirect calls that require control flow expansion. // DoPhase(this, PHASE_INDXCALL, &Compiler::fgTransformIndirectCalls); // PostImportPhase: cleanup inlinees // auto postImportPhase = [this]() { // If this is a viable inline candidate if (compIsForInlining() && !compDonotInline()) { // Filter out unimported BBs in the inlinee // fgPostImportationCleanup(); // Update type of return spill temp if we have gathered // better info when importing the inlinee, and the return // spill temp is single def. if (fgNeedReturnSpillTemp()) { CORINFO_CLASS_HANDLE retExprClassHnd = impInlineInfo->retExprClassHnd; if (retExprClassHnd != nullptr) { LclVarDsc* returnSpillVarDsc = lvaGetDesc(lvaInlineeReturnSpillTemp); if (returnSpillVarDsc->lvSingleDef) { lvaUpdateClass(lvaInlineeReturnSpillTemp, retExprClassHnd, impInlineInfo->retExprClassHndIsExact); } } } } }; DoPhase(this, PHASE_POST_IMPORT, postImportPhase); // If we're importing for inlining, we're done. if (compIsForInlining()) { #ifdef FEATURE_JIT_METHOD_PERF if (pCompJitTimer != nullptr) { #if MEASURE_CLRAPI_CALLS EndPhase(PHASE_CLR_API); #endif pCompJitTimer->Terminate(this, CompTimeSummaryInfo::s_compTimeSummary, false); } #endif return; } // At this point in the phase list, all the inlinee phases have // been run, and inlinee compiles have exited, so we should only // get this far if we are jitting the root method. noway_assert(!compIsForInlining()); // Maybe the caller was not interested in generating code if (compIsForImportOnly()) { compFunctionTraceEnd(nullptr, 0, false); return; } #if !FEATURE_EH // If we aren't yet supporting EH in a compiler bring-up, remove as many EH handlers as possible, so // we can pass tests that contain try/catch EH, but don't actually throw any exceptions. fgRemoveEH(); #endif // !FEATURE_EH // We could allow ESP frames. Just need to reserve space for // pushing EBP if the method becomes an EBP-frame after an edit. // Note that requiring an EBP frame disallows double alignment. Thus if we change this // we either have to disallow double alignment for E&C some other way or handle it in EETwain. if (opts.compDbgEnC) { codeGen->setFramePointerRequired(true); // We don't care about localloc right now. If we do support it, // EECodeManager::FixContextForEnC() needs to handle it smartly // in case the localloc was actually executed. // // compLocallocUsed = true; } // Start phases that are broadly called morphing, which include // global morph, as well as other phases that massage the trees so // that we can generate code out of them. // auto morphInitPhase = [this]() { // Initialize the BlockSet epoch NewBasicBlockEpoch(); fgOutgoingArgTemps = nullptr; // Insert call to class constructor as the first basic block if // we were asked to do so.
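// ('Asked to do so' means the initClass query just below, made with null field and method handles, reports CORINFO_INITCLASS_USE_HELPER for this context.)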
if (info.compCompHnd->initClass(nullptr /* field */, nullptr /* method */, impTokenLookupContextHandle /* context */) & CORINFO_INITCLASS_USE_HELPER) { fgEnsureFirstBBisScratch(); fgNewStmtAtBeg(fgFirstBB, fgInitThisClass()); } #ifdef DEBUG if (opts.compGcChecks) { for (unsigned i = 0; i < info.compArgsCount; i++) { if (lvaGetDesc(i)->TypeGet() == TYP_REF) { // confirm that the argument is a GC pointer (for debugging (GC stress)) GenTree* op = gtNewLclvNode(i, TYP_REF); GenTreeCall::Use* args = gtNewCallArgs(op); op = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_VOID, args); fgEnsureFirstBBisScratch(); fgNewStmtAtEnd(fgFirstBB, op); if (verbose) { printf("\ncompGcChecks tree:\n"); gtDispTree(op); } } } } #endif // DEBUG #if defined(DEBUG) && defined(TARGET_XARCH) if (opts.compStackCheckOnRet) { lvaReturnSpCheck = lvaGrabTempWithImplicitUse(false DEBUGARG("ReturnSpCheck")); lvaSetVarDoNotEnregister(lvaReturnSpCheck, DoNotEnregisterReason::ReturnSpCheck); lvaGetDesc(lvaReturnSpCheck)->lvType = TYP_I_IMPL; } #endif // defined(DEBUG) && defined(TARGET_XARCH) #if defined(DEBUG) && defined(TARGET_X86) if (opts.compStackCheckOnCall) { lvaCallSpCheck = lvaGrabTempWithImplicitUse(false DEBUGARG("CallSpCheck")); lvaGetDesc(lvaCallSpCheck)->lvType = TYP_I_IMPL; } #endif // defined(DEBUG) && defined(TARGET_X86) // Update flow graph after importation. // Removes un-imported blocks, trims EH, and ensures correct OSR entry flow. // fgPostImportationCleanup(); }; DoPhase(this, PHASE_MORPH_INIT, morphInitPhase); #ifdef DEBUG // Inliner could add basic blocks. Check that the flowgraph data is up-to-date fgDebugCheckBBlist(false, false); #endif // DEBUG // Inline callee methods into this root method // DoPhase(this, PHASE_MORPH_INLINE, &Compiler::fgInline); // Record "start" values for post-inlining cycles and elapsed time. RecordStateAtEndOfInlining(); // Transform each GT_ALLOCOBJ node into either an allocation helper call or // local variable allocation on the stack. 
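// (Stack allocation is only attempted when compObjectStackAllocation() is enabled and optimizations are on; otherwise ObjectAllocator::Run rewrites every GT_ALLOCOBJ into a helper call.)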
ObjectAllocator objectAllocator(this); // PHASE_ALLOCATE_OBJECTS if (compObjectStackAllocation() && opts.OptimizationEnabled()) { objectAllocator.EnableObjectStackAllocation(); } objectAllocator.Run(); // Add any internal blocks/trees we may need // DoPhase(this, PHASE_MORPH_ADD_INTERNAL, &Compiler::fgAddInternal); // Remove empty try regions // DoPhase(this, PHASE_EMPTY_TRY, &Compiler::fgRemoveEmptyTry); // Remove empty finally regions // DoPhase(this, PHASE_EMPTY_FINALLY, &Compiler::fgRemoveEmptyFinally); // Streamline chains of finally invocations // DoPhase(this, PHASE_MERGE_FINALLY_CHAINS, &Compiler::fgMergeFinallyChains); // Clone code in finallys to reduce overhead for non-exceptional paths // DoPhase(this, PHASE_CLONE_FINALLY, &Compiler::fgCloneFinally); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Update finally target flags after EH optimizations // DoPhase(this, PHASE_UPDATE_FINALLY_FLAGS, &Compiler::fgUpdateFinallyTargetFlags); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #if DEBUG if (lvaEnregEHVars) { unsigned methHash = info.compMethodHash(); char* lostr = getenv("JitEHWTHashLo"); unsigned methHashLo = 0; bool dump = false; if (lostr != nullptr) { sscanf_s(lostr, "%x", &methHashLo); dump = true; } char* histr = getenv("JitEHWTHashHi"); unsigned methHashHi = UINT32_MAX; if (histr != nullptr) { sscanf_s(histr, "%x", &methHashHi); dump = true; } if (methHash < methHashLo || methHash > methHashHi) { lvaEnregEHVars = false; } else if (dump) { printf("Enregistering EH Vars for method %s, hash = 0x%x.\n", info.compFullName, info.compMethodHash()); printf(""); // flush } } if (lvaEnregMultiRegVars) { unsigned methHash = info.compMethodHash(); char* lostr = getenv("JitMultiRegHashLo"); unsigned methHashLo = 0; bool dump = false; if (lostr != nullptr) { sscanf_s(lostr, "%x", &methHashLo); dump = true; } char* histr = getenv("JitMultiRegHashHi"); unsigned methHashHi = UINT32_MAX; if (histr != nullptr) { sscanf_s(histr, "%x", &methHashHi); dump = true; } if (methHash < methHashLo || methHash > methHashHi) { lvaEnregMultiRegVars = false; } else if (dump) { printf("Enregistering MultiReg Vars for method %s, hash = 0x%x.\n", info.compFullName, info.compMethodHash()); printf(""); // flush } } #endif // Compute bbNum, bbRefs and bbPreds // // This is the first time full (not cheap) preds will be computed. // And, if we have profile data, we can now check integrity. // // From this point on the flowgraph information such as bbNum, // bbRefs or bbPreds has to be kept updated. // auto computePredsPhase = [this]() { JITDUMP("\nRenumbering the basic blocks for fgComputePred\n"); fgRenumberBlocks(); noway_assert(!fgComputePredsDone); fgComputePreds(); }; DoPhase(this, PHASE_COMPUTE_PREDS, computePredsPhase); // Now that we have pred lists, do some flow-related optimizations // if (opts.OptimizationEnabled()) { // Merge common throw blocks // DoPhase(this, PHASE_MERGE_THROWS, &Compiler::fgTailMergeThrows); // Run an early flow graph simplification pass // auto earlyUpdateFlowGraphPhase = [this]() { constexpr bool doTailDup = false; fgUpdateFlowGraph(doTailDup); }; DoPhase(this, PHASE_EARLY_UPDATE_FLOW_GRAPH, earlyUpdateFlowGraphPhase); } // Promote struct locals // auto promoteStructsPhase = [this]() { // For x64 and ARM64 we need to mark irregular parameters lvaRefCountState = RCS_EARLY; fgResetImplicitByRefRefCount(); fgPromoteStructs(); }; DoPhase(this, PHASE_PROMOTE_STRUCTS, promoteStructsPhase); // Figure out what locals are address-taken. 
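// Illustrative sketch (not part of the JIT build): "address-exposed" at the
// source level, the property fgMarkAddressExposedLocals records below. Once
// a local's address escapes, stores through the pointer must be visible, so
// the local needs a stack home and cannot simply live in a register.
#if 0
void consume(int* p); // hypothetical callee that may keep or write through p

int notExposed()
{
    int x = 1;   // address never taken: free to be enregistered
    return x + 1;
}

int exposed()
{
    int x = 1;
    consume(&x); // address escapes: x must stay addressable in memory
    return x;
}
#endif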
// DoPhase(this, PHASE_STR_ADRLCL, &Compiler::fgMarkAddressExposedLocals); // Run a simple forward substitution pass. // DoPhase(this, PHASE_FWD_SUB, &Compiler::fgForwardSub); // Apply the type update to implicit byref parameters; also choose (based on address-exposed // analysis) which implicit byref promotions to keep (requires copy to initialize) or discard. // DoPhase(this, PHASE_MORPH_IMPBYREF, &Compiler::fgRetypeImplicitByRefArgs); #ifdef DEBUG // Now that locals have address-taken and implicit byref marked, we can safely apply stress. lvaStressLclFld(); fgStress64RsltMul(); #endif // DEBUG // Morph the trees in all the blocks of the method // auto morphGlobalPhase = [this]() { unsigned prevBBCount = fgBBcount; fgMorphBlocks(); // Fix any LclVar annotations on discarded struct promotion temps for implicit by-ref args fgMarkDemotedImplicitByRefArgs(); lvaRefCountState = RCS_INVALID; #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) if (fgNeedToAddFinallyTargetBits) { // We previously wiped out the BBF_FINALLY_TARGET bits due to some morphing; add them back. fgAddFinallyTargetFlags(); fgNeedToAddFinallyTargetBits = false; } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Decide the kind of code we want to generate fgSetOptions(); fgExpandQmarkNodes(); #ifdef DEBUG compCurBB = nullptr; #endif // DEBUG // If we needed to create any new BasicBlocks then renumber the blocks if (fgBBcount > prevBBCount) { fgRenumberBlocks(); } // We can now enable all phase checking activePhaseChecks = PhaseChecks::CHECK_ALL; }; DoPhase(this, PHASE_MORPH_GLOBAL, morphGlobalPhase); // GS security checks for unsafe buffers // auto gsPhase = [this]() { unsigned prevBBCount = fgBBcount; if (getNeedsGSSecurityCookie()) { gsGSChecksInitCookie(); if (compGSReorderStackLayout) { gsCopyShadowParams(); } // If we needed to create any new BasicBlocks then renumber the blocks if (fgBBcount > prevBBCount) { fgRenumberBlocks(); } } else { JITDUMP("No GS security needed\n"); } }; DoPhase(this, PHASE_GS_COOKIE, gsPhase); // Compute the block and edge weights // DoPhase(this, PHASE_COMPUTE_EDGE_WEIGHTS, &Compiler::fgComputeBlockAndEdgeWeights); #if defined(FEATURE_EH_FUNCLETS) // Create funclets from the EH handlers. // DoPhase(this, PHASE_CREATE_FUNCLETS, &Compiler::fgCreateFunclets); #endif // FEATURE_EH_FUNCLETS if (opts.OptimizationEnabled()) { // Invert loops // DoPhase(this, PHASE_INVERT_LOOPS, &Compiler::optInvertLoops); // Optimize block order // DoPhase(this, PHASE_OPTIMIZE_LAYOUT, &Compiler::optOptimizeLayout); // Compute reachability sets and dominators. // DoPhase(this, PHASE_COMPUTE_REACHABILITY, &Compiler::fgComputeReachability); // Scale block weights and mark run rarely blocks. // DoPhase(this, PHASE_SET_BLOCK_WEIGHTS, &Compiler::optSetBlockWeights); // Discover and classify natural loops (e.g. mark iterative loops as such). Also marks loop blocks // and sets bbWeight to the loop nesting levels. // DoPhase(this, PHASE_FIND_LOOPS, &Compiler::optFindLoopsPhase); // Clone loops with optimization opportunities, and choose one based on dynamic condition evaluation. // DoPhase(this, PHASE_CLONE_LOOPS, &Compiler::optCloneLoops); // Unroll loops // DoPhase(this, PHASE_UNROLL_LOOPS, &Compiler::optUnrollLoops); // Clear loop table info that is not used after this point, and might become invalid. 
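// Illustrative sketch (not part of the JIT build): the loop cloning idea from
// optCloneLoops above, shown at the source level. The loop is duplicated and
// a runtime check selects a fast path whose per-iteration bounds checks have
// been proven away, falling back to the fully checked copy otherwise.
#if 0
#include <cstddef>
#include <stdexcept>

int sumPrefix(const int* data, std::size_t dataLen, std::size_t n)
{
    int sum = 0;
    if (n <= dataLen)
    {
        // Fast path: the guard proves every access is in bounds.
        for (std::size_t i = 0; i < n; i++)
        {
            sum += data[i];
        }
    }
    else
    {
        // Slow path: keep the per-iteration range check.
        for (std::size_t i = 0; i < n; i++)
        {
            if (i >= dataLen)
            {
                throw std::out_of_range("index");
            }
            sum += data[i];
        }
    }
    return sum;
}
#endif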
// DoPhase(this, PHASE_CLEAR_LOOP_INFO, &Compiler::optClearLoopIterInfo); } #ifdef DEBUG fgDebugCheckLinks(); #endif // Create the variable table (and compute variable ref counts) // DoPhase(this, PHASE_MARK_LOCAL_VARS, &Compiler::lvaMarkLocalVars); // IMPORTANT, after this point, locals are ref counted. // However, ref counts are not kept incrementally up to date. assert(lvaLocalVarRefCounted()); if (opts.OptimizationEnabled()) { // Optimize boolean conditions // DoPhase(this, PHASE_OPTIMIZE_BOOLS, &Compiler::optOptimizeBools); // optOptimizeBools() might have changed the number of blocks; the dominators/reachability might be bad. } // Figure out the order in which operators are to be evaluated // DoPhase(this, PHASE_FIND_OPER_ORDER, &Compiler::fgFindOperOrder); // Weave the tree lists. Anyone who modifies the tree shapes after // this point is responsible for calling fgSetStmtSeq() to keep the // nodes properly linked. // This can create GC poll calls, and create new BasicBlocks (without updating dominators/reachability). // DoPhase(this, PHASE_SET_BLOCK_ORDER, &Compiler::fgSetBlockOrder); // At this point we know if we are fully interruptible or not if (opts.OptimizationEnabled()) { bool doSsa = true; bool doEarlyProp = true; bool doValueNum = true; bool doLoopHoisting = true; bool doCopyProp = true; bool doBranchOpt = true; bool doAssertionProp = true; bool doRangeAnalysis = true; int iterations = 1; #if defined(OPT_CONFIG) doSsa = (JitConfig.JitDoSsa() != 0); doEarlyProp = doSsa && (JitConfig.JitDoEarlyProp() != 0); doValueNum = doSsa && (JitConfig.JitDoValueNumber() != 0); doLoopHoisting = doValueNum && (JitConfig.JitDoLoopHoisting() != 0); doCopyProp = doValueNum && (JitConfig.JitDoCopyProp() != 0); doBranchOpt = doValueNum && (JitConfig.JitDoRedundantBranchOpts() != 0); doAssertionProp = doValueNum && (JitConfig.JitDoAssertionProp() != 0); doRangeAnalysis = doAssertionProp && (JitConfig.JitDoRangeAnalysis() != 0); if (opts.optRepeat) { iterations = JitConfig.JitOptRepeatCount(); } #endif // defined(OPT_CONFIG) while (iterations > 0) { if (doSsa) { // Build up SSA form for the IR // DoPhase(this, PHASE_BUILD_SSA, &Compiler::fgSsaBuild); } if (doEarlyProp) { // Propagate array length and rewrite getType() method call // DoPhase(this, PHASE_EARLY_PROP, &Compiler::optEarlyProp); } if (doValueNum) { // Value number the trees // DoPhase(this, PHASE_VALUE_NUMBER, &Compiler::fgValueNumber); } if (doLoopHoisting) { // Hoist invariant code out of loops // DoPhase(this, PHASE_HOIST_LOOP_CODE, &Compiler::optHoistLoopCode); } if (doCopyProp) { // Perform VN based copy propagation // DoPhase(this, PHASE_VN_COPY_PROP, &Compiler::optVnCopyProp); } if (doBranchOpt) { DoPhase(this, PHASE_OPTIMIZE_BRANCHES, &Compiler::optRedundantBranches); } // Remove common sub-expressions // DoPhase(this, PHASE_OPTIMIZE_VALNUM_CSES, &Compiler::optOptimizeCSEs); if (doAssertionProp) { // Assertion propagation // DoPhase(this, PHASE_ASSERTION_PROP_MAIN, &Compiler::optAssertionPropMain); } if (doRangeAnalysis) { auto rangePhase = [this]() { RangeCheck rc(this); rc.OptimizeRangeChecks(); }; // Bounds check elimination via range analysis // DoPhase(this, PHASE_OPTIMIZE_INDEX_CHECKS, rangePhase); } if (fgModified) { // update the flowgraph if we modified it during the optimization phase // auto optUpdateFlowGraphPhase = [this]() { constexpr bool doTailDup = false; fgUpdateFlowGraph(doTailDup); }; DoPhase(this, PHASE_OPT_UPDATE_FLOW_GRAPH, optUpdateFlowGraphPhase); // Recompute the edge weight if we have modified the 
flow graph // DoPhase(this, PHASE_COMPUTE_EDGE_WEIGHTS2, &Compiler::fgComputeEdgeWeights); } // Iterate if requested, resetting annotations first. if (--iterations == 0) { break; } ResetOptAnnotations(); RecomputeLoopInfo(); } } // Insert GC Polls DoPhase(this, PHASE_INSERT_GC_POLLS, &Compiler::fgInsertGCPolls); // Determine start of cold region if we are hot/cold splitting // DoPhase(this, PHASE_DETERMINE_FIRST_COLD_BLOCK, &Compiler::fgDetermineFirstColdBlock); #ifdef DEBUG fgDebugCheckLinks(compStressCompile(STRESS_REMORPH_TREES, 50)); // Stash the current estimate of the function's size if necessary. if (verbose) { compSizeEstimate = 0; compCycleEstimate = 0; for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { compSizeEstimate += stmt->GetCostSz(); compCycleEstimate += stmt->GetCostEx(); } } } #endif // rationalize trees Rationalizer rat(this); // PHASE_RATIONALIZE rat.Run(); // Here we do "simple lowering". When the RyuJIT backend works for all // platforms, this will be part of the more general lowering phase. For now, though, we do a separate // pass of "final lowering." We must do this before (final) liveness analysis, because this creates // range check throw blocks, in which the liveness must be correct. // DoPhase(this, PHASE_SIMPLE_LOWERING, &Compiler::fgSimpleLowering); // Enable this to gather statistical data such as // call and register argument info, flowgraph and loop info, etc. compJitStats(); #ifdef TARGET_ARM if (compLocallocUsed) { // We reserve REG_SAVED_LOCALLOC_SP to store SP on entry for stack unwinding codeGen->regSet.rsMaskResvd |= RBM_SAVED_LOCALLOC_SP; } #endif // TARGET_ARM // Assign registers to variables, etc. /////////////////////////////////////////////////////////////////////////////// // Dominator and reachability sets are no longer valid. They haven't been // maintained up to here, and shouldn't be used (unless recomputed). /////////////////////////////////////////////////////////////////////////////// fgDomsComputed = false; // Create LinearScan before Lowering, so that Lowering can call LinearScan methods // for determining whether locals are register candidates and (for xarch) whether // a node is a containable memory op. m_pLinearScan = getLinearScanAllocator(this); // Lower // m_pLowering = new (this, CMK_LSRA) Lowering(this, m_pLinearScan); // PHASE_LOWERING m_pLowering->Run(); if (!compMacOsArm64Abi()) { // Set stack levels; this information is necessary for x86 // but on other platforms it is used only in asserts. // TODO: do not run it in release on other platforms, see https://github.com/dotnet/runtime/issues/42673. StackLevelSetter stackLevelSetter(this); stackLevelSetter.Run(); } // We can not add any new tracked variables after this point. 
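// Illustrative sketch (not part of the JIT build): what the Rationalizer run
// above does conceptually -- tree-ordered IR is flattened into the linear
// (LIR) execution order the backend consumes. The toy Node type and
// linearize() helper are hypothetical.
#if 0
#include <vector>

struct Node
{
    const char* name;
    Node*       op1;
    Node*       op2;
};

// Append operands before their operator: postorder gives the linear order.
void linearize(Node* n, std::vector<Node*>& order)
{
    if (n == nullptr)
    {
        return;
    }
    linearize(n->op1, order);
    linearize(n->op2, order);
    order.push_back(n);
}

// For ADD(MUL(a, b), c) the resulting order is: a, b, MUL, c, ADD.
#endif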
lvaTrackedFixed = true; // Now that lowering is completed we can proceed to perform register allocation // auto linearScanPhase = [this]() { m_pLinearScan->doLinearScan(); }; DoPhase(this, PHASE_LINEAR_SCAN, linearScanPhase); // Copied from rpPredictRegUse() SetFullPtrRegMapRequired(codeGen->GetInterruptible() || !codeGen->isFramePointerUsed()); #if FEATURE_LOOP_ALIGN // Place loop alignment instructions DoPhase(this, PHASE_ALIGN_LOOPS, &Compiler::placeLoopAlignInstructions); #endif // Generate code codeGen->genGenerateCode(methodCodePtr, methodCodeSize); #if TRACK_LSRA_STATS if (JitConfig.DisplayLsraStats() == 2) { m_pLinearScan->dumpLsraStatsCsv(jitstdout); } #endif // TRACK_LSRA_STATS // We're done -- set the active phase to the last phase // (which isn't really a phase) mostRecentlyActivePhase = PHASE_POST_EMIT; #ifdef FEATURE_JIT_METHOD_PERF if (pCompJitTimer) { #if MEASURE_CLRAPI_CALLS EndPhase(PHASE_CLR_API); #else EndPhase(PHASE_POST_EMIT); #endif pCompJitTimer->Terminate(this, CompTimeSummaryInfo::s_compTimeSummary, true); } #endif // Generate PatchpointInfo generatePatchpointInfo(); RecordStateAtEndOfCompilation(); #ifdef FEATURE_TRACELOGGING compJitTelemetry.NotifyEndOfCompilation(); #endif #if defined(DEBUG) ++Compiler::jitTotalMethodCompiled; #endif // defined(DEBUG) compFunctionTraceEnd(*methodCodePtr, *methodCodeSize, false); JITDUMP("Method code size: %d\n", (unsigned)(*methodCodeSize)); #if FUNC_INFO_LOGGING if (compJitFuncInfoFile != nullptr) { assert(!compIsForInlining()); #ifdef DEBUG // We only have access to info.compFullName in DEBUG builds. fprintf(compJitFuncInfoFile, "%s\n", info.compFullName); #elif FEATURE_SIMD fprintf(compJitFuncInfoFile, " %s\n", eeGetMethodFullName(info.compMethodHnd)); #endif fprintf(compJitFuncInfoFile, ""); // in our logic this causes a flush } #endif // FUNC_INFO_LOGGING } #if FEATURE_LOOP_ALIGN //------------------------------------------------------------------------ // placeLoopAlignInstructions: Iterate over all the blocks and determine // the best position to place the 'align' instruction. Inserting 'align' // instructions after an unconditional branch is preferred over inserting // in the block before the loop. In case there are multiple blocks // having 'jmp', the one that has lower weight is preferred. // If the block having 'jmp' is hotter than the block before the loop, // the align will still be placed after 'jmp' because the processor should // be smart enough to not fetch extra instruction beyond jmp. // void Compiler::placeLoopAlignInstructions() { if (loopAlignCandidates == 0) { return; } int loopsToProcess = loopAlignCandidates; JITDUMP("Inside placeLoopAlignInstructions for %d loops.\n", loopAlignCandidates); // Add align only if there were any loops that needed alignment weight_t minBlockSoFar = BB_MAX_WEIGHT; BasicBlock* bbHavingAlign = nullptr; BasicBlock::loopNumber currentAlignedLoopNum = BasicBlock::NOT_IN_LOOP; if ((fgFirstBB != nullptr) && fgFirstBB->isLoopAlign()) { // Adding align instruction in prolog is not supported // hence just remove that loop from our list. loopsToProcess--; } for (BasicBlock* const block : Blocks()) { if (currentAlignedLoopNum != BasicBlock::NOT_IN_LOOP) { // We've been processing blocks within an aligned loop. Are we out of that loop now? 
if (currentAlignedLoopNum != block->bbNatLoopNum) { currentAlignedLoopNum = BasicBlock::NOT_IN_LOOP; } } // If there is a unconditional jump (which is not part of callf/always pair) if (opts.compJitHideAlignBehindJmp && (block->bbJumpKind == BBJ_ALWAYS) && !block->isBBCallAlwaysPairTail()) { // Track the lower weight blocks if (block->bbWeight < minBlockSoFar) { if (currentAlignedLoopNum == BasicBlock::NOT_IN_LOOP) { // Ok to insert align instruction in this block because it is not part of any aligned loop. minBlockSoFar = block->bbWeight; bbHavingAlign = block; JITDUMP(FMT_BB ", bbWeight=" FMT_WT " ends with unconditional 'jmp' \n", block->bbNum, block->bbWeight); } } } if ((block->bbNext != nullptr) && (block->bbNext->isLoopAlign())) { // If jmp was not found, then block before the loop start is where align instruction will be added. if (bbHavingAlign == nullptr) { bbHavingAlign = block; JITDUMP("Marking " FMT_BB " before the loop with BBF_HAS_ALIGN for loop at " FMT_BB "\n", block->bbNum, block->bbNext->bbNum); } else { JITDUMP("Marking " FMT_BB " that ends with unconditional jump with BBF_HAS_ALIGN for loop at " FMT_BB "\n", bbHavingAlign->bbNum, block->bbNext->bbNum); } bbHavingAlign->bbFlags |= BBF_HAS_ALIGN; minBlockSoFar = BB_MAX_WEIGHT; bbHavingAlign = nullptr; currentAlignedLoopNum = block->bbNext->bbNatLoopNum; if (--loopsToProcess == 0) { break; } } } assert(loopsToProcess == 0); } #endif //------------------------------------------------------------------------ // generatePatchpointInfo: allocate and fill in patchpoint info data, // and report it to the VM // void Compiler::generatePatchpointInfo() { if (!doesMethodHavePatchpoints() && !doesMethodHavePartialCompilationPatchpoints()) { // Nothing to report return; } // Patchpoints are only found in Tier0 code, which is unoptimized, and so // should always have frame pointer. assert(codeGen->isFramePointerUsed()); // Allocate patchpoint info storage from runtime, and fill in initial bits of data. const unsigned patchpointInfoSize = PatchpointInfo::ComputeSize(info.compLocalsCount); PatchpointInfo* const patchpointInfo = (PatchpointInfo*)info.compCompHnd->allocateArray(patchpointInfoSize); // Patchpoint offsets always refer to "virtual frame offsets". // // For x64 this falls out because Tier0 frames are always FP frames, and so the FP-relative // offset is what we want. // // For arm64, if the frame pointer is not at the top of the frame, we need to adjust the // offset. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_AMD64) // We add +TARGET_POINTER_SIZE here is to account for the slot that Jit_Patchpoint // creates when it simulates calling the OSR method (the "pseudo return address" slot). // This is effectively a new slot at the bottom of the Tier0 frame. // const int totalFrameSize = codeGen->genTotalFrameSize() + TARGET_POINTER_SIZE; const int offsetAdjust = 0; #elif defined(TARGET_ARM64) // SP is not manipulated by calls so no frame size adjustment needed. // Local Offsets may need adjusting, if FP is at bottom of frame. // const int totalFrameSize = codeGen->genTotalFrameSize(); const int offsetAdjust = codeGen->genSPtoFPdelta() - totalFrameSize; #else NYI("patchpoint info generation"); const int offsetAdjust = 0; const int totalFrameSize = 0; #endif patchpointInfo->Initialize(info.compLocalsCount, totalFrameSize); JITDUMP("--OSR--- Total Frame Size %d, local offset adjust is %d\n", patchpointInfo->TotalFrameSize(), offsetAdjust); // We record offsets for all the "locals" here. 
Could restrict // this to just the IL locals with some extra logic, and save a bit of space, // but would need to adjust all consumers, too. for (unsigned lclNum = 0; lclNum < info.compLocalsCount; lclNum++) { LclVarDsc* const varDsc = lvaGetDesc(lclNum); // We expect all these to have stack homes, and be FP relative assert(varDsc->lvOnFrame); assert(varDsc->lvFramePointerBased); // Record FramePtr relative offset (no localloc yet) patchpointInfo->SetOffset(lclNum, varDsc->GetStackOffset() + offsetAdjust); // Note if IL stream contained an address-of that potentially leads to exposure. // This bit of IL may be skipped by OSR partial importation. if (varDsc->lvHasLdAddrOp) { patchpointInfo->SetIsExposed(lclNum); } JITDUMP("--OSR-- V%02u is at virtual offset %d%s\n", lclNum, patchpointInfo->Offset(lclNum), patchpointInfo->IsExposed(lclNum) ? " (exposed)" : ""); } // Special offsets // if (lvaReportParamTypeArg()) { const int offset = lvaCachedGenericContextArgOffset(); patchpointInfo->SetGenericContextArgOffset(offset + offsetAdjust); JITDUMP("--OSR-- cached generic context virtual offset is %d\n", patchpointInfo->GenericContextArgOffset()); } if (lvaKeepAliveAndReportThis()) { const int offset = lvaCachedGenericContextArgOffset(); patchpointInfo->SetKeptAliveThisOffset(offset + offsetAdjust); JITDUMP("--OSR-- kept-alive this virtual offset is %d\n", patchpointInfo->KeptAliveThisOffset()); } if (compGSReorderStackLayout) { assert(lvaGSSecurityCookie != BAD_VAR_NUM); LclVarDsc* const varDsc = lvaGetDesc(lvaGSSecurityCookie); patchpointInfo->SetSecurityCookieOffset(varDsc->GetStackOffset() + offsetAdjust); JITDUMP("--OSR-- security cookie V%02u virtual offset is %d\n", lvaGSSecurityCookie, patchpointInfo->SecurityCookieOffset()); } if (lvaMonAcquired != BAD_VAR_NUM) { LclVarDsc* const varDsc = lvaGetDesc(lvaMonAcquired); patchpointInfo->SetMonitorAcquiredOffset(varDsc->GetStackOffset() + offsetAdjust); JITDUMP("--OSR-- monitor acquired V%02u virtual offset is %d\n", lvaMonAcquired, patchpointInfo->MonitorAcquiredOffset()); } #if defined(TARGET_AMD64) // Record callee save registers. // Currently only needed for x64. // regMaskTP rsPushRegs = codeGen->regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED; rsPushRegs |= RBM_FPBASE; patchpointInfo->SetCalleeSaveRegisters((uint64_t)rsPushRegs); JITDUMP("--OSR-- Tier0 callee saves: "); JITDUMPEXEC(dspRegMask((regMaskTP)patchpointInfo->CalleeSaveRegisters())); JITDUMP("\n"); #endif // Register this with the runtime. info.compCompHnd->setPatchpointInfo(patchpointInfo); } //------------------------------------------------------------------------ // ResetOptAnnotations: Clear annotations produced during global optimizations. // // Notes: // The intent of this method is to clear any information typically assumed // to be set only once; it is used between iterations when JitOptRepeat is // in effect. void Compiler::ResetOptAnnotations() { assert(opts.optRepeat); assert(JitConfig.JitOptRepeatCount() > 0); fgResetForSsa(); vnStore = nullptr; m_opAsgnVarDefSsaNums = nullptr; m_blockToEHPreds = nullptr; fgSsaPassesCompleted = 0; fgVNPassesCompleted = 0; for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { for (GenTree* const tree : stmt->TreeList()) { tree->ClearVN(); tree->ClearAssertion(); tree->gtCSEnum = NO_CSE; } } } } //------------------------------------------------------------------------ // RecomputeLoopInfo: Recompute loop annotations between opt-repeat iterations. 
// // Notes: // The intent of this method is to update loop structure annotations, and those // they depend on; these annotations may have become stale during optimization, // and need to be up-to-date before running another iteration of optimizations. // void Compiler::RecomputeLoopInfo() { assert(opts.optRepeat); assert(JitConfig.JitOptRepeatCount() > 0); // Recompute reachability sets, dominators, and loops. optResetLoopInfo(); fgDomsComputed = false; fgComputeReachability(); optSetBlockWeights(); // Rebuild the loop tree annotations themselves optFindLoops(); } /*****************************************************************************/ void Compiler::ProcessShutdownWork(ICorStaticInfo* statInfo) { } /*****************************************************************************/ #ifdef DEBUG void* forceFrameJIT; // used to force to frame &useful for fastchecked debugging bool Compiler::skipMethod() { static ConfigMethodRange fJitRange; fJitRange.EnsureInit(JitConfig.JitRange()); assert(!fJitRange.Error()); // Normally JitConfig.JitRange() is null, we don't want to skip // jitting any methods. // // So, the logic below relies on the fact that a null range string // passed to ConfigMethodRange represents the set of all methods. if (!fJitRange.Contains(info.compMethodHash())) { return true; } if (JitConfig.JitExclude().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { return true; } if (!JitConfig.JitInclude().isEmpty() && !JitConfig.JitInclude().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { return true; } return false; } #endif /*****************************************************************************/ int Compiler::compCompile(CORINFO_MODULE_HANDLE classPtr, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags) { // compInit should have set these already. noway_assert(info.compMethodInfo != nullptr); noway_assert(info.compCompHnd != nullptr); noway_assert(info.compMethodHnd != nullptr); #ifdef FEATURE_JIT_METHOD_PERF static bool checkedForJitTimeLog = false; pCompJitTimer = nullptr; if (!checkedForJitTimeLog) { // Call into VM to get the config strings. FEATURE_JIT_METHOD_PERF is enabled for // retail builds. Do not call the regular Config helper here as it would pull // in a copy of the config parser into the clrjit.dll. InterlockedCompareExchangeT(&Compiler::compJitTimeLogFilename, (LPCWSTR)info.compCompHnd->getJitTimeLogFilename(), NULL); // At a process or module boundary clear the file and start afresh. JitTimer::PrintCsvHeader(); checkedForJitTimeLog = true; } if ((Compiler::compJitTimeLogFilename != nullptr) || (JitTimeLogCsv() != nullptr)) { pCompJitTimer = JitTimer::Create(this, info.compMethodInfo->ILCodeSize); } #endif // FEATURE_JIT_METHOD_PERF #ifdef DEBUG Compiler* me = this; forceFrameJIT = (void*)&me; // let us see the this pointer in fastchecked build // set this early so we can use it without relying on random memory values verbose = compIsForInlining() ? 
impInlineInfo->InlinerCompiler->verbose : false; #endif #if FUNC_INFO_LOGGING LPCWSTR tmpJitFuncInfoFilename = JitConfig.JitFuncInfoFile(); if (tmpJitFuncInfoFilename != nullptr) { LPCWSTR oldFuncInfoFileName = InterlockedCompareExchangeT(&compJitFuncInfoFilename, tmpJitFuncInfoFilename, NULL); if (oldFuncInfoFileName == nullptr) { assert(compJitFuncInfoFile == nullptr); compJitFuncInfoFile = _wfopen(compJitFuncInfoFilename, W("a")); if (compJitFuncInfoFile == nullptr) { #if defined(DEBUG) && !defined(HOST_UNIX) // no 'perror' in the PAL perror("Failed to open JitFuncInfoLogFile"); #endif // defined(DEBUG) && !defined(HOST_UNIX) } } } #endif // FUNC_INFO_LOGGING // if (s_compMethodsCount==0) setvbuf(jitstdout, NULL, _IONBF, 0); if (compIsForInlining()) { compileFlags->Clear(JitFlags::JIT_FLAG_OSR); info.compILEntry = 0; info.compPatchpointInfo = nullptr; } else if (compileFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { // Fetch OSR info from the runtime info.compPatchpointInfo = info.compCompHnd->getOSRInfo(&info.compILEntry); assert(info.compPatchpointInfo != nullptr); } #if defined(TARGET_ARM64) compFrameInfo = {0}; #endif virtualStubParamInfo = new (this, CMK_Unknown) VirtualStubParamInfo(IsTargetAbi(CORINFO_CORERT_ABI)); // compMatchedVM is set to true if both CPU/ABI and OS are matching the execution engine requirements // // Do we have a matched VM? Or are we "abusing" the VM to help us do JIT work (such as using an x86 native VM // with an ARM-targeting "altjit"). // Match CPU/ABI for compMatchedVM info.compMatchedVM = IMAGE_FILE_MACHINE_TARGET == info.compCompHnd->getExpectedTargetArchitecture(); // Match OS for compMatchedVM CORINFO_EE_INFO* eeInfo = eeGetEEInfo(); #ifdef TARGET_OS_RUNTIMEDETERMINED noway_assert(TargetOS::OSSettingConfigured); #endif if (TargetOS::IsMacOS) { info.compMatchedVM = info.compMatchedVM && (eeInfo->osType == CORINFO_MACOS); } else if (TargetOS::IsUnix) { if (TargetArchitecture::IsX64) { // MacOS x64 uses the Unix jit variant in crossgen2, not a special jit info.compMatchedVM = info.compMatchedVM && ((eeInfo->osType == CORINFO_UNIX) || (eeInfo->osType == CORINFO_MACOS)); } else { info.compMatchedVM = info.compMatchedVM && (eeInfo->osType == CORINFO_UNIX); } } else if (TargetOS::IsWindows) { info.compMatchedVM = info.compMatchedVM && (eeInfo->osType == CORINFO_WINNT); } // If we are not compiling for a matched VM, then we are getting JIT flags that don't match our target // architecture. The two main examples here are an ARM targeting altjit hosted on x86 and an ARM64 // targeting altjit hosted on x64. (Though with cross-bitness work, the host doesn't necessarily need // to be of the same bitness.) In these cases, we need to fix up the JIT flags to be appropriate for // the target, as the VM's expected target may overlap bit flags with different meaning to our target. // Note that it might be better to do this immediately when setting the JIT flags in CILJit::compileMethod() // (when JitFlags::SetFromFlags() is called), but this is close enough. (To move this logic to // CILJit::compileMethod() would require moving the info.compMatchedVM computation there as well.) if (!info.compMatchedVM) { #if defined(TARGET_ARM) // Currently nothing needs to be done. There are no ARM flags that conflict with other flags. #endif // defined(TARGET_ARM) #if defined(TARGET_ARM64) // The x86/x64 architecture capabilities flags overlap with the ARM64 ones. Set a reasonable architecture // target default. 
Currently this is disabling all ARM64 architecture features except FP and SIMD, but this // should be altered to possibly enable all of them, when they are known to all work. CORINFO_InstructionSetFlags defaultArm64Flags; defaultArm64Flags.AddInstructionSet(InstructionSet_ArmBase); defaultArm64Flags.AddInstructionSet(InstructionSet_AdvSimd); defaultArm64Flags.Set64BitInstructionSetVariants(); compileFlags->SetInstructionSetFlags(defaultArm64Flags); #endif // defined(TARGET_ARM64) } compMaxUncheckedOffsetForNullObject = eeGetEEInfo()->maxUncheckedOffsetForNullObject; // Set the context for token lookup. if (compIsForInlining()) { impTokenLookupContextHandle = impInlineInfo->tokenLookupContextHandle; assert(impInlineInfo->inlineCandidateInfo->clsHandle == info.compCompHnd->getMethodClass(info.compMethodHnd)); info.compClassHnd = impInlineInfo->inlineCandidateInfo->clsHandle; assert(impInlineInfo->inlineCandidateInfo->clsAttr == info.compCompHnd->getClassAttribs(info.compClassHnd)); // printf("%x != %x\n", impInlineInfo->inlineCandidateInfo->clsAttr, // info.compCompHnd->getClassAttribs(info.compClassHnd)); info.compClassAttr = impInlineInfo->inlineCandidateInfo->clsAttr; } else { impTokenLookupContextHandle = METHOD_BEING_COMPILED_CONTEXT(); info.compClassHnd = info.compCompHnd->getMethodClass(info.compMethodHnd); info.compClassAttr = info.compCompHnd->getClassAttribs(info.compClassHnd); } #ifdef DEBUG if (JitConfig.EnableExtraSuperPmiQueries()) { // This call to getClassModule/getModuleAssembly/getAssemblyName fails in crossgen2 due to these // APIs being unimplemented. So disable this extra info for pre-jit mode. See // https://github.com/dotnet/runtime/issues/48888. // // Ditto for some of the class name queries for generic params. // if (!compileFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { // Get the assembly name, to aid finding any particular SuperPMI method context function (void)info.compCompHnd->getAssemblyName( info.compCompHnd->getModuleAssembly(info.compCompHnd->getClassModule(info.compClassHnd))); // Fetch class names for the method's generic parameters. // CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(info.compMethodHnd, &sig, nullptr); const unsigned classInst = sig.sigInst.classInstCount; if (classInst > 0) { for (unsigned i = 0; i < classInst; i++) { eeGetClassName(sig.sigInst.classInst[i]); } } const unsigned methodInst = sig.sigInst.methInstCount; if (methodInst > 0) { for (unsigned i = 0; i < methodInst; i++) { eeGetClassName(sig.sigInst.methInst[i]); } } } } #endif // DEBUG info.compProfilerCallback = false; // Assume false until we are told to hook this method. #ifdef DEBUG if (!compIsForInlining()) { JitTls::GetLogEnv()->setCompiler(this); } // Have we been told to be more selective in our Jitting? 
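// Illustrative sketch (not part of the JIT build): the shape of the
// skipMethod() filter consulted below -- an exclude list always wins, and a
// non-empty include list restricts jitting to its members. The plain
// std::string sets are hypothetical; the real JitConfig lists also match
// class names and signatures.
#if 0
#include <set>
#include <string>

bool shouldSkip(const std::string&           methodName,
                const std::set<std::string>& excludeList,
                const std::set<std::string>& includeList)
{
    if (excludeList.count(methodName) != 0)
    {
        return true;  // explicitly excluded
    }
    if (!includeList.empty() && includeList.count(methodName) == 0)
    {
        return true;  // an include list exists and this method is not on it
    }
    return false;
}
#endif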
if (skipMethod()) { if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLEE_MARKED_AS_SKIPPED); } return CORJIT_SKIPPED; } #endif // DEBUG // Set this before the first 'BADCODE' // Skip verification where possible assert(compileFlags->IsSet(JitFlags::JIT_FLAG_SKIP_VERIFICATION)); /* Setup an error trap */ struct Param { Compiler* pThis; CORINFO_MODULE_HANDLE classPtr; COMP_HANDLE compHnd; CORINFO_METHOD_INFO* methodInfo; void** methodCodePtr; uint32_t* methodCodeSize; JitFlags* compileFlags; int result; } param; param.pThis = this; param.classPtr = classPtr; param.compHnd = info.compCompHnd; param.methodInfo = info.compMethodInfo; param.methodCodePtr = methodCodePtr; param.methodCodeSize = methodCodeSize; param.compileFlags = compileFlags; param.result = CORJIT_INTERNALERROR; setErrorTrap(info.compCompHnd, Param*, pParam, &param) // ERROR TRAP: Start normal block { pParam->result = pParam->pThis->compCompileHelper(pParam->classPtr, pParam->compHnd, pParam->methodInfo, pParam->methodCodePtr, pParam->methodCodeSize, pParam->compileFlags); } finallyErrorTrap() // ERROR TRAP: The following block handles errors { /* Cleanup */ if (compIsForInlining()) { goto DoneCleanUp; } /* Tell the emitter that we're done with this function */ GetEmitter()->emitEndCG(); DoneCleanUp: compDone(); } endErrorTrap() // ERROR TRAP: End return param.result; } #if defined(DEBUG) || defined(INLINE_DATA) //------------------------------------------------------------------------ // compMethodHash: get hash code for currently jitted method // // Returns: // Hash based on method's full name // unsigned Compiler::Info::compMethodHash() const { if (compMethodHashPrivate == 0) { // compMethodHashPrivate = compCompHnd->getMethodHash(compMethodHnd); assert(compFullName != nullptr); assert(*compFullName != 0); COUNT_T hash = HashStringA(compFullName); // Use compFullName to generate the hash, as it contains the signature // and return type compMethodHashPrivate = hash; } return compMethodHashPrivate; } //------------------------------------------------------------------------ // compMethodHash: get hash code for specified method // // Arguments: // methodHnd - method of interest // // Returns: // Hash based on method's full name // unsigned Compiler::compMethodHash(CORINFO_METHOD_HANDLE methodHnd) { // If this is the root method, delegate to the caching version // if (methodHnd == info.compMethodHnd) { return info.compMethodHash(); } // Else compute from scratch. Might consider caching this too. 
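// Illustrative sketch (not part of the JIT build): hashing a method's full
// name to get a stable identifier, as compMethodHash does above with
// HashStringA. The FNV-1a routine below is a generic stand-in, not the
// actual HashStringA implementation.
#if 0
unsigned fnv1aHash(const char* s)
{
    unsigned hash = 2166136261u;      // FNV offset basis
    for (; *s != '\0'; s++)
    {
        hash ^= (unsigned char)*s;
        hash *= 16777619u;            // FNV prime
    }
    return hash;
}

// e.g. fnv1aHash("System.Example:Method(int)") yields a repeatable 32-bit id.
#endif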
// unsigned methodHash = 0; const char* calleeName = eeGetMethodFullName(methodHnd); if (calleeName != nullptr) { methodHash = HashStringA(calleeName); } else { methodHash = info.compCompHnd->getMethodHash(methodHnd); } return methodHash; } #endif // defined(DEBUG) || defined(INLINE_DATA) void Compiler::compCompileFinish() { #if defined(DEBUG) || MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || DISPLAY_SIZES || CALL_ARG_STATS genMethodCnt++; #endif #if MEASURE_MEM_ALLOC { compArenaAllocator->finishMemStats(); memAllocHist.record((unsigned)((compArenaAllocator->getTotalBytesAllocated() + 1023) / 1024)); memUsedHist.record((unsigned)((compArenaAllocator->getTotalBytesUsed() + 1023) / 1024)); } #ifdef DEBUG if (s_dspMemStats || verbose) { printf("\nAllocations for %s (MethodHash=%08x)\n", info.compFullName, info.compMethodHash()); compArenaAllocator->dumpMemStats(jitstdout); } #endif // DEBUG #endif // MEASURE_MEM_ALLOC #if LOOP_HOIST_STATS AddLoopHoistStats(); #endif // LOOP_HOIST_STATS #if MEASURE_NODE_SIZE genTreeNcntHist.record(static_cast<unsigned>(genNodeSizeStatsPerFunc.genTreeNodeCnt)); genTreeNsizHist.record(static_cast<unsigned>(genNodeSizeStatsPerFunc.genTreeNodeSize)); #endif #if defined(DEBUG) // Small methods should fit in ArenaAllocator::getDefaultPageSize(), or else // we should bump up ArenaAllocator::getDefaultPageSize() if ((info.compILCodeSize <= 32) && // Is it a reasonably small method? (info.compNativeCodeSize < 512) && // Some trivial methods generate huge native code. eg. pushing a single huge // struct (impInlinedCodeSize <= 128) && // Is the the inlining reasonably bounded? // Small methods cannot meaningfully have a big number of locals // or arguments. We always track arguments at the start of // the prolog which requires memory (info.compLocalsCount <= 32) && (!opts.MinOpts()) && // We may have too many local variables, etc (getJitStressLevel() == 0) && // We need extra memory for stress !opts.optRepeat && // We need extra memory to repeat opts !compArenaAllocator->bypassHostAllocator() && // ArenaAllocator::getDefaultPageSize() is artificially low for // DirectAlloc // Factor of 2x is because data-structures are bigger under DEBUG (compArenaAllocator->getTotalBytesAllocated() > (2 * ArenaAllocator::getDefaultPageSize())) && // RyuJIT backend needs memory tuning! TODO-Cleanup: remove this case when memory tuning is complete. (compArenaAllocator->getTotalBytesAllocated() > (10 * ArenaAllocator::getDefaultPageSize())) && !verbose) // We allocate lots of memory to convert sets to strings for JitDump { genSmallMethodsNeedingExtraMemoryCnt++; // Less than 1% of all methods should run into this. // We cannot be more strict as there are always degenerate cases where we // would need extra memory (like huge structs as locals - see lvaSetStruct()). 
assert((genMethodCnt < 500) || (genSmallMethodsNeedingExtraMemoryCnt < (genMethodCnt / 100))); } #endif // DEBUG #if defined(DEBUG) || defined(INLINE_DATA) m_inlineStrategy->DumpData(); if (JitConfig.JitInlineDumpXmlFile() != nullptr) { FILE* file = _wfopen(JitConfig.JitInlineDumpXmlFile(), W("a")); if (file != nullptr) { m_inlineStrategy->DumpXml(file); fclose(file); } else { m_inlineStrategy->DumpXml(); } } else { m_inlineStrategy->DumpXml(); } #endif #ifdef DEBUG if (opts.dspOrder) { // mdMethodDef __stdcall CEEInfo::getMethodDefFromMethod(CORINFO_METHOD_HANDLE hMethod) mdMethodDef currentMethodToken = info.compCompHnd->getMethodDefFromMethod(info.compMethodHnd); static bool headerPrinted = false; if (!headerPrinted) { // clang-format off headerPrinted = true; printf(" | Profiled | Method | Method has | calls | Num |LclV |AProp| CSE | Perf |bytes | %3s codesize| \n", Target::g_tgtCPUName); printf(" mdToken | CNT | RGN | Hash | EH | FRM | LOOP | NRM | IND | BBs | Cnt | Cnt | Cnt | Score | IL | HOT | CLD | method name \n"); printf("---------+------+------+----------+----+-----+------+-----+-----+-----+-----+-----+-----+---------+------+-------+-----+\n"); // 06001234 | 1234 | HOT | 0f1e2d3c | EH | ebp | LOOP | 15 | 6 | 12 | 17 | 12 | 8 | 1234.56 | 145 | 1234 | 123 | System.Example(int) // clang-format on } printf("%08X | ", currentMethodToken); if (fgHaveProfileData()) { if (fgCalledCount < 1000) { printf("%4.0f | ", fgCalledCount); } else if (fgCalledCount < 1000000) { printf("%3.0fK | ", fgCalledCount / 1000); } else { printf("%3.0fM | ", fgCalledCount / 1000000); } } else { printf(" | "); } CorInfoRegionKind regionKind = info.compMethodInfo->regionKind; if (opts.altJit) { printf("ALT | "); } else if (regionKind == CORINFO_REGION_NONE) { printf(" | "); } else if (regionKind == CORINFO_REGION_HOT) { printf(" HOT | "); } else if (regionKind == CORINFO_REGION_COLD) { printf("COLD | "); } else if (regionKind == CORINFO_REGION_JIT) { printf(" JIT | "); } else { printf("UNKN | "); } printf("%08x | ", info.compMethodHash()); if (compHndBBtabCount > 0) { printf("EH | "); } else { printf(" | "); } if (rpFrameType == FT_EBP_FRAME) { printf("%3s | ", STR_FPBASE); } else if (rpFrameType == FT_ESP_FRAME) { printf("%3s | ", STR_SPBASE); } #if DOUBLE_ALIGN else if (rpFrameType == FT_DOUBLE_ALIGN_FRAME) { printf("dbl | "); } #endif else // (rpFrameType == FT_NOT_SET) { printf("??? 
| "); } if (fgHasLoops) { printf("LOOP |"); } else { printf(" |"); } printf(" %3d |", optCallCount); printf(" %3d |", optIndirectCallCount); printf(" %3d |", fgBBcountAtCodegen); printf(" %3d |", lvaCount); if (opts.MinOpts()) { printf(" MinOpts |"); } else { printf(" %3d |", optAssertionCount); printf(" %3d |", optCSEcount); } if (info.compPerfScore < 9999.995) { printf(" %7.2f |", info.compPerfScore); } else { printf(" %7.0f |", info.compPerfScore); } printf(" %4d |", info.compMethodInfo->ILCodeSize); printf(" %5d |", info.compTotalHotCodeSize); printf(" %3d |", info.compTotalColdCodeSize); printf(" %s\n", eeGetMethodFullName(info.compMethodHnd)); printf(""); // in our logic this causes a flush } if (verbose) { printf("****** DONE compiling %s\n", info.compFullName); printf(""); // in our logic this causes a flush } #if TRACK_ENREG_STATS for (unsigned i = 0; i < lvaCount; ++i) { const LclVarDsc* varDsc = lvaGetDesc(i); if (varDsc->lvRefCnt() != 0) { s_enregisterStats.RecordLocal(varDsc); } } #endif // TRACK_ENREG_STATS // Only call _DbgBreakCheck when we are jitting, not when we are ngen-ing // For ngen the int3 or breakpoint instruction will be right at the // start of the ngen method and we will stop when we execute it. // if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { if (compJitHaltMethod()) { #if !defined(HOST_UNIX) // TODO-UNIX: re-enable this when we have an OS that supports a pop-up dialog // Don't do an assert, but just put up the dialog box so we get just-in-time debugger // launching. When you hit 'retry' it will continue and naturally stop at the INT 3 // that the JIT put in the code _DbgBreakCheck(__FILE__, __LINE__, "JitHalt"); #endif } } #endif // DEBUG } #ifdef PSEUDORANDOM_NOP_INSERTION // this is zlib adler32 checksum. source came from windows base #define BASE 65521L // largest prime smaller than 65536 #define NMAX 5552 // NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 #define DO1(buf, i) \ { \ s1 += buf[i]; \ s2 += s1; \ } #define DO2(buf, i) \ DO1(buf, i); \ DO1(buf, i + 1); #define DO4(buf, i) \ DO2(buf, i); \ DO2(buf, i + 2); #define DO8(buf, i) \ DO4(buf, i); \ DO4(buf, i + 4); #define DO16(buf) \ DO8(buf, 0); \ DO8(buf, 8); unsigned adler32(unsigned adler, char* buf, unsigned int len) { unsigned int s1 = adler & 0xffff; unsigned int s2 = (adler >> 16) & 0xffff; int k; if (buf == NULL) return 1L; while (len > 0) { k = len < NMAX ? 
len : NMAX; len -= k; while (k >= 16) { DO16(buf); buf += 16; k -= 16; } if (k != 0) do { s1 += *buf++; s2 += s1; } while (--k); s1 %= BASE; s2 %= BASE; } return (s2 << 16) | s1; } #endif unsigned getMethodBodyChecksum(_In_z_ char* code, int size) { #ifdef PSEUDORANDOM_NOP_INSERTION return adler32(0, code, size); #else return 0; #endif } int Compiler::compCompileHelper(CORINFO_MODULE_HANDLE classPtr, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags) { CORINFO_METHOD_HANDLE methodHnd = info.compMethodHnd; info.compCode = methodInfo->ILCode; info.compILCodeSize = methodInfo->ILCodeSize; info.compILImportSize = 0; if (info.compILCodeSize == 0) { BADCODE("code size is zero"); } if (compIsForInlining()) { #ifdef DEBUG unsigned methAttr_Old = impInlineInfo->inlineCandidateInfo->methAttr; unsigned methAttr_New = info.compCompHnd->getMethodAttribs(info.compMethodHnd); unsigned flagsToIgnore = CORINFO_FLG_DONT_INLINE | CORINFO_FLG_FORCEINLINE; assert((methAttr_Old & (~flagsToIgnore)) == (methAttr_New & (~flagsToIgnore))); #endif info.compFlags = impInlineInfo->inlineCandidateInfo->methAttr; compInlineContext = impInlineInfo->inlineContext; } else { info.compFlags = info.compCompHnd->getMethodAttribs(info.compMethodHnd); #ifdef PSEUDORANDOM_NOP_INSERTION info.compChecksum = getMethodBodyChecksum((char*)methodInfo->ILCode, methodInfo->ILCodeSize); #endif compInlineContext = m_inlineStrategy->GetRootContext(); } compSwitchedToOptimized = false; compSwitchedToMinOpts = false; // compInitOptions will set the correct verbose flag. compInitOptions(compileFlags); if (!compIsForInlining() && !opts.altJit && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT)) { // We're an altjit, but the COMPlus_AltJit configuration did not say to compile this method, // so skip it. return CORJIT_SKIPPED; } #ifdef DEBUG if (verbose) { printf("IL to import:\n"); dumpILRange(info.compCode, info.compILCodeSize); } #endif // Check for COMPlus_AggressiveInlining if (JitConfig.JitAggressiveInlining()) { compDoAggressiveInlining = true; } if (compDoAggressiveInlining) { info.compFlags |= CORINFO_FLG_FORCEINLINE; } #ifdef DEBUG // Check for ForceInline stress. 
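// Illustrative sketch (not part of the JIT build): a plain reference
// rendition of the Adler-32 recurrence that the macro-unrolled adler32()
// above computes, with the deferred modulo folded into every step and bytes
// treated as unsigned. Useful only to make the s1/s2 recurrence explicit.
#if 0
unsigned adler32Reference(unsigned adler, const unsigned char* buf, unsigned len)
{
    const unsigned kBase = 65521u;            // largest prime below 2^16
    unsigned       s1    = adler & 0xffff;
    unsigned       s2    = (adler >> 16) & 0xffff;

    for (unsigned i = 0; i < len; i++)
    {
        s1 = (s1 + buf[i]) % kBase;           // running byte sum
        s2 = (s2 + s1) % kBase;               // running sum of s1 values
    }
    return (s2 << 16) | s1;
}
#endif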
if (compStressCompile(STRESS_FORCE_INLINE, 0)) { info.compFlags |= CORINFO_FLG_FORCEINLINE; } if (compIsForInlining()) { JITLOG((LL_INFO100000, "\nINLINER impTokenLookupContextHandle for %s is 0x%p.\n", eeGetMethodFullName(info.compMethodHnd), dspPtr(impTokenLookupContextHandle))); } #endif // DEBUG impCanReimport = compStressCompile(STRESS_CHK_REIMPORT, 15); /* Initialize set a bunch of global values */ info.compScopeHnd = classPtr; info.compXcptnsCount = methodInfo->EHcount; info.compMaxStack = methodInfo->maxStack; compHndBBtab = nullptr; compHndBBtabCount = 0; compHndBBtabAllocCount = 0; info.compNativeCodeSize = 0; info.compTotalHotCodeSize = 0; info.compTotalColdCodeSize = 0; info.compClassProbeCount = 0; compHasBackwardJump = false; compHasBackwardJumpInHandler = false; #ifdef DEBUG compCurBB = nullptr; lvaTable = nullptr; // Reset node and block ID counter compGenTreeID = 0; compStatementID = 0; compBasicBlockID = 0; #endif /* Initialize emitter */ if (!compIsForInlining()) { codeGen->GetEmitter()->emitBegCG(this, compHnd); } info.compIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0; info.compPublishStubParam = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PUBLISH_SECRET_PARAM); info.compHasNextCallRetAddr = false; if (opts.IsReversePInvoke()) { bool unused; info.compCallConv = info.compCompHnd->getUnmanagedCallConv(methodInfo->ftn, nullptr, &unused); info.compArgOrder = Target::g_tgtUnmanagedArgOrder; } else { info.compCallConv = CorInfoCallConvExtension::Managed; info.compArgOrder = Target::g_tgtArgOrder; } info.compIsVarArgs = false; switch (methodInfo->args.getCallConv()) { case CORINFO_CALLCONV_NATIVEVARARG: case CORINFO_CALLCONV_VARARG: info.compIsVarArgs = true; break; default: break; } info.compRetNativeType = info.compRetType = JITtype2varType(methodInfo->args.retType); info.compUnmanagedCallCountWithGCTransition = 0; info.compLvFrameListRoot = BAD_VAR_NUM; info.compInitMem = ((methodInfo->options & CORINFO_OPT_INIT_LOCALS) != 0); /* Allocate the local variable table */ lvaInitTypeRef(); compInitDebuggingInfo(); // If are an altjit and have patchpoint info, we might need to tweak the frame size // so it's plausible for the altjit architecture. // if (!info.compMatchedVM && compileFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { assert(info.compLocalsCount == info.compPatchpointInfo->NumberOfLocals()); const int totalFrameSize = info.compPatchpointInfo->TotalFrameSize(); int frameSizeUpdate = 0; #if defined(TARGET_AMD64) if ((totalFrameSize % 16) != 8) { frameSizeUpdate = 8; } #elif defined(TARGET_ARM64) if ((totalFrameSize % 16) != 0) { frameSizeUpdate = 8; } #endif if (frameSizeUpdate != 0) { JITDUMP("Mismatched altjit + OSR -- updating tier0 frame size from %d to %d\n", totalFrameSize, totalFrameSize + frameSizeUpdate); // Allocate a local copy with altered frame size. // const unsigned patchpointInfoSize = PatchpointInfo::ComputeSize(info.compLocalsCount); PatchpointInfo* const newInfo = (PatchpointInfo*)getAllocator(CMK_Unknown).allocate<char>(patchpointInfoSize); newInfo->Initialize(info.compLocalsCount, totalFrameSize + frameSizeUpdate); newInfo->Copy(info.compPatchpointInfo); // Swap it in place. // info.compPatchpointInfo = newInfo; } } #ifdef DEBUG if (compIsForInlining()) { compBasicBlockID = impInlineInfo->InlinerCompiler->compBasicBlockID; } #endif const bool forceInline = !!(info.compFlags & CORINFO_FLG_FORCEINLINE); if (!compIsForInlining() && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { // We're prejitting the root method. 
We also will analyze it as // a potential inline candidate. InlineResult prejitResult(this, methodHnd, "prejit"); // Profile data allows us to avoid early "too many IL bytes" outs. prejitResult.NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, fgHaveSufficientProfileData()); // Do the initial inline screen. impCanInlineIL(methodHnd, methodInfo, forceInline, &prejitResult); // Temporarily install the prejitResult as the // compInlineResult so it's available to fgFindJumpTargets // and can accumulate more observations as the IL is // scanned. // // We don't pass prejitResult in as a parameter to avoid // potential aliasing confusion -- the other call to // fgFindBasicBlocks may have set up compInlineResult and // the code in fgFindJumpTargets references that data // member extensively. assert(compInlineResult == nullptr); assert(impInlineInfo == nullptr); compInlineResult = &prejitResult; // Find the basic blocks. We must do this regardless of // inlineability, since we are prejitting this method. // // This will also update the status of this method as // an inline candidate. fgFindBasicBlocks(); // Undo the temporary setup. assert(compInlineResult == &prejitResult); compInlineResult = nullptr; // If still a viable, discretionary inline, assess // profitability. if (prejitResult.IsDiscretionaryCandidate()) { prejitResult.DetermineProfitability(methodInfo); } m_inlineStrategy->NotePrejitDecision(prejitResult); // Handle the results of the inline analysis. if (prejitResult.IsFailure()) { // This method is a bad inlinee according to our // analysis. We will let the InlineResult destructor // mark it as noinline in the prejit image to save the // jit some work. // // This decision better not be context-dependent. assert(prejitResult.IsNever()); } else { // This looks like a viable inline candidate. Since // we're not actually inlining, don't report anything. prejitResult.SetReported(); } } else { // We are jitting the root method, or inlining. fgFindBasicBlocks(); // If we are doing OSR, update flow to initially reach the appropriate IL offset. // if (opts.IsOSR()) { fgFixEntryFlowForOSR(); } } // If we're inlining and the candidate is bad, bail out. if (compDonotInline()) { goto _Next; } // We may decide to optimize this method, // to avoid spending a long time stuck in Tier0 code. // if (fgCanSwitchToOptimized()) { // We only expect to be able to do this at Tier0. // assert(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)); // Normal tiering should bail us out of Tier0 tail call induced loops. // So keep these methods in Tier0 if we're gathering PGO data. // If we're not gathering PGO, then switch these to optimized to // minimize the number of tail call helper stubs we might need. // Reconsider this if/when we're able to share those stubs. // // Honor the config setting that tells the jit to // always optimize methods with loops. // // If neither of those apply, and OSR is enabled, the jit may still // decide to optimize, if there's something in the method that // OSR currently cannot handle, or we're optionally suppressing // OSR by method hash. 
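// Illustrative sketch (not part of the JIT build): the structure of the
// switch-to-optimized decision made just below. The booleans are hypothetical
// stand-ins for the real flags and config checks; the actual conditions also
// involve instrumentation and OSR configuration, so this only sketches the
// shape: escape Tier0 when the method cannot be handled well there, or when
// config says loops should always be optimized.
#if 0
const char* chooseSwitchReason(bool hasTailCall,
                               bool collectingInstrumentation,
                               bool hasBackwardJump,
                               bool alwaysOptimizeLoops,
                               bool osrCanHandleMethod)
{
    if (hasTailCall && !collectingInstrumentation)
    {
        return "tail.call and not BBINSTR";
    }
    if (hasBackwardJump && alwaysOptimizeLoops)
    {
        return "loop";
    }
    if (hasBackwardJump && !osrCanHandleMethod)
    {
        return "loop with no OSR escape";
    }
    return nullptr; // stay at Tier0
}
#endif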
// const char* reason = nullptr; if (compTailPrefixSeen && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { reason = "tail.call and not BBINSTR"; } else if (compHasBackwardJump && ((info.compFlags & CORINFO_FLG_DISABLE_TIER0_FOR_LOOPS) != 0)) { reason = "loop"; } if (compHasBackwardJump && (reason == nullptr) && (JitConfig.TC_OnStackReplacement() > 0)) { const char* noPatchpointReason = nullptr; bool canEscapeViaOSR = compCanHavePatchpoints(&reason); #ifdef DEBUG if (canEscapeViaOSR) { // Optionally disable OSR by method hash. This will force any // method that might otherwise get trapped in Tier0 to be optimized. // static ConfigMethodRange JitEnableOsrRange; JitEnableOsrRange.EnsureInit(JitConfig.JitEnableOsrRange()); const unsigned hash = impInlineRoot()->info.compMethodHash(); if (!JitEnableOsrRange.Contains(hash)) { canEscapeViaOSR = false; reason = "OSR disabled by JitEnableOsrRange"; } } #endif if (canEscapeViaOSR) { JITDUMP("\nOSR enabled for this method\n"); } else { JITDUMP("\nOSR disabled for this method: %s\n", noPatchpointReason); assert(reason != nullptr); } } if (reason != nullptr) { fgSwitchToOptimized(reason); } } compSetOptimizationLevel(); #if COUNT_BASIC_BLOCKS bbCntTable.record(fgBBcount); if (fgBBcount == 1) { bbOneBBSizeTable.record(methodInfo->ILCodeSize); } #endif // COUNT_BASIC_BLOCKS #ifdef DEBUG if (verbose) { printf("Basic block list for '%s'\n", info.compFullName); fgDispBasicBlocks(); } #endif #ifdef DEBUG /* Give the function a unique number */ if (opts.disAsm || verbose) { compMethodID = ~info.compMethodHash() & 0xffff; } else { compMethodID = InterlockedIncrement(&s_compMethodsCount); } #endif if (compIsForInlining()) { compInlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_BASIC_BLOCKS, fgBBcount); if (compInlineResult->IsFailure()) { goto _Next; } } #ifdef DEBUG if ((JitConfig.DumpJittedMethods() == 1) && !compIsForInlining()) { enum { BUFSIZE = 20 }; char osrBuffer[BUFSIZE] = {0}; if (opts.IsOSR()) { // Tiering name already includes "OSR", we just want the IL offset // sprintf_s(osrBuffer, BUFSIZE, " @0x%x", info.compILEntry); } printf("Compiling %4d %s::%s, IL size = %u, hash=0x%08x %s%s%s\n", Compiler::jitTotalMethodCompiled, info.compClassName, info.compMethodName, info.compILCodeSize, info.compMethodHash(), compGetTieringName(), osrBuffer, compGetStressMessage()); } if (compIsForInlining()) { compGenTreeID = impInlineInfo->InlinerCompiler->compGenTreeID; compStatementID = impInlineInfo->InlinerCompiler->compStatementID; } #endif compCompile(methodCodePtr, methodCodeSize, compileFlags); #ifdef DEBUG if (compIsForInlining()) { impInlineInfo->InlinerCompiler->compGenTreeID = compGenTreeID; impInlineInfo->InlinerCompiler->compStatementID = compStatementID; impInlineInfo->InlinerCompiler->compBasicBlockID = compBasicBlockID; } #endif _Next: if (compDonotInline()) { // Verify we have only one inline result in play. assert(impInlineInfo->inlineResult == compInlineResult); } if (!compIsForInlining()) { compCompileFinish(); // Did we just compile for a target architecture that the VM isn't expecting? If so, the VM // can't used the generated code (and we better be an AltJit!). if (!info.compMatchedVM) { return CORJIT_SKIPPED; } #ifdef DEBUG if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT) && JitConfig.RunAltJitCode() == 0) { return CORJIT_SKIPPED; } #endif // DEBUG } /* Success! 
*/ return CORJIT_OK; } //------------------------------------------------------------------------ // compFindLocalVarLinear: Linear search for variable's scope containing offset. // // Arguments: // varNum The variable number to search for in the array of scopes. // offs The offset value which should occur within the life of the variable. // // Return Value: // VarScopeDsc* of a matching variable that contains the offset within its life // begin and life end or nullptr when there is no match found. // // Description: // Linear search for matching variables with their life begin and end containing // the offset. // or NULL if one couldn't be found. // // Note: // Usually called for scope count = 4. Could be called for values upto 8. // VarScopeDsc* Compiler::compFindLocalVarLinear(unsigned varNum, unsigned offs) { for (unsigned i = 0; i < info.compVarScopesCount; i++) { VarScopeDsc* dsc = &info.compVarScopes[i]; if ((dsc->vsdVarNum == varNum) && (dsc->vsdLifeBeg <= offs) && (dsc->vsdLifeEnd > offs)) { return dsc; } } return nullptr; } //------------------------------------------------------------------------ // compFindLocalVar: Search for variable's scope containing offset. // // Arguments: // varNum The variable number to search for in the array of scopes. // offs The offset value which should occur within the life of the variable. // // Return Value: // VarScopeDsc* of a matching variable that contains the offset within its life // begin and life end. // or NULL if one couldn't be found. // // Description: // Linear search for matching variables with their life begin and end containing // the offset only when the scope count is < MAX_LINEAR_FIND_LCL_SCOPELIST, // else use the hashtable lookup. // VarScopeDsc* Compiler::compFindLocalVar(unsigned varNum, unsigned offs) { if (info.compVarScopesCount < MAX_LINEAR_FIND_LCL_SCOPELIST) { return compFindLocalVarLinear(varNum, offs); } else { VarScopeDsc* ret = compFindLocalVar(varNum, offs, offs); assert(ret == compFindLocalVarLinear(varNum, offs)); return ret; } } //------------------------------------------------------------------------ // compFindLocalVar: Search for variable's scope containing offset. // // Arguments: // varNum The variable number to search for in the array of scopes. // lifeBeg The life begin of the variable's scope // lifeEnd The life end of the variable's scope // // Return Value: // VarScopeDsc* of a matching variable that contains the offset within its life // begin and life end, or NULL if one couldn't be found. // // Description: // Following are the steps used: // 1. Index into the hashtable using varNum. // 2. Iterate through the linked list at index varNum to find a matching // var scope. // VarScopeDsc* Compiler::compFindLocalVar(unsigned varNum, unsigned lifeBeg, unsigned lifeEnd) { assert(compVarScopeMap != nullptr); VarScopeMapInfo* info; if (compVarScopeMap->Lookup(varNum, &info)) { VarScopeListNode* list = info->head; while (list != nullptr) { if ((list->data->vsdLifeBeg <= lifeBeg) && (list->data->vsdLifeEnd > lifeEnd)) { return list->data; } list = list->next; } } return nullptr; } //------------------------------------------------------------------------- // compInitVarScopeMap: Create a scope map so it can be looked up by varNum // // Description: // Map.K => Map.V :: varNum => List(ScopeDsc) // // Create a scope map that can be indexed by varNum and can be iterated // on it's values to look for matching scope when given an offs or // lifeBeg and lifeEnd. // // Notes: // 1. 
Build the map only when we think linear search is slow, i.e., // MAX_LINEAR_FIND_LCL_SCOPELIST is large. // 2. Linked list preserves original array order. // void Compiler::compInitVarScopeMap() { if (info.compVarScopesCount < MAX_LINEAR_FIND_LCL_SCOPELIST) { return; } assert(compVarScopeMap == nullptr); compVarScopeMap = new (getAllocator()) VarNumToScopeDscMap(getAllocator()); // 599 prime to limit huge allocations; for ex: duplicated scopes on single var. compVarScopeMap->Reallocate(min(info.compVarScopesCount, 599)); for (unsigned i = 0; i < info.compVarScopesCount; ++i) { unsigned varNum = info.compVarScopes[i].vsdVarNum; VarScopeListNode* node = VarScopeListNode::Create(&info.compVarScopes[i], getAllocator()); // Index by varNum and if the list exists append "node" to the "list". VarScopeMapInfo* info; if (compVarScopeMap->Lookup(varNum, &info)) { info->tail->next = node; info->tail = node; } // Create a new list. else { info = VarScopeMapInfo::Create(node, getAllocator()); compVarScopeMap->Set(varNum, info); } } } struct genCmpLocalVarLifeBeg { bool operator()(const VarScopeDsc* elem1, const VarScopeDsc* elem2) { return elem1->vsdLifeBeg < elem2->vsdLifeBeg; } }; struct genCmpLocalVarLifeEnd { bool operator()(const VarScopeDsc* elem1, const VarScopeDsc* elem2) { return elem1->vsdLifeEnd < elem2->vsdLifeEnd; } }; inline void Compiler::compInitScopeLists() { if (info.compVarScopesCount == 0) { compEnterScopeList = compExitScopeList = nullptr; return; } // Populate the 'compEnterScopeList' and 'compExitScopeList' lists compEnterScopeList = new (this, CMK_DebugInfo) VarScopeDsc*[info.compVarScopesCount]; compExitScopeList = new (this, CMK_DebugInfo) VarScopeDsc*[info.compVarScopesCount]; for (unsigned i = 0; i < info.compVarScopesCount; i++) { compEnterScopeList[i] = compExitScopeList[i] = &info.compVarScopes[i]; } jitstd::sort(compEnterScopeList, compEnterScopeList + info.compVarScopesCount, genCmpLocalVarLifeBeg()); jitstd::sort(compExitScopeList, compExitScopeList + info.compVarScopesCount, genCmpLocalVarLifeEnd()); } void Compiler::compResetScopeLists() { if (info.compVarScopesCount == 0) { return; } assert(compEnterScopeList && compExitScopeList); compNextEnterScope = compNextExitScope = 0; } VarScopeDsc* Compiler::compGetNextEnterScope(unsigned offs, bool scan) { assert(info.compVarScopesCount); assert(compEnterScopeList && compExitScopeList); if (compNextEnterScope < info.compVarScopesCount) { assert(compEnterScopeList[compNextEnterScope]); unsigned nextEnterOff = compEnterScopeList[compNextEnterScope]->vsdLifeBeg; assert(scan || (offs <= nextEnterOff)); if (!scan) { if (offs == nextEnterOff) { return compEnterScopeList[compNextEnterScope++]; } } else { if (nextEnterOff <= offs) { return compEnterScopeList[compNextEnterScope++]; } } } return nullptr; } VarScopeDsc* Compiler::compGetNextExitScope(unsigned offs, bool scan) { assert(info.compVarScopesCount); assert(compEnterScopeList && compExitScopeList); if (compNextExitScope < info.compVarScopesCount) { assert(compExitScopeList[compNextExitScope]); unsigned nextExitOffs = compExitScopeList[compNextExitScope]->vsdLifeEnd; assert(scan || (offs <= nextExitOffs)); if (!scan) { if (offs == nextExitOffs) { return compExitScopeList[compNextExitScope++]; } } else { if (nextExitOffs <= offs) { return compExitScopeList[compNextExitScope++]; } } } return nullptr; } // The function will call the callback functions for scopes with boundaries // at instrs from the current status of the scope lists to 'offset', // ordered by instrs. 
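// A sketch with hypothetical offsets (not taken from any real method): if the
// enter-sorted list has scopes beginning at 0x02 and 0x08 and the exit-sorted list
// has scopes ending at 0x06 and 0x0C, then a call with offset == 0x0A invokes, in
// order, enterScopeFn for 0x02, exitScopeFn for 0x06, and enterScopeFn for 0x08,
// so that 'inScope' ends up describing exactly the scopes still open at 0x0A.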
void Compiler::compProcessScopesUntil(unsigned offset, VARSET_TP* inScope, void (Compiler::*enterScopeFn)(VARSET_TP* inScope, VarScopeDsc*), void (Compiler::*exitScopeFn)(VARSET_TP* inScope, VarScopeDsc*)) { assert(offset != BAD_IL_OFFSET); assert(inScope != nullptr); bool foundExit = false, foundEnter = true; VarScopeDsc* scope; VarScopeDsc* nextExitScope = nullptr; VarScopeDsc* nextEnterScope = nullptr; unsigned offs = offset, curEnterOffs = 0; goto START_FINDING_SCOPES; // We need to determine the scopes which are open for the current block. // This loop walks over the missing blocks between the current and the // previous block, keeping the enter and exit offsets in lockstep. do { foundExit = foundEnter = false; if (nextExitScope) { (this->*exitScopeFn)(inScope, nextExitScope); nextExitScope = nullptr; foundExit = true; } offs = nextEnterScope ? nextEnterScope->vsdLifeBeg : offset; while ((scope = compGetNextExitScope(offs, true)) != nullptr) { foundExit = true; if (!nextEnterScope || scope->vsdLifeEnd > nextEnterScope->vsdLifeBeg) { // We overshot the last found Enter scope. Save the scope for later // and find an entering scope nextExitScope = scope; break; } (this->*exitScopeFn)(inScope, scope); } if (nextEnterScope) { (this->*enterScopeFn)(inScope, nextEnterScope); curEnterOffs = nextEnterScope->vsdLifeBeg; nextEnterScope = nullptr; foundEnter = true; } offs = nextExitScope ? nextExitScope->vsdLifeEnd : offset; START_FINDING_SCOPES: while ((scope = compGetNextEnterScope(offs, true)) != nullptr) { foundEnter = true; if ((nextExitScope && scope->vsdLifeBeg >= nextExitScope->vsdLifeEnd) || (scope->vsdLifeBeg > curEnterOffs)) { // We overshot the last found exit scope. Save the scope for later // and find an exiting scope nextEnterScope = scope; break; } (this->*enterScopeFn)(inScope, scope); if (!nextExitScope) { curEnterOffs = scope->vsdLifeBeg; } } } while (foundExit || foundEnter); } #if defined(DEBUG) void Compiler::compDispScopeLists() { unsigned i; printf("Local variable scopes = %d\n", info.compVarScopesCount); if (info.compVarScopesCount) { printf(" \tVarNum \tLVNum \t Name \tBeg \tEnd\n"); } printf("Sorted by enter scope:\n"); for (i = 0; i < info.compVarScopesCount; i++) { VarScopeDsc* varScope = compEnterScopeList[i]; assert(varScope); printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh", i, varScope->vsdVarNum, varScope->vsdLVnum, VarNameToStr(varScope->vsdName) == nullptr ? "UNKNOWN" : VarNameToStr(varScope->vsdName), varScope->vsdLifeBeg, varScope->vsdLifeEnd); if (compNextEnterScope == i) { printf(" <-- next enter scope"); } printf("\n"); } printf("Sorted by exit scope:\n"); for (i = 0; i < info.compVarScopesCount; i++) { VarScopeDsc* varScope = compExitScopeList[i]; assert(varScope); printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh", i, varScope->vsdVarNum, varScope->vsdLVnum, VarNameToStr(varScope->vsdName) == nullptr ? "UNKNOWN" : VarNameToStr(varScope->vsdName), varScope->vsdLifeBeg, varScope->vsdLifeEnd); if (compNextExitScope == i) { printf(" <-- next exit scope"); } printf("\n"); } } void Compiler::compDispLocalVars() { printf("info.compVarScopesCount = %d\n", info.compVarScopesCount); if (info.compVarScopesCount > 0) { printf(" \tVarNum \tLVNum \t Name \tBeg \tEnd\n"); } for (unsigned i = 0; i < info.compVarScopesCount; i++) { VarScopeDsc* varScope = &info.compVarScopes[i]; printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh\n", i, varScope->vsdVarNum, varScope->vsdLVnum, VarNameToStr(varScope->vsdName) == nullptr ? 
"UNKNOWN" : VarNameToStr(varScope->vsdName), varScope->vsdLifeBeg, varScope->vsdLifeEnd); } } #endif // DEBUG /*****************************************************************************/ #if MEASURE_CLRAPI_CALLS struct WrapICorJitInfo : public ICorJitInfo { //------------------------------------------------------------------------ // WrapICorJitInfo::makeOne: allocate an instance of WrapICorJitInfo // // Arguments: // alloc - the allocator to get memory from for the instance // compile - the compiler instance // compHndRef - the ICorJitInfo handle from the EE; the caller's // copy may be replaced with a "wrapper" instance // // Return Value: // If the config flags indicate that ICorJitInfo should be wrapped, // we return the "wrapper" instance; otherwise we return "nullptr". static WrapICorJitInfo* makeOne(ArenaAllocator* alloc, Compiler* compiler, COMP_HANDLE& compHndRef /* INOUT */) { WrapICorJitInfo* wrap = nullptr; if (JitConfig.JitEECallTimingInfo() != 0) { // It's too early to use the default allocator, so we do this // in two steps to be safe (the constructor doesn't need to do // anything except fill in the vtable pointer, so we let the // compiler do it). void* inst = alloc->allocateMemory(roundUp(sizeof(WrapICorJitInfo))); if (inst != nullptr) { // If you get a build error here due to 'WrapICorJitInfo' being // an abstract class, it's very likely that the wrapper bodies // in ICorJitInfo_API_wrapper.hpp are no longer in sync with // the EE interface; please be kind and update the header file. wrap = new (inst, jitstd::placement_t()) WrapICorJitInfo(); wrap->wrapComp = compiler; // Save the real handle and replace it with our wrapped version. wrap->wrapHnd = compHndRef; compHndRef = wrap; } } return wrap; } private: Compiler* wrapComp; COMP_HANDLE wrapHnd; // the "real thing" public: #include "ICorJitInfo_API_wrapper.hpp" }; #endif // MEASURE_CLRAPI_CALLS /*****************************************************************************/ // Compile a single method int jitNativeCode(CORINFO_METHOD_HANDLE methodHnd, CORINFO_MODULE_HANDLE classPtr, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags, void* inlineInfoPtr) { // // A non-NULL inlineInfo means we are compiling the inlinee method. // InlineInfo* inlineInfo = (InlineInfo*)inlineInfoPtr; bool jitFallbackCompile = false; START: int result = CORJIT_INTERNALERROR; ArenaAllocator* pAlloc = nullptr; ArenaAllocator alloc; #if MEASURE_CLRAPI_CALLS WrapICorJitInfo* wrapCLR = nullptr; #endif if (inlineInfo) { // Use inliner's memory allocator when compiling the inlinee. 
pAlloc = inlineInfo->InlinerCompiler->compGetArenaAllocator(); } else { pAlloc = &alloc; } Compiler* pComp; pComp = nullptr; struct Param { Compiler* pComp; ArenaAllocator* pAlloc; bool jitFallbackCompile; CORINFO_METHOD_HANDLE methodHnd; CORINFO_MODULE_HANDLE classPtr; COMP_HANDLE compHnd; CORINFO_METHOD_INFO* methodInfo; void** methodCodePtr; uint32_t* methodCodeSize; JitFlags* compileFlags; InlineInfo* inlineInfo; #if MEASURE_CLRAPI_CALLS WrapICorJitInfo* wrapCLR; #endif int result; } param; param.pComp = nullptr; param.pAlloc = pAlloc; param.jitFallbackCompile = jitFallbackCompile; param.methodHnd = methodHnd; param.classPtr = classPtr; param.compHnd = compHnd; param.methodInfo = methodInfo; param.methodCodePtr = methodCodePtr; param.methodCodeSize = methodCodeSize; param.compileFlags = compileFlags; param.inlineInfo = inlineInfo; #if MEASURE_CLRAPI_CALLS param.wrapCLR = nullptr; #endif param.result = result; setErrorTrap(compHnd, Param*, pParamOuter, &param) { setErrorTrap(nullptr, Param*, pParam, pParamOuter) { if (pParam->inlineInfo) { // Lazily create the inlinee compiler object if (pParam->inlineInfo->InlinerCompiler->InlineeCompiler == nullptr) { pParam->inlineInfo->InlinerCompiler->InlineeCompiler = (Compiler*)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp))); } // Use the inlinee compiler object pParam->pComp = pParam->inlineInfo->InlinerCompiler->InlineeCompiler; #ifdef DEBUG // memset(pParam->pComp, 0xEE, sizeof(Compiler)); #endif } else { // Allocate create the inliner compiler object pParam->pComp = (Compiler*)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp))); } #if MEASURE_CLRAPI_CALLS pParam->wrapCLR = WrapICorJitInfo::makeOne(pParam->pAlloc, pParam->pComp, pParam->compHnd); #endif // push this compiler on the stack (TLS) pParam->pComp->prevCompiler = JitTls::GetCompiler(); JitTls::SetCompiler(pParam->pComp); // PREFIX_ASSUME gets turned into ASSERT_CHECK and we cannot have it here #if defined(_PREFAST_) || defined(_PREFIX_) PREFIX_ASSUME(pParam->pComp != NULL); #else assert(pParam->pComp != nullptr); #endif pParam->pComp->compInit(pParam->pAlloc, pParam->methodHnd, pParam->compHnd, pParam->methodInfo, pParam->inlineInfo); #ifdef DEBUG pParam->pComp->jitFallbackCompile = pParam->jitFallbackCompile; #endif // Now generate the code pParam->result = pParam->pComp->compCompile(pParam->classPtr, pParam->methodCodePtr, pParam->methodCodeSize, pParam->compileFlags); } finallyErrorTrap() { Compiler* pCompiler = pParamOuter->pComp; // If OOM is thrown when allocating memory for a pComp, we will end up here. // For this case, pComp and also pCompiler will be a nullptr // if (pCompiler != nullptr) { pCompiler->info.compCode = nullptr; // pop the compiler off the TLS stack only if it was linked above assert(JitTls::GetCompiler() == pCompiler); JitTls::SetCompiler(pCompiler->prevCompiler); } if (pParamOuter->inlineInfo == nullptr) { // Free up the allocator we were using pParamOuter->pAlloc->destroy(); } } endErrorTrap() } impJitErrorTrap() { // If we were looking at an inlinee.... if (inlineInfo != nullptr) { // Note that we failed to compile the inlinee, and that // there's no point trying to inline it again anywhere else. 
inlineInfo->inlineResult->NoteFatal(InlineObservation::CALLEE_COMPILATION_ERROR); } param.result = __errc; } endErrorTrap() result = param.result; if (!inlineInfo && (result == CORJIT_INTERNALERROR || result == CORJIT_RECOVERABLEERROR || result == CORJIT_IMPLLIMITATION) && !jitFallbackCompile) { // If we failed the JIT, reattempt with debuggable code. jitFallbackCompile = true; // Update the flags for 'safer' code generation. compileFlags->Set(JitFlags::JIT_FLAG_MIN_OPT); compileFlags->Clear(JitFlags::JIT_FLAG_SIZE_OPT); compileFlags->Clear(JitFlags::JIT_FLAG_SPEED_OPT); goto START; } return result; } #if defined(UNIX_AMD64_ABI) // GetTypeFromClassificationAndSizes: // Returns the type of the eightbyte accounting for the classification and size of the eightbyte. // // args: // classType: classification type // size: size of the eightbyte. // // static var_types Compiler::GetTypeFromClassificationAndSizes(SystemVClassificationType classType, int size) { var_types type = TYP_UNKNOWN; switch (classType) { case SystemVClassificationTypeInteger: if (size == 1) { type = TYP_BYTE; } else if (size <= 2) { type = TYP_SHORT; } else if (size <= 4) { type = TYP_INT; } else if (size <= 8) { type = TYP_LONG; } else { assert(false && "GetTypeFromClassificationAndSizes Invalid Integer classification type."); } break; case SystemVClassificationTypeIntegerReference: type = TYP_REF; break; case SystemVClassificationTypeIntegerByRef: type = TYP_BYREF; break; case SystemVClassificationTypeSSE: if (size <= 4) { type = TYP_FLOAT; } else if (size <= 8) { type = TYP_DOUBLE; } else { assert(false && "GetTypeFromClassificationAndSizes Invalid SSE classification type."); } break; default: assert(false && "GetTypeFromClassificationAndSizes Invalid classification type."); break; } return type; } //------------------------------------------------------------------- // GetEightByteType: Returns the type of eightbyte slot of a struct // // Arguments: // structDesc - struct classification description. // slotNum - eightbyte slot number for the struct. // // Return Value: // type of the eightbyte slot of the struct // // static var_types Compiler::GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, unsigned slotNum) { var_types eightByteType = TYP_UNDEF; unsigned len = structDesc.eightByteSizes[slotNum]; switch (structDesc.eightByteClassifications[slotNum]) { case SystemVClassificationTypeInteger: // See typelist.h for jit type definition. // All the types of size < 4 bytes are of jit type TYP_INT. 
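            // (Hypothetical layouts for illustration: an integer eightbyte of size 4,
            // e.g. from a struct packing a byte and a short, becomes TYP_INT below,
            // while a full 8-byte integer eightbyte, e.g. two packed ints, becomes TYP_LONG.)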
if (structDesc.eightByteSizes[slotNum] <= 4) { eightByteType = TYP_INT; } else if (structDesc.eightByteSizes[slotNum] <= 8) { eightByteType = TYP_LONG; } else { assert(false && "GetEightByteType Invalid Integer classification type."); } break; case SystemVClassificationTypeIntegerReference: assert(len == REGSIZE_BYTES); eightByteType = TYP_REF; break; case SystemVClassificationTypeIntegerByRef: assert(len == REGSIZE_BYTES); eightByteType = TYP_BYREF; break; case SystemVClassificationTypeSSE: if (structDesc.eightByteSizes[slotNum] <= 4) { eightByteType = TYP_FLOAT; } else if (structDesc.eightByteSizes[slotNum] <= 8) { eightByteType = TYP_DOUBLE; } else { assert(false && "GetEightByteType Invalid SSE classification type."); } break; default: assert(false && "GetEightByteType Invalid classification type."); break; } return eightByteType; } //------------------------------------------------------------------------------------------------------ // GetStructTypeOffset: Gets the type, size and offset of the eightbytes of a struct for System V systems. // // Arguments: // 'structDesc' - struct description // 'type0' - out param; returns the type of the first eightbyte. // 'type1' - out param; returns the type of the second eightbyte. // 'offset0' - out param; returns the offset of the first eightbyte. // 'offset1' - out param; returns the offset of the second eightbyte. // // static void Compiler::GetStructTypeOffset(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1) { *offset0 = structDesc.eightByteOffsets[0]; *offset1 = structDesc.eightByteOffsets[1]; *type0 = TYP_UNKNOWN; *type1 = TYP_UNKNOWN; // Set the first eightbyte data if (structDesc.eightByteCount >= 1) { *type0 = GetEightByteType(structDesc, 0); } // Set the second eight byte data if (structDesc.eightByteCount == 2) { *type1 = GetEightByteType(structDesc, 1); } } //------------------------------------------------------------------------------------------------------ // GetStructTypeOffset: Gets the type, size and offset of the eightbytes of a struct for System V systems. // // Arguments: // 'typeHnd' - type handle // 'type0' - out param; returns the type of the first eightbyte. // 'type1' - out param; returns the type of the second eightbyte. // 'offset0' - out param; returns the offset of the first eightbyte. // 'offset1' - out param; returns the offset of the second eightbyte. // void Compiler::GetStructTypeOffset(CORINFO_CLASS_HANDLE typeHnd, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1) { SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; eeGetSystemVAmd64PassStructInRegisterDescriptor(typeHnd, &structDesc); assert(structDesc.passedInRegisters); GetStructTypeOffset(structDesc, type0, type1, offset0, offset1); } #endif // defined(UNIX_AMD64_ABI) /*****************************************************************************/ /*****************************************************************************/ #ifdef DEBUG Compiler::NodeToIntMap* Compiler::FindReachableNodesInNodeTestData() { NodeToIntMap* reachable = new (getAllocatorDebugOnly()) NodeToIntMap(getAllocatorDebugOnly()); if (m_nodeTestData == nullptr) { return reachable; } // Otherwise, iterate. 
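    // (The walk below touches every tree of every statement in every block, so a node
    // that still carries test data but never shows up in the returned map is no longer
    // reachable from the IR.)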
for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->NonPhiStatements()) { for (GenTree* const tree : stmt->TreeList()) { TestLabelAndNum tlAndN; // For call nodes, translate late args to what they stand for. if (tree->OperGet() == GT_CALL) { GenTreeCall* call = tree->AsCall(); unsigned i = 0; for (GenTreeCall::Use& use : call->Args()) { if ((use.GetNode()->gtFlags & GTF_LATE_ARG) != 0) { // Find the corresponding late arg. GenTree* lateArg = call->fgArgInfo->GetArgNode(i); if (GetNodeTestData()->Lookup(lateArg, &tlAndN)) { reachable->Set(lateArg, 0); } } i++; } } if (GetNodeTestData()->Lookup(tree, &tlAndN)) { reachable->Set(tree, 0); } } } } return reachable; } void Compiler::TransferTestDataToNode(GenTree* from, GenTree* to) { TestLabelAndNum tlAndN; // We can't currently associate multiple annotations with a single node. // If we need to, we can fix this... // If the table is null, don't create it just to do the lookup, which would fail... if (m_nodeTestData != nullptr && GetNodeTestData()->Lookup(from, &tlAndN)) { assert(!GetNodeTestData()->Lookup(to, &tlAndN)); // We can't currently associate multiple annotations with a single node. // If we need to, we can fix this... TestLabelAndNum tlAndNTo; assert(!GetNodeTestData()->Lookup(to, &tlAndNTo)); GetNodeTestData()->Remove(from); GetNodeTestData()->Set(to, tlAndN); } } #endif // DEBUG /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX jvc XX XX XX XX Functions for the stand-alone version of the JIT . XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ void codeGeneratorCodeSizeBeg() { } /***************************************************************************** * * Used for counting pointer assignments. 
*/ /*****************************************************************************/ void codeGeneratorCodeSizeEnd() { } /***************************************************************************** * * Gather statistics - mainly used for the standalone * Enable various #ifdef's to get the information you need */ void Compiler::compJitStats() { #if CALL_ARG_STATS /* Method types and argument statistics */ compCallArgStats(); #endif // CALL_ARG_STATS } #if CALL_ARG_STATS /***************************************************************************** * * Gather statistics about method calls and arguments */ void Compiler::compCallArgStats() { unsigned argNum; unsigned argDWordNum; unsigned argLngNum; unsigned argFltNum; unsigned argDblNum; unsigned regArgNum; unsigned regArgDeferred; unsigned regArgTemp; unsigned regArgLclVar; unsigned regArgConst; unsigned argTempsThisMethod = 0; assert(fgStmtListThreaded); for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { for (GenTree* const call : stmt->TreeList()) { if (call->gtOper != GT_CALL) continue; argNum = regArgNum = regArgDeferred = regArgTemp = regArgConst = regArgLclVar = argDWordNum = argLngNum = argFltNum = argDblNum = 0; argTotalCalls++; if (call->AsCall()->gtCallThisArg == nullptr) { if (call->AsCall()->gtCallType == CT_HELPER) { argHelperCalls++; } else { argStaticCalls++; } } else { /* We have a 'this' pointer */ argDWordNum++; argNum++; regArgNum++; regArgDeferred++; argTotalObjPtr++; if (call->AsCall()->IsVirtual()) { /* virtual function */ argVirtualCalls++; } else { argNonVirtualCalls++; } } } } } argTempsCntTable.record(argTempsThisMethod); if (argMaxTempsPerMethod < argTempsThisMethod) { argMaxTempsPerMethod = argTempsThisMethod; } } /* static */ void Compiler::compDispCallArgStats(FILE* fout) { if (argTotalCalls == 0) return; fprintf(fout, "\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Call stats\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Total # of calls = %d, calls / method = %.3f\n\n", argTotalCalls, (float)argTotalCalls / genMethodCnt); fprintf(fout, "Percentage of helper calls = %4.2f %%\n", (float)(100 * argHelperCalls) / argTotalCalls); fprintf(fout, "Percentage of static calls = %4.2f %%\n", (float)(100 * argStaticCalls) / argTotalCalls); fprintf(fout, "Percentage of virtual calls = %4.2f %%\n", (float)(100 * argVirtualCalls) / argTotalCalls); fprintf(fout, "Percentage of non-virtual calls = %4.2f %%\n\n", (float)(100 * argNonVirtualCalls) / argTotalCalls); fprintf(fout, "Average # of arguments per call = %.2f%%\n\n", (float)argTotalArgs / argTotalCalls); fprintf(fout, "Percentage of DWORD arguments = %.2f %%\n", (float)(100 * argTotalDWordArgs) / argTotalArgs); fprintf(fout, "Percentage of LONG arguments = %.2f %%\n", (float)(100 * argTotalLongArgs) / argTotalArgs); fprintf(fout, "Percentage of FLOAT arguments = %.2f %%\n", (float)(100 * argTotalFloatArgs) / argTotalArgs); fprintf(fout, "Percentage of DOUBLE arguments = %.2f %%\n\n", (float)(100 * argTotalDoubleArgs) / argTotalArgs); if (argTotalRegArgs == 0) return; /* fprintf(fout, "Total deferred arguments = %d \n", argTotalDeferred); fprintf(fout, "Total temp arguments = %d \n\n", argTotalTemps); fprintf(fout, "Total 'this' arguments = %d \n", argTotalObjPtr); fprintf(fout, "Total local var arguments = %d \n", argTotalLclVar); fprintf(fout, "Total constant arguments = %d \n\n", argTotalConst); */ fprintf(fout, "\nRegister 
Arguments:\n\n"); fprintf(fout, "Percentage of deferred arguments = %.2f %%\n", (float)(100 * argTotalDeferred) / argTotalRegArgs); fprintf(fout, "Percentage of temp arguments = %.2f %%\n\n", (float)(100 * argTotalTemps) / argTotalRegArgs); fprintf(fout, "Maximum # of temps per method = %d\n\n", argMaxTempsPerMethod); fprintf(fout, "Percentage of ObjPtr arguments = %.2f %%\n", (float)(100 * argTotalObjPtr) / argTotalRegArgs); // fprintf(fout, "Percentage of global arguments = %.2f %%\n", (float)(100 * argTotalDWordGlobEf) / // argTotalRegArgs); fprintf(fout, "Percentage of constant arguments = %.2f %%\n", (float)(100 * argTotalConst) / argTotalRegArgs); fprintf(fout, "Percentage of lcl var arguments = %.2f %%\n\n", (float)(100 * argTotalLclVar) / argTotalRegArgs); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Argument count frequency table (includes ObjPtr):\n"); fprintf(fout, "--------------------------------------------------\n"); argCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "DWORD argument count frequency table (w/o LONG):\n"); fprintf(fout, "--------------------------------------------------\n"); argDWordCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Temps count frequency table (per method):\n"); fprintf(fout, "--------------------------------------------------\n"); argTempsCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); /* fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "DWORD argument count frequency table (w/ LONG):\n"); fprintf(fout, "--------------------------------------------------\n"); argDWordLngCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); */ } #endif // CALL_ARG_STATS // JIT time end to end, and by phases. 
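//
// Rough shape of the machinery that follows: each compilation owns a JitTimer whose
// EndPhase() attributes elapsed cycles to the finished leaf phase (and, via PhaseParent,
// to its ancestors); Terminate() then folds the per-method CompTimeInfo into the
// process-wide CompTimeSummaryInfo under s_compTimeSummaryLock and, when enabled,
// appends one row per method to the JitTimeLogCsv file.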
#ifdef FEATURE_JIT_METHOD_PERF // Static variables CritSecObject CompTimeSummaryInfo::s_compTimeSummaryLock; CompTimeSummaryInfo CompTimeSummaryInfo::s_compTimeSummary; #if MEASURE_CLRAPI_CALLS double JitTimer::s_cyclesPerSec = CachedCyclesPerSecond(); #endif #endif // FEATURE_JIT_METHOD_PERF #if defined(FEATURE_JIT_METHOD_PERF) || DUMP_FLOWGRAPHS || defined(FEATURE_TRACELOGGING) const char* PhaseNames[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) string_nm, #include "compphases.h" }; const char* PhaseEnums[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) #enum_nm, #include "compphases.h" }; const LPCWSTR PhaseShortNames[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) W(short_nm), #include "compphases.h" }; #endif // defined(FEATURE_JIT_METHOD_PERF) || DUMP_FLOWGRAPHS #ifdef FEATURE_JIT_METHOD_PERF bool PhaseHasChildren[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) hasChildren, #include "compphases.h" }; int PhaseParent[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) parent, #include "compphases.h" }; bool PhaseReportsIRSize[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) measureIR, #include "compphases.h" }; CompTimeInfo::CompTimeInfo(unsigned byteCodeBytes) : m_byteCodeBytes(byteCodeBytes) , m_totalCycles(0) , m_parentPhaseEndSlop(0) , m_timerFailure(false) #if MEASURE_CLRAPI_CALLS , m_allClrAPIcalls(0) , m_allClrAPIcycles(0) #endif { for (int i = 0; i < PHASE_NUMBER_OF; i++) { m_invokesByPhase[i] = 0; m_cyclesByPhase[i] = 0; #if MEASURE_CLRAPI_CALLS m_CLRinvokesByPhase[i] = 0; m_CLRcyclesByPhase[i] = 0; #endif } #if MEASURE_CLRAPI_CALLS assert(ArrLen(m_perClrAPIcalls) == API_ICorJitInfo_Names::API_COUNT); assert(ArrLen(m_perClrAPIcycles) == API_ICorJitInfo_Names::API_COUNT); assert(ArrLen(m_maxClrAPIcycles) == API_ICorJitInfo_Names::API_COUNT); for (int i = 0; i < API_ICorJitInfo_Names::API_COUNT; i++) { m_perClrAPIcalls[i] = 0; m_perClrAPIcycles[i] = 0; m_maxClrAPIcycles[i] = 0; } #endif } bool CompTimeSummaryInfo::IncludedInFilteredData(CompTimeInfo& info) { return false; // info.m_byteCodeBytes < 10; } //------------------------------------------------------------------------ // CompTimeSummaryInfo::AddInfo: Record timing info from one compile. // // Arguments: // info - The timing information to record. // includePhases - If "true", the per-phase info in "info" is valid, // which means that a "normal" compile has ended; if // the value is "false" we are recording the results // of a partial compile (typically an import-only run // on behalf of the inliner) in which case the phase // info is not valid and so we only record EE call // overhead. void CompTimeSummaryInfo::AddInfo(CompTimeInfo& info, bool includePhases) { if (info.m_timerFailure) { return; // Don't update if there was a failure. } CritSecHolder timeLock(s_compTimeSummaryLock); if (includePhases) { bool includeInFiltered = IncludedInFilteredData(info); m_numMethods++; // Update the totals and maxima. m_total.m_byteCodeBytes += info.m_byteCodeBytes; m_maximum.m_byteCodeBytes = max(m_maximum.m_byteCodeBytes, info.m_byteCodeBytes); m_total.m_totalCycles += info.m_totalCycles; m_maximum.m_totalCycles = max(m_maximum.m_totalCycles, info.m_totalCycles); #if MEASURE_CLRAPI_CALLS // Update the CLR-API values. 
m_total.m_allClrAPIcalls += info.m_allClrAPIcalls; m_maximum.m_allClrAPIcalls = max(m_maximum.m_allClrAPIcalls, info.m_allClrAPIcalls); m_total.m_allClrAPIcycles += info.m_allClrAPIcycles; m_maximum.m_allClrAPIcycles = max(m_maximum.m_allClrAPIcycles, info.m_allClrAPIcycles); #endif if (includeInFiltered) { m_numFilteredMethods++; m_filtered.m_byteCodeBytes += info.m_byteCodeBytes; m_filtered.m_totalCycles += info.m_totalCycles; m_filtered.m_parentPhaseEndSlop += info.m_parentPhaseEndSlop; } for (int i = 0; i < PHASE_NUMBER_OF; i++) { m_total.m_invokesByPhase[i] += info.m_invokesByPhase[i]; m_total.m_cyclesByPhase[i] += info.m_cyclesByPhase[i]; #if MEASURE_CLRAPI_CALLS m_total.m_CLRinvokesByPhase[i] += info.m_CLRinvokesByPhase[i]; m_total.m_CLRcyclesByPhase[i] += info.m_CLRcyclesByPhase[i]; #endif if (includeInFiltered) { m_filtered.m_invokesByPhase[i] += info.m_invokesByPhase[i]; m_filtered.m_cyclesByPhase[i] += info.m_cyclesByPhase[i]; #if MEASURE_CLRAPI_CALLS m_filtered.m_CLRinvokesByPhase[i] += info.m_CLRinvokesByPhase[i]; m_filtered.m_CLRcyclesByPhase[i] += info.m_CLRcyclesByPhase[i]; #endif } m_maximum.m_cyclesByPhase[i] = max(m_maximum.m_cyclesByPhase[i], info.m_cyclesByPhase[i]); #if MEASURE_CLRAPI_CALLS m_maximum.m_CLRcyclesByPhase[i] = max(m_maximum.m_CLRcyclesByPhase[i], info.m_CLRcyclesByPhase[i]); #endif } m_total.m_parentPhaseEndSlop += info.m_parentPhaseEndSlop; m_maximum.m_parentPhaseEndSlop = max(m_maximum.m_parentPhaseEndSlop, info.m_parentPhaseEndSlop); } #if MEASURE_CLRAPI_CALLS else { m_totMethods++; // Update the "global" CLR-API values. m_total.m_allClrAPIcalls += info.m_allClrAPIcalls; m_maximum.m_allClrAPIcalls = max(m_maximum.m_allClrAPIcalls, info.m_allClrAPIcalls); m_total.m_allClrAPIcycles += info.m_allClrAPIcycles; m_maximum.m_allClrAPIcycles = max(m_maximum.m_allClrAPIcycles, info.m_allClrAPIcycles); // Update the per-phase CLR-API values. m_total.m_invokesByPhase[PHASE_CLR_API] += info.m_allClrAPIcalls; m_maximum.m_invokesByPhase[PHASE_CLR_API] = max(m_maximum.m_perClrAPIcalls[PHASE_CLR_API], info.m_allClrAPIcalls); m_total.m_cyclesByPhase[PHASE_CLR_API] += info.m_allClrAPIcycles; m_maximum.m_cyclesByPhase[PHASE_CLR_API] = max(m_maximum.m_cyclesByPhase[PHASE_CLR_API], info.m_allClrAPIcycles); } for (int i = 0; i < API_ICorJitInfo_Names::API_COUNT; i++) { m_total.m_perClrAPIcalls[i] += info.m_perClrAPIcalls[i]; m_maximum.m_perClrAPIcalls[i] = max(m_maximum.m_perClrAPIcalls[i], info.m_perClrAPIcalls[i]); m_total.m_perClrAPIcycles[i] += info.m_perClrAPIcycles[i]; m_maximum.m_perClrAPIcycles[i] = max(m_maximum.m_perClrAPIcycles[i], info.m_perClrAPIcycles[i]); m_maximum.m_maxClrAPIcycles[i] = max(m_maximum.m_maxClrAPIcycles[i], info.m_maxClrAPIcycles[i]); } #endif } // Static LPCWSTR Compiler::compJitTimeLogFilename = nullptr; void CompTimeSummaryInfo::Print(FILE* f) { if (f == nullptr) { return; } // Otherwise... 
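    // All cycle counts below are converted to milliseconds via the cached processor
    // frequency; when that frequency cannot be determined, the report is abandoned
    // with a short note.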
double countsPerSec = CachedCyclesPerSecond(); if (countsPerSec == 0.0) { fprintf(f, "Processor does not have a high-frequency timer.\n"); return; } double totTime_ms = 0.0; fprintf(f, "JIT Compilation time report:\n"); fprintf(f, " Compiled %d methods.\n", m_numMethods); if (m_numMethods != 0) { fprintf(f, " Compiled %d bytecodes total (%d max, %8.2f avg).\n", m_total.m_byteCodeBytes, m_maximum.m_byteCodeBytes, (double)m_total.m_byteCodeBytes / (double)m_numMethods); totTime_ms = ((double)m_total.m_totalCycles / countsPerSec) * 1000.0; fprintf(f, " Time: total: %10.3f Mcycles/%10.3f ms\n", ((double)m_total.m_totalCycles / 1000000.0), totTime_ms); fprintf(f, " max: %10.3f Mcycles/%10.3f ms\n", ((double)m_maximum.m_totalCycles) / 1000000.0, ((double)m_maximum.m_totalCycles / countsPerSec) * 1000.0); fprintf(f, " avg: %10.3f Mcycles/%10.3f ms\n", ((double)m_total.m_totalCycles) / 1000000.0 / (double)m_numMethods, totTime_ms / (double)m_numMethods); const char* extraHdr1 = ""; const char* extraHdr2 = ""; #if MEASURE_CLRAPI_CALLS bool extraInfo = (JitConfig.JitEECallTimingInfo() != 0); if (extraInfo) { extraHdr1 = " CLRs/meth % in CLR"; extraHdr2 = "-----------------------"; } #endif fprintf(f, "\n Total time by phases:\n"); fprintf(f, " PHASE inv/meth Mcycles time (ms) %% of total max (ms)%s\n", extraHdr1); fprintf(f, " ---------------------------------------------------------------------------------------%s\n", extraHdr2); // Ensure that at least the names array and the Phases enum have the same number of entries: assert(ArrLen(PhaseNames) == PHASE_NUMBER_OF); for (int i = 0; i < PHASE_NUMBER_OF; i++) { double phase_tot_ms = (((double)m_total.m_cyclesByPhase[i]) / countsPerSec) * 1000.0; double phase_max_ms = (((double)m_maximum.m_cyclesByPhase[i]) / countsPerSec) * 1000.0; #if MEASURE_CLRAPI_CALLS // Skip showing CLR API call info if we didn't collect any if (i == PHASE_CLR_API && !extraInfo) continue; #endif // Indent nested phases, according to depth. 
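            // (A phase with one PhaseParent ancestor gets one level of indentation;
            // a top-level phase, whose parent is -1, gets none.)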
int ancPhase = PhaseParent[i]; while (ancPhase != -1) { fprintf(f, " "); ancPhase = PhaseParent[ancPhase]; } fprintf(f, " %-30s %6.2f %10.2f %9.3f %8.2f%% %8.3f", PhaseNames[i], ((double)m_total.m_invokesByPhase[i]) / ((double)m_numMethods), ((double)m_total.m_cyclesByPhase[i]) / 1000000.0, phase_tot_ms, (phase_tot_ms * 100.0 / totTime_ms), phase_max_ms); #if MEASURE_CLRAPI_CALLS if (extraInfo && i != PHASE_CLR_API) { double nest_tot_ms = (((double)m_total.m_CLRcyclesByPhase[i]) / countsPerSec) * 1000.0; double nest_percent = nest_tot_ms * 100.0 / totTime_ms; double calls_per_fn = ((double)m_total.m_CLRinvokesByPhase[i]) / ((double)m_numMethods); if (nest_percent > 0.1 || calls_per_fn > 10) fprintf(f, " %5.1f %8.2f%%", calls_per_fn, nest_percent); } #endif fprintf(f, "\n"); } // Show slop if it's over a certain percentage of the total double pslop_pct = 100.0 * m_total.m_parentPhaseEndSlop * 1000.0 / countsPerSec / totTime_ms; if (pslop_pct >= 1.0) { fprintf(f, "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles = " "%3.1f%% of total.\n\n", m_total.m_parentPhaseEndSlop / 1000000.0, pslop_pct); } } if (m_numFilteredMethods > 0) { fprintf(f, " Compiled %d methods that meet the filter requirement.\n", m_numFilteredMethods); fprintf(f, " Compiled %d bytecodes total (%8.2f avg).\n", m_filtered.m_byteCodeBytes, (double)m_filtered.m_byteCodeBytes / (double)m_numFilteredMethods); double totTime_ms = ((double)m_filtered.m_totalCycles / countsPerSec) * 1000.0; fprintf(f, " Time: total: %10.3f Mcycles/%10.3f ms\n", ((double)m_filtered.m_totalCycles / 1000000.0), totTime_ms); fprintf(f, " avg: %10.3f Mcycles/%10.3f ms\n", ((double)m_filtered.m_totalCycles) / 1000000.0 / (double)m_numFilteredMethods, totTime_ms / (double)m_numFilteredMethods); fprintf(f, " Total time by phases:\n"); fprintf(f, " PHASE inv/meth Mcycles time (ms) %% of total\n"); fprintf(f, " --------------------------------------------------------------------------------------\n"); // Ensure that at least the names array and the Phases enum have the same number of entries: assert(ArrLen(PhaseNames) == PHASE_NUMBER_OF); for (int i = 0; i < PHASE_NUMBER_OF; i++) { double phase_tot_ms = (((double)m_filtered.m_cyclesByPhase[i]) / countsPerSec) * 1000.0; // Indent nested phases, according to depth. 
int ancPhase = PhaseParent[i]; while (ancPhase != -1) { fprintf(f, " "); ancPhase = PhaseParent[ancPhase]; } fprintf(f, " %-30s %5.2f %10.2f %9.3f %8.2f%%\n", PhaseNames[i], ((double)m_filtered.m_invokesByPhase[i]) / ((double)m_numFilteredMethods), ((double)m_filtered.m_cyclesByPhase[i]) / 1000000.0, phase_tot_ms, (phase_tot_ms * 100.0 / totTime_ms)); } double fslop_ms = m_filtered.m_parentPhaseEndSlop * 1000.0 / countsPerSec; if (fslop_ms > 1.0) { fprintf(f, "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles = " "%3.1f%% of total.\n\n", m_filtered.m_parentPhaseEndSlop / 1000000.0, fslop_ms); } } #if MEASURE_CLRAPI_CALLS if (m_total.m_allClrAPIcalls > 0 && m_total.m_allClrAPIcycles > 0) { fprintf(f, "\n"); if (m_totMethods > 0) fprintf(f, " Imported %u methods.\n\n", m_numMethods + m_totMethods); fprintf(f, " CLR API # calls total time max time avg time %% " "of total\n"); fprintf(f, " -------------------------------------------------------------------------------"); fprintf(f, "---------------------\n"); static const char* APInames[] = { #define DEF_CLR_API(name) #name, #include "ICorJitInfo_API_names.h" }; unsigned shownCalls = 0; double shownMillis = 0.0; #ifdef DEBUG unsigned checkedCalls = 0; double checkedMillis = 0.0; #endif for (unsigned pass = 0; pass < 2; pass++) { for (unsigned i = 0; i < API_ICorJitInfo_Names::API_COUNT; i++) { unsigned calls = m_total.m_perClrAPIcalls[i]; if (calls == 0) continue; unsigned __int64 cycles = m_total.m_perClrAPIcycles[i]; double millis = 1000.0 * cycles / countsPerSec; // Don't show the small fry to keep the results manageable if (millis < 0.5) { // We always show the following API because it is always called // exactly once for each method and its body is the simplest one // possible (it just returns an integer constant), and therefore // it can be used to measure the overhead of adding the CLR API // timing code. Roughly speaking, on a 3GHz x64 box the overhead // per call should be around 40 ns when using RDTSC, compared to // about 140 ns when using GetThreadCycles() under Windows. if (i != API_ICorJitInfo_Names::API_getExpectedTargetArchitecture) continue; } // In the first pass we just compute the totals. 
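                // (The second pass then prints one line per API, using those totals
                // for the "% of total" column.)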
if (pass == 0) { shownCalls += m_total.m_perClrAPIcalls[i]; shownMillis += millis; continue; } unsigned __int32 maxcyc = m_maximum.m_maxClrAPIcycles[i]; double max_ms = 1000.0 * maxcyc / countsPerSec; fprintf(f, " %-40s", APInames[i]); // API name fprintf(f, " %8u %9.1f ms", calls, millis); // #calls, total time fprintf(f, " %8.1f ms %8.1f ns", max_ms, 1000000.0 * millis / calls); // max, avg time fprintf(f, " %5.1f%%\n", 100.0 * millis / shownMillis); // % of total #ifdef DEBUG checkedCalls += m_total.m_perClrAPIcalls[i]; checkedMillis += millis; #endif } } #ifdef DEBUG assert(checkedCalls == shownCalls); assert(checkedMillis == shownMillis); #endif if (shownCalls > 0 || shownMillis > 0) { fprintf(f, " -------------------------"); fprintf(f, "---------------------------------------------------------------------------\n"); fprintf(f, " Total for calls shown above %8u %10.1f ms", shownCalls, shownMillis); if (totTime_ms > 0.0) fprintf(f, " (%4.1lf%% of overall JIT time)", shownMillis * 100.0 / totTime_ms); fprintf(f, "\n"); } fprintf(f, "\n"); } #endif fprintf(f, "\n"); } JitTimer::JitTimer(unsigned byteCodeSize) : m_info(byteCodeSize) { #if MEASURE_CLRAPI_CALLS m_CLRcallInvokes = 0; m_CLRcallCycles = 0; #endif #ifdef DEBUG m_lastPhase = (Phases)-1; #if MEASURE_CLRAPI_CALLS m_CLRcallAPInum = -1; #endif #endif unsigned __int64 threadCurCycles; if (_our_GetThreadCycles(&threadCurCycles)) { m_start = threadCurCycles; m_curPhaseStart = threadCurCycles; } } void JitTimer::EndPhase(Compiler* compiler, Phases phase) { // Otherwise... // We re-run some phases currently, so this following assert doesn't work. // assert((int)phase > (int)m_lastPhase); // We should end phases in increasing order. unsigned __int64 threadCurCycles; if (_our_GetThreadCycles(&threadCurCycles)) { unsigned __int64 phaseCycles = (threadCurCycles - m_curPhaseStart); // If this is not a leaf phase, the assumption is that the last subphase must have just recently ended. // Credit the duration to "slop", the total of which should be very small. if (PhaseHasChildren[phase]) { m_info.m_parentPhaseEndSlop += phaseCycles; } else { // It is a leaf phase. Credit duration to it. m_info.m_invokesByPhase[phase]++; m_info.m_cyclesByPhase[phase] += phaseCycles; #if MEASURE_CLRAPI_CALLS // Record the CLR API timing info as well. m_info.m_CLRinvokesByPhase[phase] += m_CLRcallInvokes; m_info.m_CLRcyclesByPhase[phase] += m_CLRcallCycles; #endif // Credit the phase's ancestors, if any. int ancPhase = PhaseParent[phase]; while (ancPhase != -1) { m_info.m_cyclesByPhase[ancPhase] += phaseCycles; ancPhase = PhaseParent[ancPhase]; } #if MEASURE_CLRAPI_CALLS const Phases lastPhase = PHASE_CLR_API; #else const Phases lastPhase = PHASE_NUMBER_OF; #endif if (phase + 1 == lastPhase) { m_info.m_totalCycles = (threadCurCycles - m_start); } else { m_curPhaseStart = threadCurCycles; } } if ((JitConfig.JitMeasureIR() != 0) && PhaseReportsIRSize[phase]) { m_info.m_nodeCountAfterPhase[phase] = compiler->fgMeasureIR(); } else { m_info.m_nodeCountAfterPhase[phase] = 0; } } #ifdef DEBUG m_lastPhase = phase; #endif #if MEASURE_CLRAPI_CALLS m_CLRcallInvokes = 0; m_CLRcallCycles = 0; #endif } #if MEASURE_CLRAPI_CALLS //------------------------------------------------------------------------ // JitTimer::CLRApiCallEnter: Start the stopwatch for an EE call. // // Arguments: // apix - The API index - an "enum API_ICorJitInfo_Names" value. 
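//
// Notes:
//    Calls may not nest: the matching CLRApiCallLeave must run before the next
//    CLRApiCallEnter (this is what the m_CLRcallAPInum bookkeeping asserts).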
// void JitTimer::CLRApiCallEnter(unsigned apix) { assert(m_CLRcallAPInum == -1); // Nested calls not allowed m_CLRcallAPInum = apix; // If we can't get the cycles, we'll just ignore this call if (!_our_GetThreadCycles(&m_CLRcallStart)) m_CLRcallStart = 0; } //------------------------------------------------------------------------ // JitTimer::CLRApiCallLeave: compute / record time spent in an EE call. // // Arguments: // apix - The API's "enum API_ICorJitInfo_Names" value; this value // should match the value passed to the most recent call to // "CLRApiCallEnter" (i.e. these must come as matched pairs), // and they also may not nest. // void JitTimer::CLRApiCallLeave(unsigned apix) { // Make sure we're actually inside a measured CLR call. assert(m_CLRcallAPInum != -1); m_CLRcallAPInum = -1; // Ignore this one if we don't have a valid starting counter. if (m_CLRcallStart != 0) { if (JitConfig.JitEECallTimingInfo() != 0) { unsigned __int64 threadCurCycles; if (_our_GetThreadCycles(&threadCurCycles)) { // Compute the cycles spent in the call. threadCurCycles -= m_CLRcallStart; // Add the cycles to the 'phase' and bump its use count. m_info.m_cyclesByPhase[PHASE_CLR_API] += threadCurCycles; m_info.m_invokesByPhase[PHASE_CLR_API] += 1; // Add the values to the "per API" info. m_info.m_allClrAPIcycles += threadCurCycles; m_info.m_allClrAPIcalls += 1; m_info.m_perClrAPIcalls[apix] += 1; m_info.m_perClrAPIcycles[apix] += threadCurCycles; m_info.m_maxClrAPIcycles[apix] = max(m_info.m_maxClrAPIcycles[apix], (unsigned __int32)threadCurCycles); // Subtract the cycles from the enclosing phase by bumping its start time m_curPhaseStart += threadCurCycles; // Update the running totals. m_CLRcallInvokes += 1; m_CLRcallCycles += threadCurCycles; } } m_CLRcallStart = 0; } assert(m_CLRcallAPInum != -1); // No longer in this API call. m_CLRcallAPInum = -1; } #endif // MEASURE_CLRAPI_CALLS CritSecObject JitTimer::s_csvLock; // It's expensive to constantly open and close the file, so open it once and close it // when the process exits. This should be accessed under the s_csvLock. FILE* JitTimer::s_csvFile = nullptr; LPCWSTR Compiler::JitTimeLogCsv() { LPCWSTR jitTimeLogCsv = JitConfig.JitTimeLogCsv(); return jitTimeLogCsv; } void JitTimer::PrintCsvHeader() { LPCWSTR jitTimeLogCsv = Compiler::JitTimeLogCsv(); if (jitTimeLogCsv == nullptr) { return; } CritSecHolder csvLock(s_csvLock); if (s_csvFile == nullptr) { s_csvFile = _wfopen(jitTimeLogCsv, W("a")); } if (s_csvFile != nullptr) { // Seek to the end of the file s.t. 
`ftell` doesn't lie to us on Windows fseek(s_csvFile, 0, SEEK_END); // Write the header if the file is empty if (ftell(s_csvFile) == 0) { fprintf(s_csvFile, "\"Method Name\","); fprintf(s_csvFile, "\"Assembly or SPMI Index\","); fprintf(s_csvFile, "\"IL Bytes\","); fprintf(s_csvFile, "\"Basic Blocks\","); fprintf(s_csvFile, "\"Min Opts\","); fprintf(s_csvFile, "\"Loops\","); fprintf(s_csvFile, "\"Loops Cloned\","); #if FEATURE_LOOP_ALIGN #ifdef DEBUG fprintf(s_csvFile, "\"Alignment Candidates\","); fprintf(s_csvFile, "\"Loops Aligned\","); #endif // DEBUG #endif // FEATURE_LOOP_ALIGN for (int i = 0; i < PHASE_NUMBER_OF; i++) { fprintf(s_csvFile, "\"%s\",", PhaseNames[i]); if ((JitConfig.JitMeasureIR() != 0) && PhaseReportsIRSize[i]) { fprintf(s_csvFile, "\"Node Count After %s\",", PhaseNames[i]); } } InlineStrategy::DumpCsvHeader(s_csvFile); fprintf(s_csvFile, "\"Executable Code Bytes\","); fprintf(s_csvFile, "\"GC Info Bytes\","); fprintf(s_csvFile, "\"Total Bytes Allocated\","); fprintf(s_csvFile, "\"Total Cycles\","); fprintf(s_csvFile, "\"CPS\"\n"); fflush(s_csvFile); } } } void JitTimer::PrintCsvMethodStats(Compiler* comp) { LPCWSTR jitTimeLogCsv = Compiler::JitTimeLogCsv(); if (jitTimeLogCsv == nullptr) { return; } // eeGetMethodFullName uses locks, so don't enter crit sec before this call. #if defined(DEBUG) || defined(LATE_DISASM) // If we already have computed the name because for some reason we're generating the CSV // for a DEBUG build (presumably not for the time info), just re-use it. const char* methName = comp->info.compFullName; #else const char* methName = comp->eeGetMethodFullName(comp->info.compMethodHnd); #endif // Try and access the SPMI index to report in the data set. // // If the jit is not hosted under SPMI this will return the // default value of zero. // // Query the jit host directly here instead of going via the // config cache, since value will change for each method. 
int index = g_jitHost->getIntConfigValue(W("SuperPMIMethodContextNumber"), -1); CritSecHolder csvLock(s_csvLock); if (s_csvFile == nullptr) { return; } fprintf(s_csvFile, "\"%s\",", methName); if (index != 0) { fprintf(s_csvFile, "%d,", index); } else { const char* methodAssemblyName = comp->info.compCompHnd->getAssemblyName( comp->info.compCompHnd->getModuleAssembly(comp->info.compCompHnd->getClassModule(comp->info.compClassHnd))); fprintf(s_csvFile, "\"%s\",", methodAssemblyName); } fprintf(s_csvFile, "%u,", comp->info.compILCodeSize); fprintf(s_csvFile, "%u,", comp->fgBBcount); fprintf(s_csvFile, "%u,", comp->opts.MinOpts()); fprintf(s_csvFile, "%u,", comp->optLoopCount); fprintf(s_csvFile, "%u,", comp->optLoopsCloned); #if FEATURE_LOOP_ALIGN #ifdef DEBUG fprintf(s_csvFile, "%u,", comp->loopAlignCandidates); fprintf(s_csvFile, "%u,", comp->loopsAligned); #endif // DEBUG #endif // FEATURE_LOOP_ALIGN unsigned __int64 totCycles = 0; for (int i = 0; i < PHASE_NUMBER_OF; i++) { if (!PhaseHasChildren[i]) { totCycles += m_info.m_cyclesByPhase[i]; } fprintf(s_csvFile, "%I64u,", m_info.m_cyclesByPhase[i]); if ((JitConfig.JitMeasureIR() != 0) && PhaseReportsIRSize[i]) { fprintf(s_csvFile, "%u,", m_info.m_nodeCountAfterPhase[i]); } } comp->m_inlineStrategy->DumpCsvData(s_csvFile); fprintf(s_csvFile, "%u,", comp->info.compNativeCodeSize); fprintf(s_csvFile, "%Iu,", comp->compInfoBlkSize); fprintf(s_csvFile, "%Iu,", comp->compGetArenaAllocator()->getTotalBytesAllocated()); fprintf(s_csvFile, "%I64u,", m_info.m_totalCycles); fprintf(s_csvFile, "%f\n", CachedCyclesPerSecond()); fflush(s_csvFile); } // Perform process shutdown actions. // // static void JitTimer::Shutdown() { CritSecHolder csvLock(s_csvLock); if (s_csvFile != nullptr) { fclose(s_csvFile); } } // Completes the timing of the current method, and adds it to "sum". void JitTimer::Terminate(Compiler* comp, CompTimeSummaryInfo& sum, bool includePhases) { if (includePhases) { PrintCsvMethodStats(comp); } sum.AddInfo(m_info, includePhases); } #endif // FEATURE_JIT_METHOD_PERF #if LOOP_HOIST_STATS // Static fields. CritSecObject Compiler::s_loopHoistStatsLock; // Default constructor. unsigned Compiler::s_loopsConsidered = 0; unsigned Compiler::s_loopsWithHoistedExpressions = 0; unsigned Compiler::s_totalHoistedExpressions = 0; // static void Compiler::PrintAggregateLoopHoistStats(FILE* f) { fprintf(f, "\n"); fprintf(f, "---------------------------------------------------\n"); fprintf(f, "Loop hoisting stats\n"); fprintf(f, "---------------------------------------------------\n"); double pctWithHoisted = 0.0; if (s_loopsConsidered > 0) { pctWithHoisted = 100.0 * (double(s_loopsWithHoistedExpressions) / double(s_loopsConsidered)); } double exprsPerLoopWithExpr = 0.0; if (s_loopsWithHoistedExpressions > 0) { exprsPerLoopWithExpr = double(s_totalHoistedExpressions) / double(s_loopsWithHoistedExpressions); } fprintf(f, "Considered %d loops. 
Of these, we hoisted expressions out of %d (%6.2f%%).\n", s_loopsConsidered, s_loopsWithHoistedExpressions, pctWithHoisted); fprintf(f, " A total of %d expressions were hoisted, an average of %5.2f per loop-with-hoisted-expr.\n", s_totalHoistedExpressions, exprsPerLoopWithExpr); } void Compiler::AddLoopHoistStats() { CritSecHolder statsLock(s_loopHoistStatsLock); s_loopsConsidered += m_loopsConsidered; s_loopsWithHoistedExpressions += m_loopsWithHoistedExpressions; s_totalHoistedExpressions += m_totalHoistedExpressions; } void Compiler::PrintPerMethodLoopHoistStats() { double pctWithHoisted = 0.0; if (m_loopsConsidered > 0) { pctWithHoisted = 100.0 * (double(m_loopsWithHoistedExpressions) / double(m_loopsConsidered)); } double exprsPerLoopWithExpr = 0.0; if (m_loopsWithHoistedExpressions > 0) { exprsPerLoopWithExpr = double(m_totalHoistedExpressions) / double(m_loopsWithHoistedExpressions); } printf("Considered %d loops. Of these, we hoisted expressions out of %d (%5.2f%%).\n", m_loopsConsidered, m_loopsWithHoistedExpressions, pctWithHoisted); printf(" A total of %d expressions were hoisted, an average of %5.2f per loop-with-hoisted-expr.\n", m_totalHoistedExpressions, exprsPerLoopWithExpr); } #endif // LOOP_HOIST_STATS //------------------------------------------------------------------------ // RecordStateAtEndOfInlining: capture timing data (if enabled) after // inlining as completed. // // Note: // Records data needed for SQM and inlining data dumps. Should be // called after inlining is complete. (We do this after inlining // because this marks the last point at which the JIT is likely to // cause type-loading and class initialization). void Compiler::RecordStateAtEndOfInlining() { #if defined(DEBUG) || defined(INLINE_DATA) m_compCyclesAtEndOfInlining = 0; m_compTickCountAtEndOfInlining = 0; bool b = CycleTimer::GetThreadCyclesS(&m_compCyclesAtEndOfInlining); if (!b) { return; // We don't have a thread cycle counter. } m_compTickCountAtEndOfInlining = GetTickCount(); #endif // defined(DEBUG) || defined(INLINE_DATA) } //------------------------------------------------------------------------ // RecordStateAtEndOfCompilation: capture timing data (if enabled) after // compilation is completed. void Compiler::RecordStateAtEndOfCompilation() { #if defined(DEBUG) || defined(INLINE_DATA) // Common portion m_compCycles = 0; unsigned __int64 compCyclesAtEnd; bool b = CycleTimer::GetThreadCyclesS(&compCyclesAtEnd); if (!b) { return; // We don't have a thread cycle counter. } assert(compCyclesAtEnd >= m_compCyclesAtEndOfInlining); m_compCycles = compCyclesAtEnd - m_compCyclesAtEndOfInlining; #endif // defined(DEBUG) || defined(INLINE_DATA) } #if FUNC_INFO_LOGGING // static LPCWSTR Compiler::compJitFuncInfoFilename = nullptr; // static FILE* Compiler::compJitFuncInfoFile = nullptr; #endif // FUNC_INFO_LOGGING #ifdef DEBUG // dumpConvertedVarSet() dumps the varset bits that are tracked // variable indices, and we convert them to variable numbers, sort the variable numbers, and // print them as variable numbers. To do this, we use a temporary set indexed by // variable number. We can't use the "all varset" type because it is still size-limited, and might // not be big enough to handle all possible variable numbers. void dumpConvertedVarSet(Compiler* comp, VARSET_VALARG_TP vars) { BYTE* pVarNumSet; // trivial set: one byte per varNum, 0 means not in set, 1 means in set. 
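    // (Hypothetical example: with lvaCount == 5 and tracked indices mapping to V01 and
    // V04, the byte set becomes {0,1,0,0,1} and the loop below prints "{V01 V04}".)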
size_t varNumSetBytes = comp->lvaCount * sizeof(BYTE); pVarNumSet = (BYTE*)_alloca(varNumSetBytes); memset(pVarNumSet, 0, varNumSetBytes); // empty the set VarSetOps::Iter iter(comp, vars); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { unsigned varNum = comp->lvaTrackedIndexToLclNum(varIndex); pVarNumSet[varNum] = 1; // This varNum is in the set } bool first = true; printf("{"); for (size_t varNum = 0; varNum < comp->lvaCount; varNum++) { if (pVarNumSet[varNum] == 1) { if (!first) { printf(" "); } printf("V%02u", varNum); first = false; } } printf("}"); } /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Debugging helpers XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ /* The following functions are intended to be called from the debugger, to dump * various data structures. * * The versions that start with 'c' take a Compiler* as the first argument. * The versions that start with 'd' use the tlsCompiler, so don't require a Compiler*. * * Summary: * cBlock, dBlock : Display a basic block (call fgTableDispBasicBlock()). * cBlocks, dBlocks : Display all the basic blocks of a function (call fgDispBasicBlocks()). * cBlocksV, dBlocksV : Display all the basic blocks of a function (call fgDispBasicBlocks(true)). * "V" means "verbose", and will dump all the trees. * cStmt, dStmt : Display a Statement (call gtDispStmt()). * cTree, dTree : Display a tree (call gtDispTree()). * cTreeLIR, dTreeLIR : Display a tree in LIR form (call gtDispLIRNode()). * cTrees, dTrees : Display all the trees in a function (call fgDumpTrees()). * cEH, dEH : Display the EH handler table (call fgDispHandlerTab()). * cVar, dVar : Display a local variable given its number (call lvaDumpEntry()). * cVarDsc, dVarDsc : Display a local variable given a LclVarDsc* (call lvaDumpEntry()). * cVars, dVars : Display the local variable table (call lvaTableDump()). * cVarsFinal, dVarsFinal : Display the local variable table (call lvaTableDump(FINAL_FRAME_LAYOUT)). * cBlockCheapPreds, dBlockCheapPreds : Display a block's cheap predecessors (call block->dspCheapPreds()). * cBlockPreds, dBlockPreds : Display a block's predecessors (call block->dspPreds()). * cBlockSuccs, dBlockSuccs : Display a block's successors (call block->dspSuccs(compiler)). * cReach, dReach : Display all block reachability (call fgDispReach()). * cDoms, dDoms : Display all block dominators (call fgDispDoms()). * cLiveness, dLiveness : Display per-block variable liveness (call fgDispBBLiveness()). * cCVarSet, dCVarSet : Display a "converted" VARSET_TP: the varset is assumed to be tracked variable * indices. These are converted to variable numbers and sorted. (Calls * dumpConvertedVarSet()). * cLoop, dLoop : Display the blocks of a loop, including the trees. * cTreeFlags, dTreeFlags : Display tree flags * * The following don't require a Compiler* to work: * dRegMask : Display a regMaskTP (call dspRegMask(mask)). * dBlockList : Display a BasicBlockList*. 
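 *
 * For example, from the debugger's expression evaluator one can invoke
 *      dTree(tree)    or    cBlock(comp, block)
 * (where 'tree', 'comp' and 'block' are whatever locals happen to be in scope)
 * to get the same dump output these display routines produce under JitDump.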
*/ void cBlock(Compiler* comp, BasicBlock* block) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Block %u\n", sequenceNumber++); comp->fgTableDispBasicBlock(block); } void cBlocks(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Blocks %u\n", sequenceNumber++); comp->fgDispBasicBlocks(); } void cBlocksV(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *BlocksV %u\n", sequenceNumber++); comp->fgDispBasicBlocks(true); } void cStmt(Compiler* comp, Statement* statement) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Stmt %u\n", sequenceNumber++); comp->gtDispStmt(statement, ">>>"); } void cTree(Compiler* comp, GenTree* tree) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Tree %u\n", sequenceNumber++); comp->gtDispTree(tree, nullptr, ">>>"); } void cTreeLIR(Compiler* comp, GenTree* tree) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *TreeLIR %u\n", sequenceNumber++); comp->gtDispLIRNode(tree); } void cTrees(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Trees %u\n", sequenceNumber++); comp->fgDumpTrees(comp->fgFirstBB, nullptr); } void cEH(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *EH %u\n", sequenceNumber++); comp->fgDispHandlerTab(); } void cVar(Compiler* comp, unsigned lclNum) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Var %u\n", sequenceNumber++); comp->lvaDumpEntry(lclNum, Compiler::FINAL_FRAME_LAYOUT); } void cVarDsc(Compiler* comp, LclVarDsc* varDsc) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *VarDsc %u\n", sequenceNumber++); unsigned lclNum = comp->lvaGetLclNum(varDsc); comp->lvaDumpEntry(lclNum, Compiler::FINAL_FRAME_LAYOUT); } void cVars(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Vars %u\n", sequenceNumber++); comp->lvaTableDump(); } void cVarsFinal(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called 
printf("===================================================================== *Vars %u\n", sequenceNumber++); comp->lvaTableDump(Compiler::FINAL_FRAME_LAYOUT); } void cBlockCheapPreds(Compiler* comp, BasicBlock* block) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *BlockCheapPreds %u\n", sequenceNumber++); block->dspCheapPreds(); } void cBlockPreds(Compiler* comp, BasicBlock* block) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *BlockPreds %u\n", sequenceNumber++); block->dspPreds(); } void cBlockSuccs(Compiler* comp, BasicBlock* block) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *BlockSuccs %u\n", sequenceNumber++); block->dspSuccs(comp); } void cReach(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Reach %u\n", sequenceNumber++); comp->fgDispReach(); } void cDoms(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Doms %u\n", sequenceNumber++); comp->fgDispDoms(); } void cLiveness(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Liveness %u\n", sequenceNumber++); comp->fgDispBBLiveness(); } void cCVarSet(Compiler* comp, VARSET_VALARG_TP vars) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *CVarSet %u\n", sequenceNumber++); dumpConvertedVarSet(comp, vars); printf("\n"); // dumpConvertedVarSet() doesn't emit a trailing newline } void cLoop(Compiler* comp, unsigned loopNum) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Loop %u\n", sequenceNumber++); comp->optPrintLoopInfo(loopNum, /* verbose */ true); printf("\n"); } void cLoopPtr(Compiler* comp, const Compiler::LoopDsc* loop) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *LoopPtr %u\n", sequenceNumber++); comp->optPrintLoopInfo(loop, /* verbose */ true); printf("\n"); } void cLoops(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Loops %u\n", sequenceNumber++); comp->optPrintLoopTable(); } void dBlock(BasicBlock* block) { cBlock(JitTls::GetCompiler(), block); } void dBlocks() { cBlocks(JitTls::GetCompiler()); } void dBlocksV() { cBlocksV(JitTls::GetCompiler()); } void dStmt(Statement* statement) { cStmt(JitTls::GetCompiler(), statement); } void dTree(GenTree* tree) { 
cTree(JitTls::GetCompiler(), tree); } void dTreeLIR(GenTree* tree) { cTreeLIR(JitTls::GetCompiler(), tree); } void dTreeRange(GenTree* first, GenTree* last) { Compiler* comp = JitTls::GetCompiler(); GenTree* cur = first; while (true) { cTreeLIR(comp, cur); if (cur == last) break; cur = cur->gtNext; } } void dTrees() { cTrees(JitTls::GetCompiler()); } void dEH() { cEH(JitTls::GetCompiler()); } void dVar(unsigned lclNum) { cVar(JitTls::GetCompiler(), lclNum); } void dVarDsc(LclVarDsc* varDsc) { cVarDsc(JitTls::GetCompiler(), varDsc); } void dVars() { cVars(JitTls::GetCompiler()); } void dVarsFinal() { cVarsFinal(JitTls::GetCompiler()); } void dBlockPreds(BasicBlock* block) { cBlockPreds(JitTls::GetCompiler(), block); } void dBlockCheapPreds(BasicBlock* block) { cBlockCheapPreds(JitTls::GetCompiler(), block); } void dBlockSuccs(BasicBlock* block) { cBlockSuccs(JitTls::GetCompiler(), block); } void dReach() { cReach(JitTls::GetCompiler()); } void dDoms() { cDoms(JitTls::GetCompiler()); } void dLiveness() { cLiveness(JitTls::GetCompiler()); } void dCVarSet(VARSET_VALARG_TP vars) { cCVarSet(JitTls::GetCompiler(), vars); } void dLoop(unsigned loopNum) { cLoop(JitTls::GetCompiler(), loopNum); } void dLoopPtr(const Compiler::LoopDsc* loop) { cLoopPtr(JitTls::GetCompiler(), loop); } void dLoops() { cLoops(JitTls::GetCompiler()); } void dRegMask(regMaskTP mask) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== dRegMask %u\n", sequenceNumber++); dspRegMask(mask); printf("\n"); // dspRegMask() doesn't emit a trailing newline } void dBlockList(BasicBlockList* list) { printf("WorkList: "); while (list != nullptr) { printf(FMT_BB " ", list->block->bbNum); list = list->next; } printf("\n"); } // Global variables available in debug mode. They are set by the debug APIs below for finding // Trees, Stmts, and/or Blocks using id or bbNum. // They can be used in the watch window or as a way to get the address of fields for data breakpoints. GenTree* dbTree; Statement* dbStmt; BasicBlock* dbTreeBlock; BasicBlock* dbBlock; // Debug APIs for finding Trees, Stmts, and/or Blocks. // As a side effect, they set the debug variables above. GenTree* dFindTree(GenTree* tree, unsigned id) { if (tree == nullptr) { return nullptr; } if (tree->gtTreeID == id) { dbTree = tree; return tree; } GenTree* child = nullptr; tree->VisitOperands([&child, id](GenTree* operand) -> GenTree::VisitResult { child = dFindTree(operand, id); return (child != nullptr) ?
GenTree::VisitResult::Abort : GenTree::VisitResult::Continue; }); return child; } GenTree* dFindTree(unsigned id) { Compiler* comp = JitTls::GetCompiler(); GenTree* tree; dbTreeBlock = nullptr; dbTree = nullptr; for (BasicBlock* const block : comp->Blocks()) { for (Statement* const stmt : block->Statements()) { tree = dFindTree(stmt->GetRootNode(), id); if (tree != nullptr) { dbTreeBlock = block; return tree; } } } return nullptr; } Statement* dFindStmt(unsigned id) { Compiler* comp = JitTls::GetCompiler(); dbStmt = nullptr; unsigned stmtId = 0; for (BasicBlock* const block : comp->Blocks()) { for (Statement* const stmt : block->Statements()) { stmtId++; if (stmtId == id) { dbStmt = stmt; return stmt; } } } return nullptr; } BasicBlock* dFindBlock(unsigned bbNum) { Compiler* comp = JitTls::GetCompiler(); BasicBlock* block = nullptr; dbBlock = nullptr; for (block = comp->fgFirstBB; block != nullptr; block = block->bbNext) { if (block->bbNum == bbNum) { dbBlock = block; break; } } return block; } Compiler::LoopDsc* dFindLoop(unsigned loopNum) { Compiler* comp = JitTls::GetCompiler(); if (loopNum >= comp->optLoopCount) { printf("loopNum %u out of range\n", loopNum); return nullptr; } return &comp->optLoopTable[loopNum]; } void cTreeFlags(Compiler* comp, GenTree* tree) { int chars = 0; if (tree->gtFlags != 0) { chars += printf("flags="); // Node flags CLANG_FORMAT_COMMENT_ANCHOR; #if defined(DEBUG) if (tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE) { chars += printf("[NODE_LARGE]"); } if (tree->gtDebugFlags & GTF_DEBUG_NODE_SMALL) { chars += printf("[NODE_SMALL]"); } if (tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) { chars += printf("[MORPHED]"); } #endif // defined(DEBUG) if (tree->gtFlags & GTF_COLON_COND) { chars += printf("[COLON_COND]"); } // Operator flags genTreeOps op = tree->OperGet(); switch (op) { case GT_LCL_VAR: case GT_LCL_VAR_ADDR: case GT_LCL_FLD: case GT_LCL_FLD_ADDR: case GT_STORE_LCL_FLD: case GT_STORE_LCL_VAR: if (tree->gtFlags & GTF_VAR_DEF) { chars += printf("[VAR_DEF]"); } if (tree->gtFlags & GTF_VAR_USEASG) { chars += printf("[VAR_USEASG]"); } if (tree->gtFlags & GTF_VAR_CAST) { chars += printf("[VAR_CAST]"); } if (tree->gtFlags & GTF_VAR_ITERATOR) { chars += printf("[VAR_ITERATOR]"); } if (tree->gtFlags & GTF_VAR_CLONED) { chars += printf("[VAR_CLONED]"); } if (tree->gtFlags & GTF_VAR_DEATH) { chars += printf("[VAR_DEATH]"); } if (tree->gtFlags & GTF_VAR_ARR_INDEX) { chars += printf("[VAR_ARR_INDEX]"); } #if defined(DEBUG) if (tree->gtDebugFlags & GTF_DEBUG_VAR_CSE_REF) { chars += printf("[VAR_CSE_REF]"); } #endif break; case GT_NO_OP: break; case GT_FIELD: if (tree->gtFlags & GTF_FLD_VOLATILE) { chars += printf("[FLD_VOLATILE]"); } break; case GT_INDEX: if (tree->gtFlags & GTF_INX_STRING_LAYOUT) { chars += printf("[INX_STRING_LAYOUT]"); } FALLTHROUGH; case GT_INDEX_ADDR: if (tree->gtFlags & GTF_INX_RNGCHK) { chars += printf("[INX_RNGCHK]"); } break; case GT_IND: case GT_STOREIND: if (tree->gtFlags & GTF_IND_VOLATILE) { chars += printf("[IND_VOLATILE]"); } if (tree->gtFlags & GTF_IND_TGTANYWHERE) { chars += printf("[IND_TGTANYWHERE]"); } if (tree->gtFlags & GTF_IND_TGT_NOT_HEAP) { chars += printf("[IND_TGT_NOT_HEAP]"); } if (tree->gtFlags & GTF_IND_TLS_REF) { chars += printf("[IND_TLS_REF]"); } if (tree->gtFlags & GTF_IND_ASG_LHS) { chars += printf("[IND_ASG_LHS]"); } if (tree->gtFlags & GTF_IND_UNALIGNED) { chars += printf("[IND_UNALIGNED]"); } if (tree->gtFlags & GTF_IND_INVARIANT) { chars += printf("[IND_INVARIANT]"); } if (tree->gtFlags & GTF_IND_NONNULL) { chars +=
printf("[IND_NONNULL]"); } break; case GT_CLS_VAR: if (tree->gtFlags & GTF_CLS_VAR_ASG_LHS) { chars += printf("[CLS_VAR_ASG_LHS]"); } break; case GT_MUL: #if !defined(TARGET_64BIT) case GT_MUL_LONG: #endif if (tree->gtFlags & GTF_MUL_64RSLT) { chars += printf("[64RSLT]"); } if (tree->gtFlags & GTF_ADDRMODE_NO_CSE) { chars += printf("[ADDRMODE_NO_CSE]"); } break; case GT_ADD: if (tree->gtFlags & GTF_ADDRMODE_NO_CSE) { chars += printf("[ADDRMODE_NO_CSE]"); } break; case GT_LSH: if (tree->gtFlags & GTF_ADDRMODE_NO_CSE) { chars += printf("[ADDRMODE_NO_CSE]"); } break; case GT_MOD: case GT_UMOD: break; case GT_EQ: case GT_NE: case GT_LT: case GT_LE: case GT_GT: case GT_GE: if (tree->gtFlags & GTF_RELOP_NAN_UN) { chars += printf("[RELOP_NAN_UN]"); } if (tree->gtFlags & GTF_RELOP_JMP_USED) { chars += printf("[RELOP_JMP_USED]"); } break; case GT_QMARK: if (tree->gtFlags & GTF_QMARK_CAST_INSTOF) { chars += printf("[QMARK_CAST_INSTOF]"); } break; case GT_BOX: if (tree->gtFlags & GTF_BOX_VALUE) { chars += printf("[BOX_VALUE]"); } break; case GT_CNS_INT: { GenTreeFlags handleKind = (tree->gtFlags & GTF_ICON_HDL_MASK); switch (handleKind) { case GTF_ICON_SCOPE_HDL: chars += printf("[ICON_SCOPE_HDL]"); break; case GTF_ICON_CLASS_HDL: chars += printf("[ICON_CLASS_HDL]"); break; case GTF_ICON_METHOD_HDL: chars += printf("[ICON_METHOD_HDL]"); break; case GTF_ICON_FIELD_HDL: chars += printf("[ICON_FIELD_HDL]"); break; case GTF_ICON_STATIC_HDL: chars += printf("[ICON_STATIC_HDL]"); break; case GTF_ICON_STR_HDL: chars += printf("[ICON_STR_HDL]"); break; case GTF_ICON_CONST_PTR: chars += printf("[ICON_CONST_PTR]"); break; case GTF_ICON_GLOBAL_PTR: chars += printf("[ICON_GLOBAL_PTR]"); break; case GTF_ICON_VARG_HDL: chars += printf("[ICON_VARG_HDL]"); break; case GTF_ICON_PINVKI_HDL: chars += printf("[ICON_PINVKI_HDL]"); break; case GTF_ICON_TOKEN_HDL: chars += printf("[ICON_TOKEN_HDL]"); break; case GTF_ICON_TLS_HDL: chars += printf("[ICON_TLS_HDL]"); break; case GTF_ICON_FTN_ADDR: chars += printf("[ICON_FTN_ADDR]"); break; case GTF_ICON_CIDMID_HDL: chars += printf("[ICON_CIDMID_HDL]"); break; case GTF_ICON_BBC_PTR: chars += printf("[ICON_BBC_PTR]"); break; case GTF_ICON_STATIC_BOX_PTR: chars += printf("[ICON_STATIC_BOX_PTR]"); break; case GTF_ICON_FIELD_OFF: chars += printf("[ICON_FIELD_OFF]"); break; default: assert(!"a forgotten handle flag"); break; } } break; case GT_OBJ: case GT_STORE_OBJ: if (tree->AsObj()->GetLayout()->HasGCPtr()) { chars += printf("[BLK_HASGCPTR]"); } FALLTHROUGH; case GT_BLK: case GT_STORE_BLK: case GT_STORE_DYN_BLK: if (tree->gtFlags & GTF_BLK_VOLATILE) { chars += printf("[BLK_VOLATILE]"); } if (tree->AsBlk()->IsUnaligned()) { chars += printf("[BLK_UNALIGNED]"); } break; case GT_CALL: if (tree->gtFlags & GTF_CALL_UNMANAGED) { chars += printf("[CALL_UNMANAGED]"); } if (tree->gtFlags & GTF_CALL_INLINE_CANDIDATE) { chars += printf("[CALL_INLINE_CANDIDATE]"); } if (!tree->AsCall()->IsVirtual()) { chars += printf("[CALL_NONVIRT]"); } if (tree->AsCall()->IsVirtualVtable()) { chars += printf("[CALL_VIRT_VTABLE]"); } if (tree->AsCall()->IsVirtualStub()) { chars += printf("[CALL_VIRT_STUB]"); } if (tree->gtFlags & GTF_CALL_NULLCHECK) { chars += printf("[CALL_NULLCHECK]"); } if (tree->gtFlags & GTF_CALL_POP_ARGS) { chars += printf("[CALL_POP_ARGS]"); } if (tree->gtFlags & GTF_CALL_HOISTABLE) { chars += printf("[CALL_HOISTABLE]"); } // More flags associated with calls.
{ GenTreeCall* call = tree->AsCall(); if (call->gtCallMoreFlags & GTF_CALL_M_EXPLICIT_TAILCALL) { chars += printf("[CALL_M_EXPLICIT_TAILCALL]"); } if (call->gtCallMoreFlags & GTF_CALL_M_TAILCALL) { chars += printf("[CALL_M_TAILCALL]"); } if (call->gtCallMoreFlags & GTF_CALL_M_VARARGS) { chars += printf("[CALL_M_VARARGS]"); } if (call->gtCallMoreFlags & GTF_CALL_M_RETBUFFARG) { chars += printf("[CALL_M_RETBUFFARG]"); } if (call->gtCallMoreFlags & GTF_CALL_M_DELEGATE_INV) { chars += printf("[CALL_M_DELEGATE_INV]"); } if (call->gtCallMoreFlags & GTF_CALL_M_NOGCCHECK) { chars += printf("[CALL_M_NOGCCHECK]"); } if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) { chars += printf("[CALL_M_SPECIAL_INTRINSIC]"); } if (call->IsUnmanaged()) { if (call->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) { chars += printf("[CALL_M_UNMGD_THISCALL]"); } } else if (call->IsVirtualStub()) { if (call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT) { chars += printf("[CALL_M_VIRTSTUB_REL_INDIRECT]"); } } else if (!call->IsVirtual()) { if (call->gtCallMoreFlags & GTF_CALL_M_NONVIRT_SAME_THIS) { chars += printf("[CALL_M_NONVIRT_SAME_THIS]"); } } if (call->gtCallMoreFlags & GTF_CALL_M_FRAME_VAR_DEATH) { chars += printf("[CALL_M_FRAME_VAR_DEATH]"); } if (call->gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_JIT_HELPER) { chars += printf("[CALL_M_TAILCALL_VIA_JIT_HELPER]"); } #if FEATURE_TAILCALL_OPT if (call->gtCallMoreFlags & GTF_CALL_M_IMPLICIT_TAILCALL) { chars += printf("[CALL_M_IMPLICIT_TAILCALL]"); } #endif if (call->gtCallMoreFlags & GTF_CALL_M_PINVOKE) { chars += printf("[CALL_M_PINVOKE]"); } if (call->IsFatPointerCandidate()) { chars += printf("[CALL_FAT_POINTER_CANDIDATE]"); } if (call->IsGuarded()) { chars += printf("[CALL_GUARDED]"); } if (call->IsExpRuntimeLookup()) { chars += printf("[CALL_EXP_RUNTIME_LOOKUP]"); } } break; default: { GenTreeFlags flags = (tree->gtFlags & (~(GTF_COMMON_MASK | GTF_OVERFLOW))); if (flags != 0) { chars += printf("[%08X]", flags); } } break; } // Common flags. 
if (tree->gtFlags & GTF_ASG) { chars += printf("[ASG]"); } if (tree->gtFlags & GTF_CALL) { chars += printf("[CALL]"); } switch (op) { case GT_MUL: case GT_CAST: case GT_ADD: case GT_SUB: if (tree->gtFlags & GTF_OVERFLOW) { chars += printf("[OVERFLOW]"); } break; default: break; } if (tree->gtFlags & GTF_EXCEPT) { chars += printf("[EXCEPT]"); } if (tree->gtFlags & GTF_GLOB_REF) { chars += printf("[GLOB_REF]"); } if (tree->gtFlags & GTF_ORDER_SIDEEFF) { chars += printf("[ORDER_SIDEEFF]"); } if (tree->gtFlags & GTF_REVERSE_OPS) { if (op != GT_LCL_VAR) { chars += printf("[REVERSE_OPS]"); } } if (tree->gtFlags & GTF_SPILLED) { chars += printf("[SPILLED_OPER]"); } #if FEATURE_SET_FLAGS if (tree->gtFlags & GTF_SET_FLAGS) { if ((op != GT_IND) && (op != GT_STOREIND)) { chars += printf("[ZSF_SET_FLAGS]"); } } #endif if (tree->gtFlags & GTF_IND_NONFAULTING) { if (tree->OperIsIndirOrArrLength()) { chars += printf("[IND_NONFAULTING]"); } } if (tree->gtFlags & GTF_MAKE_CSE) { chars += printf("[MAKE_CSE]"); } if (tree->gtFlags & GTF_DONT_CSE) { chars += printf("[DONT_CSE]"); } if (tree->gtFlags & GTF_BOOLEAN) { chars += printf("[BOOLEAN]"); } if (tree->gtFlags & GTF_UNSIGNED) { chars += printf("[SMALL_UNSIGNED]"); } if (tree->gtFlags & GTF_LATE_ARG) { chars += printf("[SMALL_LATE_ARG]"); } if (tree->gtFlags & GTF_SPILL) { chars += printf("[SPILL]"); } if (tree->gtFlags & GTF_REUSE_REG_VAL) { if (op == GT_CNS_INT) { chars += printf("[REUSE_REG_VAL]"); } } } } void dTreeFlags(GenTree* tree) { cTreeFlags(JitTls::GetCompiler(), tree); } #endif // DEBUG #if VARSET_COUNTOPS // static BitSetSupport::BitSetOpCounter Compiler::m_varsetOpCounter("VarSetOpCounts.log"); #endif #if ALLVARSET_COUNTOPS // static BitSetSupport::BitSetOpCounter Compiler::m_allvarsetOpCounter("AllVarSetOpCounts.log"); #endif // static HelperCallProperties Compiler::s_helperCallProperties; /*****************************************************************************/ /*****************************************************************************/ //------------------------------------------------------------------------ // killGCRefs: // Given some tree node return does it need all GC refs to be spilled from // callee save registers. // // Arguments: // tree - the tree for which we ask about gc refs. // // Return Value: // true - tree kills GC refs on callee save registers // false - tree doesn't affect GC refs on callee save registers bool Compiler::killGCRefs(GenTree* tree) { if (tree->IsCall()) { GenTreeCall* call = tree->AsCall(); if (call->IsUnmanaged()) { return true; } if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_JIT_PINVOKE_BEGIN)) { assert(opts.ShouldUsePInvokeHelpers()); return true; } } else if (tree->OperIs(GT_START_PREEMPTGC)) { return true; } return false; } //------------------------------------------------------------------------ // lvaIsOSRLocal: check if this local var is one that requires special // treatment for OSR compilations. 
// // Arguments: // varNum - variable of interest // // Return Value: // true - this is an OSR compile and this local requires special treatment // false - not an OSR compile, or not an interesting local for OSR bool Compiler::lvaIsOSRLocal(unsigned varNum) { if (!opts.IsOSR()) { return false; } if (varNum < info.compLocalsCount) { return true; } LclVarDsc* varDsc = lvaGetDesc(varNum); if (varDsc->lvIsStructField) { return (varDsc->lvParentLcl < info.compLocalsCount); } return false; } //------------------------------------------------------------------------------ // gtTypeForNullCheck: helper to get the most optimal and correct type for nullcheck // // Arguments: // tree - the node for nullcheck; // var_types Compiler::gtTypeForNullCheck(GenTree* tree) { if (varTypeIsArithmetic(tree)) { #if defined(TARGET_XARCH) // Just an optimization for XARCH - smaller mov if (varTypeIsLong(tree)) { return TYP_INT; } #endif return tree->TypeGet(); } // for the rest: probe a single byte to avoid potential AVEs return TYP_BYTE; } //------------------------------------------------------------------------------ // gtChangeOperToNullCheck: helper to change tree oper to a NULLCHECK. // // Arguments: // tree - the node to change; // basicBlock - basic block of the node. // // Notes: // the function should not be called after lowering for platforms that do not support // emitting NULLCHECK nodes, like arm32. Use `Lowering::TransformUnusedIndirection` // that handles it and calls this function when appropriate. // void Compiler::gtChangeOperToNullCheck(GenTree* tree, BasicBlock* block) { assert(tree->OperIs(GT_FIELD, GT_IND, GT_OBJ, GT_BLK)); tree->ChangeOper(GT_NULLCHECK); tree->ChangeType(gtTypeForNullCheck(tree)); block->bbFlags |= BBF_HAS_NULLCHECK; optMethodFlags |= OMF_HAS_NULLCHECK; } #if defined(DEBUG) //------------------------------------------------------------------------------ // devirtualizationDetailToString: describe the detailed devirtualization reason // // Arguments: // detail - detail to describe // // Returns: // descriptive string // const char* Compiler::devirtualizationDetailToString(CORINFO_DEVIRTUALIZATION_DETAIL detail) { switch (detail) { case CORINFO_DEVIRTUALIZATION_UNKNOWN: return "unknown"; case CORINFO_DEVIRTUALIZATION_SUCCESS: return "success"; case CORINFO_DEVIRTUALIZATION_FAILED_CANON: return "object class was canonical"; case CORINFO_DEVIRTUALIZATION_FAILED_COM: return "object class was com"; case CORINFO_DEVIRTUALIZATION_FAILED_CAST: return "object class could not be cast to interface class"; case CORINFO_DEVIRTUALIZATION_FAILED_LOOKUP: return "interface method could not be found"; case CORINFO_DEVIRTUALIZATION_FAILED_DIM: return "interface method was default interface method"; case CORINFO_DEVIRTUALIZATION_FAILED_SUBCLASS: return "object not subclass of base class"; case CORINFO_DEVIRTUALIZATION_FAILED_SLOT: return "virtual method installed via explicit override"; case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE: return "devirtualization crossed version bubble"; case CORINFO_DEVIRTUALIZATION_MULTIPLE_IMPL: return "object class has multiple implementations of interface"; case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_CLASS_DECL: return "decl method is defined on class and decl method not in version bubble, and decl method not in " "type closest to version bubble"; case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_INTERFACE_DECL: return "decl method is defined on interface and not in version bubble, and implementation type not " "entirely defined in bubble"; case 
CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_IMPL: return "object class not defined within version bubble"; case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_IMPL_NOT_REFERENCEABLE: return "object class cannot be referenced from R2R code due to missing tokens"; case CORINFO_DEVIRTUALIZATION_FAILED_DUPLICATE_INTERFACE: return "crossgen2 virtual method algorithm and runtime algorithm differ in the presence of duplicate " "interface implementations"; case CORINFO_DEVIRTUALIZATION_FAILED_DECL_NOT_REPRESENTABLE: return "Decl method cannot be represented in R2R image"; default: return "undefined"; } } #endif // defined(DEBUG) #if TRACK_ENREG_STATS Compiler::EnregisterStats Compiler::s_enregisterStats; void Compiler::EnregisterStats::RecordLocal(const LclVarDsc* varDsc) { m_totalNumberOfVars++; if (varDsc->TypeGet() == TYP_STRUCT) { m_totalNumberOfStructVars++; } if (!varDsc->lvDoNotEnregister) { m_totalNumberOfEnregVars++; if (varDsc->TypeGet() == TYP_STRUCT) { m_totalNumberOfStructEnregVars++; } } else { switch (varDsc->GetDoNotEnregReason()) { case DoNotEnregisterReason::AddrExposed: m_addrExposed++; break; case DoNotEnregisterReason::DontEnregStructs: m_dontEnregStructs++; break; case DoNotEnregisterReason::NotRegSizeStruct: m_notRegSizeStruct++; break; case DoNotEnregisterReason::LocalField: m_localField++; break; case DoNotEnregisterReason::VMNeedsStackAddr: m_VMNeedsStackAddr++; break; case DoNotEnregisterReason::LiveInOutOfHandler: m_liveInOutHndlr++; break; case DoNotEnregisterReason::BlockOp: m_blockOp++; break; case DoNotEnregisterReason::IsStructArg: m_structArg++; break; case DoNotEnregisterReason::DepField: m_depField++; break; case DoNotEnregisterReason::NoRegVars: m_noRegVars++; break; case DoNotEnregisterReason::MinOptsGC: m_minOptsGC++; break; #if !defined(TARGET_64BIT) case DoNotEnregisterReason::LongParamField: m_longParamField++; break; #endif #ifdef JIT32_GCENCODER case DoNotEnregisterReason::PinningRef: m_PinningRef++; break; #endif case DoNotEnregisterReason::LclAddrNode: m_lclAddrNode++; break; case DoNotEnregisterReason::CastTakesAddr: m_castTakesAddr++; break; case DoNotEnregisterReason::StoreBlkSrc: m_storeBlkSrc++; break; case DoNotEnregisterReason::OneAsgRetyping: m_oneAsgRetyping++; break; case DoNotEnregisterReason::SwizzleArg: m_swizzleArg++; break; case DoNotEnregisterReason::BlockOpRet: m_blockOpRet++; break; case DoNotEnregisterReason::ReturnSpCheck: m_returnSpCheck++; break; case DoNotEnregisterReason::SimdUserForcesDep: m_simdUserForcesDep++; break; default: unreached(); break; } if (varDsc->GetDoNotEnregReason() == DoNotEnregisterReason::AddrExposed) { // We can't `assert(IsAddressExposed())` because `fgAdjustForAddressExposedOrWrittenThis` // does not clear `m_doNotEnregReason` on `this`. 
switch (varDsc->GetAddrExposedReason()) { case AddressExposedReason::PARENT_EXPOSED: m_parentExposed++; break; case AddressExposedReason::TOO_CONSERVATIVE: m_tooConservative++; break; case AddressExposedReason::ESCAPE_ADDRESS: m_escapeAddress++; break; case AddressExposedReason::WIDE_INDIR: m_wideIndir++; break; case AddressExposedReason::OSR_EXPOSED: m_osrExposed++; break; case AddressExposedReason::STRESS_LCL_FLD: m_stressLclFld++; break; case AddressExposedReason::COPY_FLD_BY_FLD: m_copyFldByFld++; break; case AddressExposedReason::DISPATCH_RET_BUF: m_dispatchRetBuf++; break; default: unreached(); break; } } } } void Compiler::EnregisterStats::Dump(FILE* fout) const { const unsigned totalNumberOfNotStructVars = s_enregisterStats.m_totalNumberOfVars - s_enregisterStats.m_totalNumberOfStructVars; const unsigned totalNumberOfNotStructEnregVars = s_enregisterStats.m_totalNumberOfEnregVars - s_enregisterStats.m_totalNumberOfStructEnregVars; const unsigned notEnreg = s_enregisterStats.m_totalNumberOfVars - s_enregisterStats.m_totalNumberOfEnregVars; fprintf(fout, "\nLocals enregistration statistics:\n"); if (m_totalNumberOfVars == 0) { fprintf(fout, "No locals to report.\n"); return; } fprintf(fout, "total number of locals: %d, number of enregistered: %d, notEnreg: %d, ratio: %.2f\n", m_totalNumberOfVars, m_totalNumberOfEnregVars, m_totalNumberOfVars - m_totalNumberOfEnregVars, (float)m_totalNumberOfEnregVars / m_totalNumberOfVars); if (m_totalNumberOfStructVars != 0) { fprintf(fout, "total number of struct locals: %d, number of enregistered: %d, notEnreg: %d, ratio: %.2f\n", m_totalNumberOfStructVars, m_totalNumberOfStructEnregVars, m_totalNumberOfStructVars - m_totalNumberOfStructEnregVars, (float)m_totalNumberOfStructEnregVars / m_totalNumberOfStructVars); } const unsigned numberOfPrimitiveLocals = totalNumberOfNotStructVars - totalNumberOfNotStructEnregVars; if (numberOfPrimitiveLocals != 0) { fprintf(fout, "total number of primitive locals: %d, number of enregistered: %d, notEnreg: %d, ratio: %.2f\n", totalNumberOfNotStructVars, totalNumberOfNotStructEnregVars, numberOfPrimitiveLocals, (float)totalNumberOfNotStructEnregVars / totalNumberOfNotStructVars); } if (notEnreg == 0) { fprintf(fout, "All locals are enregistered.\n"); return; } #define PRINT_STATS(stat, total) \ if (stat != 0) \ { \ fprintf(fout, #stat " %d, ratio: %.2f\n", stat, (float)stat / total); \ } PRINT_STATS(m_addrExposed, notEnreg); PRINT_STATS(m_dontEnregStructs, notEnreg); PRINT_STATS(m_notRegSizeStruct, notEnreg); PRINT_STATS(m_localField, notEnreg); PRINT_STATS(m_VMNeedsStackAddr, notEnreg); PRINT_STATS(m_liveInOutHndlr, notEnreg); PRINT_STATS(m_blockOp, notEnreg); PRINT_STATS(m_structArg, notEnreg); PRINT_STATS(m_depField, notEnreg); PRINT_STATS(m_noRegVars, notEnreg); PRINT_STATS(m_minOptsGC, notEnreg); #if !defined(TARGET_64BIT) PRINT_STATS(m_longParamField, notEnreg); #endif // !TARGET_64BIT #ifdef JIT32_GCENCODER PRINT_STATS(m_PinningRef, notEnreg); #endif // JIT32_GCENCODER PRINT_STATS(m_lclAddrNode, notEnreg); PRINT_STATS(m_castTakesAddr, notEnreg); PRINT_STATS(m_storeBlkSrc, notEnreg); PRINT_STATS(m_oneAsgRetyping, notEnreg); PRINT_STATS(m_swizzleArg, notEnreg); PRINT_STATS(m_blockOpRet, notEnreg); PRINT_STATS(m_returnSpCheck, notEnreg); PRINT_STATS(m_simdUserForcesDep, notEnreg); fprintf(fout, "\nAddr exposed details:\n"); if (m_addrExposed == 0) { fprintf(fout, "\nNo address exposed locals to report.\n"); return; } PRINT_STATS(m_parentExposed, m_addrExposed); PRINT_STATS(m_tooConservative, m_addrExposed); 
PRINT_STATS(m_escapeAddress, m_addrExposed); PRINT_STATS(m_wideIndir, m_addrExposed); PRINT_STATS(m_osrExposed, m_addrExposed); PRINT_STATS(m_stressLclFld, m_addrExposed); PRINT_STATS(m_copyFldByFld, m_addrExposed); PRINT_STATS(m_dispatchRetBuf, m_addrExposed); } #endif // TRACK_ENREG_STATS
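// For reference, a minimal sketch (illustrative, not part of the build) of what one PRINT_STATS
// invocation above expands to, using m_addrExposed as the stat and notEnreg as the total:
//
//   if (m_addrExposed != 0)
//   {
//       fprintf(fout, "m_addrExposed %d, ratio: %.2f\n", m_addrExposed, (float)m_addrExposed / notEnreg);
//   }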
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Compiler XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif // _MSC_VER #include "hostallocator.h" #include "emit.h" #include "ssabuilder.h" #include "valuenum.h" #include "rangecheck.h" #include "lower.h" #include "stacklevelsetter.h" #include "jittelemetry.h" #include "patchpointinfo.h" #include "jitstd/algorithm.h" extern ICorJitHost* g_jitHost; #if defined(DEBUG) // Column settings for COMPlus_JitDumpIR. We could(should) make these programmable. #define COLUMN_OPCODE 30 #define COLUMN_OPERANDS (COLUMN_OPCODE + 25) #define COLUMN_KINDS 110 #define COLUMN_FLAGS (COLUMN_KINDS + 32) #endif #if defined(DEBUG) unsigned Compiler::jitTotalMethodCompiled = 0; #endif // defined(DEBUG) #if defined(DEBUG) LONG Compiler::jitNestingLevel = 0; #endif // defined(DEBUG) // static bool Compiler::s_pAltJitExcludeAssembliesListInitialized = false; AssemblyNamesList2* Compiler::s_pAltJitExcludeAssembliesList = nullptr; #ifdef DEBUG // static bool Compiler::s_pJitDisasmIncludeAssembliesListInitialized = false; AssemblyNamesList2* Compiler::s_pJitDisasmIncludeAssembliesList = nullptr; // static bool Compiler::s_pJitFunctionFileInitialized = false; MethodSet* Compiler::s_pJitMethodSet = nullptr; #endif // DEBUG #ifdef CONFIGURABLE_ARM_ABI // static bool GlobalJitOptions::compFeatureHfa = false; LONG GlobalJitOptions::compUseSoftFPConfigured = 0; #endif // CONFIGURABLE_ARM_ABI /***************************************************************************** * * Little helpers to grab the current cycle counter value; this is done * differently based on target architecture, host toolchain, etc. The * main thing is to keep the overhead absolutely minimal; in fact, on * x86/x64 we use RDTSC even though it's not thread-safe; GetThreadCycles * (which is monotonous) is just too expensive. */ #ifdef FEATURE_JIT_METHOD_PERF #if defined(HOST_X86) || defined(HOST_AMD64) #if defined(_MSC_VER) #include <intrin.h> inline bool _our_GetThreadCycles(unsigned __int64* cycleOut) { *cycleOut = __rdtsc(); return true; } #elif defined(__GNUC__) inline bool _our_GetThreadCycles(unsigned __int64* cycleOut) { uint32_t hi, lo; __asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi)); *cycleOut = (static_cast<unsigned __int64>(hi) << 32) | static_cast<unsigned __int64>(lo); return true; } #else // neither _MSC_VER nor __GNUC__ // The following *might* work - might as well try. #define _our_GetThreadCycles(cp) GetThreadCycles(cp) #endif #elif defined(HOST_ARM) || defined(HOST_ARM64) // If this doesn't work please see ../gc/gc.cpp for additional ARM // info (and possible solutions). #define _our_GetThreadCycles(cp) GetThreadCycles(cp) #else // not x86/x64 and not ARM // Don't know what this target is, but let's give it a try; if // someone really wants to make this work, please add the right // code here. 
#define _our_GetThreadCycles(cp) GetThreadCycles(cp) #endif // which host OS const BYTE genTypeSizes[] = { #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) sz, #include "typelist.h" #undef DEF_TP }; const BYTE genTypeAlignments[] = { #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) al, #include "typelist.h" #undef DEF_TP }; const BYTE genTypeStSzs[] = { #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) st, #include "typelist.h" #undef DEF_TP }; const BYTE genActualTypes[] = { #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) jitType, #include "typelist.h" #undef DEF_TP }; #endif // FEATURE_JIT_METHOD_PERF /*****************************************************************************/ inline unsigned getCurTime() { SYSTEMTIME tim; GetSystemTime(&tim); return (((tim.wHour * 60) + tim.wMinute) * 60 + tim.wSecond) * 1000 + tim.wMilliseconds; } /*****************************************************************************/ #ifdef DEBUG /*****************************************************************************/ static FILE* jitSrcFilePtr; static unsigned jitCurSrcLine; void Compiler::JitLogEE(unsigned level, const char* fmt, ...) { va_list args; if (verbose) { va_start(args, fmt); vflogf(jitstdout, fmt, args); va_end(args); } va_start(args, fmt); vlogf(level, fmt, args); va_end(args); } #endif // DEBUG /*****************************************************************************/ #if defined(DEBUG) || MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || DISPLAY_SIZES || CALL_ARG_STATS static unsigned genMethodCnt; // total number of methods JIT'ted unsigned genMethodICnt; // number of interruptible methods unsigned genMethodNCnt; // number of non-interruptible methods static unsigned genSmallMethodsNeedingExtraMemoryCnt = 0; #endif /*****************************************************************************/ #if MEASURE_NODE_SIZE NodeSizeStats genNodeSizeStats; NodeSizeStats genNodeSizeStatsPerFunc; unsigned genTreeNcntHistBuckets[] = {10, 20, 30, 40, 50, 100, 200, 300, 400, 500, 1000, 5000, 10000, 0}; Histogram genTreeNcntHist(genTreeNcntHistBuckets); unsigned genTreeNsizHistBuckets[] = {1000, 5000, 10000, 50000, 100000, 500000, 1000000, 0}; Histogram genTreeNsizHist(genTreeNsizHistBuckets); #endif // MEASURE_NODE_SIZE /*****************************************************************************/ #if MEASURE_MEM_ALLOC unsigned memAllocHistBuckets[] = {64, 128, 192, 256, 512, 1024, 4096, 8192, 0}; Histogram memAllocHist(memAllocHistBuckets); unsigned memUsedHistBuckets[] = {16, 32, 64, 128, 192, 256, 512, 1024, 4096, 8192, 0}; Histogram memUsedHist(memUsedHistBuckets); #endif // MEASURE_MEM_ALLOC /***************************************************************************** * * Variables to keep track of total code amounts. */ #if DISPLAY_SIZES size_t grossVMsize; // Total IL code size size_t grossNCsize; // Native code + data size size_t totalNCsize; // Native code + data + GC info size (TODO-Cleanup: GC info size only accurate for JIT32_GCENCODER) size_t gcHeaderISize; // GC header size: interruptible methods size_t gcPtrMapISize; // GC pointer map size: interruptible methods size_t gcHeaderNSize; // GC header size: non-interruptible methods size_t gcPtrMapNSize; // GC pointer map size: non-interruptible methods #endif // DISPLAY_SIZES /***************************************************************************** * * Variables to keep track of argument counts. 
*/ #if CALL_ARG_STATS unsigned argTotalCalls; unsigned argHelperCalls; unsigned argStaticCalls; unsigned argNonVirtualCalls; unsigned argVirtualCalls; unsigned argTotalArgs; // total number of args for all calls (including objectPtr) unsigned argTotalDWordArgs; unsigned argTotalLongArgs; unsigned argTotalFloatArgs; unsigned argTotalDoubleArgs; unsigned argTotalRegArgs; unsigned argTotalTemps; unsigned argTotalLclVar; unsigned argTotalDeferred; unsigned argTotalConst; unsigned argTotalObjPtr; unsigned argTotalGTF_ASGinArgs; unsigned argMaxTempsPerMethod; unsigned argCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; Histogram argCntTable(argCntBuckets); unsigned argDWordCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; Histogram argDWordCntTable(argDWordCntBuckets); unsigned argDWordLngCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; Histogram argDWordLngCntTable(argDWordLngCntBuckets); unsigned argTempsCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; Histogram argTempsCntTable(argTempsCntBuckets); #endif // CALL_ARG_STATS /***************************************************************************** * * Variables to keep track of basic block counts. */ #if COUNT_BASIC_BLOCKS // -------------------------------------------------- // Basic block count frequency table: // -------------------------------------------------- // <= 1 ===> 26872 count ( 56% of total) // 2 .. 2 ===> 669 count ( 58% of total) // 3 .. 3 ===> 4687 count ( 68% of total) // 4 .. 5 ===> 5101 count ( 78% of total) // 6 .. 10 ===> 5575 count ( 90% of total) // 11 .. 20 ===> 3028 count ( 97% of total) // 21 .. 50 ===> 1108 count ( 99% of total) // 51 .. 100 ===> 182 count ( 99% of total) // 101 .. 1000 ===> 34 count (100% of total) // 1001 .. 10000 ===> 0 count (100% of total) // -------------------------------------------------- unsigned bbCntBuckets[] = {1, 2, 3, 5, 10, 20, 50, 100, 1000, 10000, 0}; Histogram bbCntTable(bbCntBuckets); /* Histogram for the IL opcode size of methods with a single basic block */ unsigned bbSizeBuckets[] = {1, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 0}; Histogram bbOneBBSizeTable(bbSizeBuckets); #endif // COUNT_BASIC_BLOCKS /***************************************************************************** * * Used by optFindNaturalLoops to gather statistical information such as * - total number of natural loops * - number of loops with 1, 2, ... 
exit conditions * - number of loops that have an iterator (for like) * - number of loops that have a constant iterator */ #if COUNT_LOOPS unsigned totalLoopMethods; // counts the total number of methods that have natural loops unsigned maxLoopsPerMethod; // counts the maximum number of loops a method has unsigned totalLoopOverflows; // # of methods that identified more loops than we can represent unsigned totalLoopCount; // counts the total number of natural loops unsigned totalUnnatLoopCount; // counts the total number of (not-necessarily natural) loops unsigned totalUnnatLoopOverflows; // # of methods that identified more unnatural loops than we can represent unsigned iterLoopCount; // counts the # of loops with an iterator (for like) unsigned simpleTestLoopCount; // counts the # of loops with an iterator and a simple loop condition (iter < const) unsigned constIterLoopCount; // counts the # of loops with a constant iterator (for like) bool hasMethodLoops; // flag to keep track if we already counted a method as having loops unsigned loopsThisMethod; // counts the number of loops in the current method bool loopOverflowThisMethod; // True if we exceeded the max # of loops in the method. /* Histogram for number of loops in a method */ unsigned loopCountBuckets[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0}; Histogram loopCountTable(loopCountBuckets); /* Histogram for number of loop exits */ unsigned loopExitCountBuckets[] = {0, 1, 2, 3, 4, 5, 6, 0}; Histogram loopExitCountTable(loopExitCountBuckets); #endif // COUNT_LOOPS //------------------------------------------------------------------------ // getJitGCType: Given the VM's CorInfoGCType convert it to the JIT's var_types // // Arguments: // gcType - an enum value that originally came from an element // of the BYTE[] returned from getClassGClayout() // // Return Value: // The corresponsing enum value from the JIT's var_types // // Notes: // The gcLayout of each field of a struct is returned from getClassGClayout() // as a BYTE[] but each BYTE element is actually a CorInfoGCType value // Note when we 'know' that there is only one element in theis array // the JIT will often pass the address of a single BYTE, instead of a BYTE[] // var_types Compiler::getJitGCType(BYTE gcType) { var_types result = TYP_UNKNOWN; CorInfoGCType corInfoType = (CorInfoGCType)gcType; if (corInfoType == TYPE_GC_NONE) { result = TYP_I_IMPL; } else if (corInfoType == TYPE_GC_REF) { result = TYP_REF; } else if (corInfoType == TYPE_GC_BYREF) { result = TYP_BYREF; } else { noway_assert(!"Bad value of 'gcType'"); } return result; } #ifdef TARGET_X86 //--------------------------------------------------------------------------- // isTrivialPointerSizedStruct: // Check if the given struct type contains only one pointer-sized integer value type // // Arguments: // clsHnd - the handle for the struct type. // // Return Value: // true if the given struct type contains only one pointer-sized integer value type, // false otherwise. 
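//
// Example (illustrative; the struct shapes are hypothetical, not taken from this code base):
//   on x86, a struct whose only field is a pointer-sized non-GC integer (e.g. a single IntPtr
//   field, possibly wrapped in further single-field structs) returns true; a struct containing
//   two ints, or one wrapping an object reference, returns false.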
// bool Compiler::isTrivialPointerSizedStruct(CORINFO_CLASS_HANDLE clsHnd) const { assert(info.compCompHnd->isValueClass(clsHnd)); if (info.compCompHnd->getClassSize(clsHnd) != TARGET_POINTER_SIZE) { return false; } for (;;) { // all of class chain must be of value type and must have only one field if (!info.compCompHnd->isValueClass(clsHnd) || info.compCompHnd->getClassNumInstanceFields(clsHnd) != 1) { return false; } CORINFO_CLASS_HANDLE* pClsHnd = &clsHnd; CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); CorInfoType fieldType = info.compCompHnd->getFieldType(fldHnd, pClsHnd); var_types vt = JITtype2varType(fieldType); if (fieldType == CORINFO_TYPE_VALUECLASS) { clsHnd = *pClsHnd; } else if (varTypeIsI(vt) && !varTypeIsGC(vt)) { return true; } else { return false; } } } #endif // TARGET_X86 //--------------------------------------------------------------------------- // isNativePrimitiveStructType: // Check if the given struct type is an intrinsic type that should be treated as though // it is not a struct at the unmanaged ABI boundary. // // Arguments: // clsHnd - the handle for the struct type. // // Return Value: // true if the given struct type should be treated as a primitive for unmanaged calls, // false otherwise. // bool Compiler::isNativePrimitiveStructType(CORINFO_CLASS_HANDLE clsHnd) { if (!isIntrinsicType(clsHnd)) { return false; } const char* namespaceName = nullptr; const char* typeName = getClassNameFromMetadata(clsHnd, &namespaceName); if (strcmp(namespaceName, "System.Runtime.InteropServices") != 0) { return false; } return strcmp(typeName, "CLong") == 0 || strcmp(typeName, "CULong") == 0 || strcmp(typeName, "NFloat") == 0; } //----------------------------------------------------------------------------- // getPrimitiveTypeForStruct: // Get the "primitive" type that is is used for a struct // of size 'structSize'. // We examine 'clsHnd' to check the GC layout of the struct and // return TYP_REF for structs that simply wrap an object. // If the struct is a one element HFA/HVA, we will return the // proper floating point or vector type. // // Arguments: // structSize - the size of the struct type, cannot be zero // clsHnd - the handle for the struct type, used when may have // an HFA or if we need the GC layout for an object ref. // // Return Value: // The primitive type (i.e. byte, short, int, long, ref, float, double) // used to pass or return structs of this size. // If we shouldn't use a "primitive" type then TYP_UNKNOWN is returned. // Notes: // For 32-bit targets (X86/ARM32) the 64-bit TYP_LONG type is not // considered a primitive type by this method. // So a struct that wraps a 'long' is passed and returned in the // same way as any other 8-byte struct // For ARM32 if we have an HFA struct that wraps a 64-bit double // we will return TYP_DOUBLE. // For vector calling conventions, a vector is considered a "primitive" // type, as it is passed in a single register. // var_types Compiler::getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS_HANDLE clsHnd, bool isVarArg) { assert(structSize != 0); var_types useType = TYP_UNKNOWN; // Start by determining if we have an HFA/HVA with a single element. if (GlobalJitOptions::compFeatureHfa) { // Arm64 Windows VarArg methods arguments will not classify HFA types, they will need to be treated // as if they are not HFA types. 
if (!(TargetArchitecture::IsArm64 && TargetOS::IsWindows && isVarArg)) { switch (structSize) { case 4: case 8: #ifdef TARGET_ARM64 case 16: #endif // TARGET_ARM64 { var_types hfaType = GetHfaType(clsHnd); // We're only interested in the case where the struct size is equal to the size of the hfaType. if (varTypeIsValidHfaType(hfaType)) { if (genTypeSize(hfaType) == structSize) { useType = hfaType; } else { return TYP_UNKNOWN; } } } } if (useType != TYP_UNKNOWN) { return useType; } } } // Now deal with non-HFA/HVA structs. switch (structSize) { case 1: useType = TYP_BYTE; break; case 2: useType = TYP_SHORT; break; #if !defined(TARGET_XARCH) || defined(UNIX_AMD64_ABI) case 3: useType = TYP_INT; break; #endif // !TARGET_XARCH || UNIX_AMD64_ABI #ifdef TARGET_64BIT case 4: // We dealt with the one-float HFA above. All other 4-byte structs are handled as INT. useType = TYP_INT; break; #if !defined(TARGET_XARCH) || defined(UNIX_AMD64_ABI) case 5: case 6: case 7: useType = TYP_I_IMPL; break; #endif // !TARGET_XARCH || UNIX_AMD64_ABI #endif // TARGET_64BIT case TARGET_POINTER_SIZE: { BYTE gcPtr = 0; // Check if this pointer-sized struct is wrapping a GC object info.compCompHnd->getClassGClayout(clsHnd, &gcPtr); useType = getJitGCType(gcPtr); } break; default: useType = TYP_UNKNOWN; break; } return useType; } //----------------------------------------------------------------------------- // getArgTypeForStruct: // Get the type that is used to pass values of the given struct type. // If you have already retrieved the struct size then it should be // passed as the optional fourth argument, as this allows us to avoid // an extra call to getClassSize(clsHnd) // // Arguments: // clsHnd - the handle for the struct type // wbPassStruct - An "out" argument with information about how // the struct is to be passed // isVarArg - is vararg, used to ignore HFA types for Arm64 windows varargs // structSize - the size of the struct type, // or zero if we should call getClassSize(clsHnd) // // Return Value: // For wbPassStruct you can pass a 'nullptr' and nothing will be written // or returned for that out parameter. // When *wbPassStruct is SPK_PrimitiveType this method's return value // is the primitive type used to pass the struct. // When *wbPassStruct is SPK_ByReference this method's return value // is always TYP_UNKNOWN and the struct type is passed by reference to a copy // When *wbPassStruct is SPK_ByValue or SPK_ByValueAsHfa this method's return value // is always TYP_STRUCT and the struct type is passed by value either // using multiple registers or on the stack. // // Assumptions: // The size must be the size of the given type. // The given class handle must be for a value type (struct). // // Notes: // About HFA types: // When the clsHnd is a one element HFA type we return the appropriate // floating point primitive type and *wbPassStruct is SPK_PrimitiveType // If there are two or more elements in the HFA type then the this method's // return value is TYP_STRUCT and *wbPassStruct is SPK_ByValueAsHfa // var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, structPassingKind* wbPassStruct, bool isVarArg, unsigned structSize) { var_types useType = TYP_UNKNOWN; structPassingKind howToPassStruct = SPK_Unknown; // We must change this before we return assert(structSize != 0); // Determine if we can pass the struct as a primitive type. // Note that on x86 we only pass specific pointer-sized structs that satisfy isTrivialPointerSizedStruct checks. 
#ifndef TARGET_X86 #ifdef UNIX_AMD64_ABI // An 8-byte struct may need to be passed in a floating point register // So we always consult the struct "Classifier" routine // SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; eeGetSystemVAmd64PassStructInRegisterDescriptor(clsHnd, &structDesc); if (structDesc.passedInRegisters && (structDesc.eightByteCount != 1)) { // We can't pass this as a primitive type. } else if (structDesc.eightByteClassifications[0] == SystemVClassificationTypeSSE) { // If this is passed as a floating type, use that. // Otherwise, we'll use the general case - we don't want to use the "EightByteType" // directly, because it returns `TYP_INT` for any integral type <= 4 bytes, and // we need to preserve small types. useType = GetEightByteType(structDesc, 0); } else #endif // UNIX_AMD64_ABI // The largest arg passed in a single register is MAX_PASS_SINGLEREG_BYTES, // so we can skip calling getPrimitiveTypeForStruct when we // have a struct that is larger than that. // if (structSize <= MAX_PASS_SINGLEREG_BYTES) { // We set the "primitive" useType based upon the structSize // and also examine the clsHnd to see if it is an HFA of count one useType = getPrimitiveTypeForStruct(structSize, clsHnd, isVarArg); } #else if (isTrivialPointerSizedStruct(clsHnd)) { useType = TYP_I_IMPL; } #endif // !TARGET_X86 // Did we change this struct type into a simple "primitive" type? // if (useType != TYP_UNKNOWN) { // Yes, we should use the "primitive" type in 'useType' howToPassStruct = SPK_PrimitiveType; } else // We can't replace the struct with a "primitive" type { // See if we can pass this struct by value, possibly in multiple registers // or if we should pass it by reference to a copy // if (structSize <= MAX_PASS_MULTIREG_BYTES) { // Structs that are HFA/HVA's are passed by value in multiple registers. // Arm64 Windows VarArg methods arguments will not classify HFA/HVA types, they will need to be treated // as if they are not HFA/HVA types. 
var_types hfaType; if (TargetArchitecture::IsArm64 && TargetOS::IsWindows && isVarArg) { hfaType = TYP_UNDEF; } else { hfaType = GetHfaType(clsHnd); } if (varTypeIsValidHfaType(hfaType)) { // HFA's of count one should have been handled by getPrimitiveTypeForStruct assert(GetHfaCount(clsHnd) >= 2); // setup wbPassType and useType indicate that this is passed by value as an HFA // using multiple registers // (when all of the parameters registers are used, then the stack will be used) howToPassStruct = SPK_ByValueAsHfa; useType = TYP_STRUCT; } else // Not an HFA struct type { #ifdef UNIX_AMD64_ABI // The case of (structDesc.eightByteCount == 1) should have already been handled if ((structDesc.eightByteCount > 1) || !structDesc.passedInRegisters) { // setup wbPassType and useType indicate that this is passed by value in multiple registers // (when all of the parameters registers are used, then the stack will be used) howToPassStruct = SPK_ByValue; useType = TYP_STRUCT; } else { assert(structDesc.eightByteCount == 0); // Otherwise we pass this struct by reference to a copy // setup wbPassType and useType indicate that this is passed using one register // (by reference to a copy) howToPassStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #elif defined(TARGET_ARM64) // Structs that are pointer sized or smaller should have been handled by getPrimitiveTypeForStruct assert(structSize > TARGET_POINTER_SIZE); // On ARM64 structs that are 9-16 bytes are passed by value in multiple registers // if (structSize <= (TARGET_POINTER_SIZE * 2)) { // setup wbPassType and useType indicate that this is passed by value in multiple registers // (when all of the parameters registers are used, then the stack will be used) howToPassStruct = SPK_ByValue; useType = TYP_STRUCT; } else // a structSize that is 17-32 bytes in size { // Otherwise we pass this struct by reference to a copy // setup wbPassType and useType indicate that this is passed using one register // (by reference to a copy) howToPassStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #elif defined(TARGET_X86) || defined(TARGET_ARM) // Otherwise we pass this struct by value on the stack // setup wbPassType and useType indicate that this is passed by value according to the X86/ARM32 ABI howToPassStruct = SPK_ByValue; useType = TYP_STRUCT; #else // TARGET_XXX noway_assert(!"Unhandled TARGET in getArgTypeForStruct (with FEATURE_MULTIREG_ARGS=1)"); #endif // TARGET_XXX } } else // (structSize > MAX_PASS_MULTIREG_BYTES) { // We have a (large) struct that can't be replaced with a "primitive" type // and can't be passed in multiple registers CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) || defined(TARGET_ARM) || defined(UNIX_AMD64_ABI) // Otherwise we pass this struct by value on the stack // setup wbPassType and useType indicate that this is passed by value according to the X86/ARM32 ABI howToPassStruct = SPK_ByValue; useType = TYP_STRUCT; #elif defined(TARGET_AMD64) || defined(TARGET_ARM64) // Otherwise we pass this struct by reference to a copy // setup wbPassType and useType indicate that this is passed using one register (by reference to a copy) howToPassStruct = SPK_ByReference; useType = TYP_UNKNOWN; #else // TARGET_XXX noway_assert(!"Unhandled TARGET in getArgTypeForStruct"); #endif // TARGET_XXX } } // 'howToPassStruct' must be set to one of the valid values before we return assert(howToPassStruct != SPK_Unknown); if (wbPassStruct != nullptr) { *wbPassStruct = howToPassStruct; } return useType; } 
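// A minimal usage sketch for getArgTypeForStruct (hypothetical caller; 'clsHnd', 'isVarArg', and
// 'size' stand in for values a real call site would already have):
//
//   structPassingKind passKind = SPK_Unknown;
//   unsigned          size     = info.compCompHnd->getClassSize(clsHnd);
//   var_types         argType  = getArgTypeForStruct(clsHnd, &passKind, isVarArg, size);
//
//   // passKind == SPK_PrimitiveType           => argType is the primitive register type to use
//   // passKind == SPK_ByReference             => argType is TYP_UNKNOWN; pass a reference to a copy
//   // passKind == SPK_ByValue/SPK_ByValueAsHfa => argType is TYP_STRUCT; multiple regs or stack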
//----------------------------------------------------------------------------- // getReturnTypeForStruct: // Get the type that is used to return values of the given struct type. // If you have already retrieved the struct size then it should be // passed as the optional third argument, as this allows us to avoid // an extra call to getClassSize(clsHnd) // // Arguments: // clsHnd - the handle for the struct type // callConv - the calling convention of the function // that returns this struct. // wbReturnStruct - An "out" argument with information about how // the struct is to be returned // structSize - the size of the struct type, // or zero if we should call getClassSize(clsHnd) // // Return Value: // For wbReturnStruct you can pass a 'nullptr' and nothing will be written // or returned for that out parameter. // When *wbReturnStruct is SPK_PrimitiveType this method's return value // is the primitive type used to return the struct. // When *wbReturnStruct is SPK_ByReference this method's return value // is always TYP_UNKNOWN and the struct type is returned using a return buffer // When *wbReturnStruct is SPK_ByValue or SPK_ByValueAsHfa this method's return value // is always TYP_STRUCT and the struct type is returned using multiple registers. // // Assumptions: // The size must be the size of the given type. // The given class handle must be for a value type (struct). // // Notes: // About HFA types: // When the clsHnd is a one element HFA type then this method's return // value is the appropriate floating point primitive type and // *wbReturnStruct is SPK_PrimitiveType. // If there are two or more elements in the HFA type and the target supports // multireg return types then the return value is TYP_STRUCT and // *wbReturnStruct is SPK_ByValueAsHfa. // Additionally if there are two or more elements in the HFA type and // the target doesn't support multreg return types then it is treated // as if it wasn't an HFA type. // About returning TYP_STRUCT: // Whenever this method's return value is TYP_STRUCT it always means // that multiple registers are used to return this struct. // var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, CorInfoCallConvExtension callConv, structPassingKind* wbReturnStruct /* = nullptr */, unsigned structSize /* = 0 */) { var_types useType = TYP_UNKNOWN; structPassingKind howToReturnStruct = SPK_Unknown; // We must change this before we return bool canReturnInRegister = true; assert(clsHnd != NO_CLASS_HANDLE); if (structSize == 0) { structSize = info.compCompHnd->getClassSize(clsHnd); } assert(structSize > 0); #ifdef UNIX_AMD64_ABI // An 8-byte struct may need to be returned in a floating point register // So we always consult the struct "Classifier" routine // SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; eeGetSystemVAmd64PassStructInRegisterDescriptor(clsHnd, &structDesc); if (structDesc.eightByteCount == 1) { assert(structSize <= sizeof(double)); assert(structDesc.passedInRegisters); if (structDesc.eightByteClassifications[0] == SystemVClassificationTypeSSE) { // If this is returned as a floating type, use that. // Otherwise, leave as TYP_UNKONWN and we'll sort things out below. useType = GetEightByteType(structDesc, 0); howToReturnStruct = SPK_PrimitiveType; } } else { // Return classification is not always size based... 
canReturnInRegister = structDesc.passedInRegisters; if (!canReturnInRegister) { assert(structDesc.eightByteCount == 0); howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } } #elif UNIX_X86_ABI if (callConv != CorInfoCallConvExtension::Managed && !isNativePrimitiveStructType(clsHnd)) { canReturnInRegister = false; howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #endif if (TargetOS::IsWindows && !TargetArchitecture::IsArm32 && callConvIsInstanceMethodCallConv(callConv) && !isNativePrimitiveStructType(clsHnd)) { canReturnInRegister = false; howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } // Check for cases where a small struct is returned in a register // via a primitive type. // // The largest "primitive type" is MAX_PASS_SINGLEREG_BYTES // so we can skip calling getPrimitiveTypeForStruct when we // have a struct that is larger than that. if (canReturnInRegister && (useType == TYP_UNKNOWN) && (structSize <= MAX_PASS_SINGLEREG_BYTES)) { // We set the "primitive" useType based upon the structSize // and also examine the clsHnd to see if it is an HFA of count one // // The ABI for struct returns in varArg methods, is same as the normal case, // so pass false for isVararg useType = getPrimitiveTypeForStruct(structSize, clsHnd, /*isVararg=*/false); if (useType != TYP_UNKNOWN) { if (structSize == genTypeSize(useType)) { // Currently: 1, 2, 4, or 8 byte structs howToReturnStruct = SPK_PrimitiveType; } else { // Currently: 3, 5, 6, or 7 byte structs assert(structSize < genTypeSize(useType)); howToReturnStruct = SPK_EnclosingType; } } } #ifdef TARGET_64BIT // Note this handles an odd case when FEATURE_MULTIREG_RET is disabled and HFAs are enabled // // getPrimitiveTypeForStruct will return TYP_UNKNOWN for a struct that is an HFA of two floats // because when HFA are enabled, normally we would use two FP registers to pass or return it // // But if we don't have support for multiple register return types, we have to change this. // Since what we have is an 8-byte struct (float + float) we change useType to TYP_I_IMPL // so that the struct is returned instead using an 8-byte integer register. // if ((FEATURE_MULTIREG_RET == 0) && (useType == TYP_UNKNOWN) && (structSize == (2 * sizeof(float))) && IsHfa(clsHnd)) { useType = TYP_I_IMPL; howToReturnStruct = SPK_PrimitiveType; } #endif // Did we change this struct type into a simple "primitive" type? if (useType != TYP_UNKNOWN) { // If so, we should have already set howToReturnStruct, too. 
assert(howToReturnStruct != SPK_Unknown); } else if (canReturnInRegister) // We can't replace the struct with a "primitive" type { // See if we can return this struct by value, possibly in multiple registers // or if we should return it using a return buffer register // if ((FEATURE_MULTIREG_RET == 1) && (structSize <= MAX_RET_MULTIREG_BYTES)) { // Structs that are HFA's are returned in multiple registers if (IsHfa(clsHnd)) { // HFA's of count one should have been handled by getPrimitiveTypeForStruct assert(GetHfaCount(clsHnd) >= 2); // setup wbPassType and useType indicate that this is returned by value as an HFA // using multiple registers howToReturnStruct = SPK_ByValueAsHfa; useType = TYP_STRUCT; } else // Not an HFA struct type { #ifdef UNIX_AMD64_ABI // The cases of (structDesc.eightByteCount == 1) and (structDesc.eightByteCount == 0) // should have already been handled assert(structDesc.eightByteCount > 1); // setup wbPassType and useType indicate that this is returned by value in multiple registers howToReturnStruct = SPK_ByValue; useType = TYP_STRUCT; assert(structDesc.passedInRegisters == true); #elif defined(TARGET_ARM64) // Structs that are pointer sized or smaller should have been handled by getPrimitiveTypeForStruct assert(structSize > TARGET_POINTER_SIZE); // On ARM64 structs that are 9-16 bytes are returned by value in multiple registers // if (structSize <= (TARGET_POINTER_SIZE * 2)) { // setup wbPassType and useType indicate that this is return by value in multiple registers howToReturnStruct = SPK_ByValue; useType = TYP_STRUCT; } else // a structSize that is 17-32 bytes in size { // Otherwise we return this struct using a return buffer // setup wbPassType and useType indicate that this is returned using a return buffer register // (reference to a return buffer) howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #elif defined(TARGET_X86) // Only 8-byte structs are return in multiple registers. // We also only support multireg struct returns on x86 to match the native calling convention. // So return 8-byte structs only when the calling convention is a native calling convention. 
if (structSize == MAX_RET_MULTIREG_BYTES && callConv != CorInfoCallConvExtension::Managed) { // setup wbPassType and useType indicate that this is return by value in multiple registers howToReturnStruct = SPK_ByValue; useType = TYP_STRUCT; } else { // Otherwise we return this struct using a return buffer // setup wbPassType and useType indicate that this is returned using a return buffer register // (reference to a return buffer) howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #elif defined(TARGET_ARM) // Otherwise we return this struct using a return buffer // setup wbPassType and useType indicate that this is returned using a return buffer register // (reference to a return buffer) howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; #else // TARGET_XXX noway_assert(!"Unhandled TARGET in getReturnTypeForStruct (with FEATURE_MULTIREG_ARGS=1)"); #endif // TARGET_XXX } } else // (structSize > MAX_RET_MULTIREG_BYTES) || (FEATURE_MULTIREG_RET == 0) { // We have a (large) struct that can't be replaced with a "primitive" type // and can't be returned in multiple registers // We return this struct using a return buffer register // setup wbPassType and useType indicate that this is returned using a return buffer register // (reference to a return buffer) howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } } // 'howToReturnStruct' must be set to one of the valid values before we return assert(howToReturnStruct != SPK_Unknown); if (wbReturnStruct != nullptr) { *wbReturnStruct = howToReturnStruct; } return useType; } /////////////////////////////////////////////////////////////////////////////// // // MEASURE_NOWAY: code to measure and rank dynamic occurrences of noway_assert. // (Just the appearances of noway_assert, whether the assert is true or false.) // This might help characterize the cost of noway_assert in non-DEBUG builds, // or determine which noway_assert should be simple DEBUG-only asserts. // /////////////////////////////////////////////////////////////////////////////// #if MEASURE_NOWAY struct FileLine { char* m_file; unsigned m_line; char* m_condStr; FileLine() : m_file(nullptr), m_line(0), m_condStr(nullptr) { } FileLine(const char* file, unsigned line, const char* condStr) : m_line(line) { size_t newSize = (strlen(file) + 1) * sizeof(char); m_file = HostAllocator::getHostAllocator().allocate<char>(newSize); strcpy_s(m_file, newSize, file); newSize = (strlen(condStr) + 1) * sizeof(char); m_condStr = HostAllocator::getHostAllocator().allocate<char>(newSize); strcpy_s(m_condStr, newSize, condStr); } FileLine(const FileLine& other) { m_file = other.m_file; m_line = other.m_line; m_condStr = other.m_condStr; } // GetHashCode() and Equals() are needed by JitHashTable static unsigned GetHashCode(FileLine fl) { assert(fl.m_file != nullptr); unsigned code = fl.m_line; for (const char* p = fl.m_file; *p != '\0'; p++) { code += *p; } // Could also add condStr. 
return code; } static bool Equals(FileLine fl1, FileLine fl2) { return (fl1.m_line == fl2.m_line) && (0 == strcmp(fl1.m_file, fl2.m_file)); } }; typedef JitHashTable<FileLine, FileLine, size_t, HostAllocator> FileLineToCountMap; FileLineToCountMap* NowayAssertMap; void Compiler::RecordNowayAssert(const char* filename, unsigned line, const char* condStr) { if (NowayAssertMap == nullptr) { NowayAssertMap = new (HostAllocator::getHostAllocator()) FileLineToCountMap(HostAllocator::getHostAllocator()); } FileLine fl(filename, line, condStr); size_t* pCount = NowayAssertMap->LookupPointer(fl); if (pCount == nullptr) { NowayAssertMap->Set(fl, 1); } else { ++(*pCount); } } void RecordNowayAssertGlobal(const char* filename, unsigned line, const char* condStr) { if ((JitConfig.JitMeasureNowayAssert() == 1) && (JitTls::GetCompiler() != nullptr)) { JitTls::GetCompiler()->RecordNowayAssert(filename, line, condStr); } } struct NowayAssertCountMap { size_t count; FileLine fl; NowayAssertCountMap() : count(0) { } struct compare { bool operator()(const NowayAssertCountMap& elem1, const NowayAssertCountMap& elem2) { return (ssize_t)elem2.count < (ssize_t)elem1.count; // sort in descending order } }; }; void DisplayNowayAssertMap() { if (NowayAssertMap != nullptr) { FILE* fout; LPCWSTR strJitMeasureNowayAssertFile = JitConfig.JitMeasureNowayAssertFile(); if (strJitMeasureNowayAssertFile != nullptr) { fout = _wfopen(strJitMeasureNowayAssertFile, W("a")); if (fout == nullptr) { fprintf(jitstdout, "Failed to open JitMeasureNowayAssertFile \"%ws\"\n", strJitMeasureNowayAssertFile); return; } } else { fout = jitstdout; } // Iterate noway assert map, create sorted table by occurrence, dump it. unsigned count = NowayAssertMap->GetCount(); NowayAssertCountMap* nacp = new NowayAssertCountMap[count]; unsigned i = 0; for (FileLineToCountMap::KeyIterator iter = NowayAssertMap->Begin(), end = NowayAssertMap->End(); !iter.Equal(end); ++iter) { nacp[i].count = iter.GetValue(); nacp[i].fl = iter.Get(); ++i; } jitstd::sort(nacp, nacp + count, NowayAssertCountMap::compare()); if (fout == jitstdout) { // Don't output the header if writing to a file, since we'll be appending to existing dumps in that case. fprintf(fout, "\nnoway_assert counts:\n"); fprintf(fout, "count, file, line, text\n"); } for (i = 0; i < count; i++) { fprintf(fout, "%u, %s, %u, \"%s\"\n", nacp[i].count, nacp[i].fl.m_file, nacp[i].fl.m_line, nacp[i].fl.m_condStr); } if (fout != jitstdout) { fclose(fout); fout = nullptr; } } } #endif // MEASURE_NOWAY /***************************************************************************** * variables to keep track of how many iterations we go in a dataflow pass */ #if DATAFLOW_ITER unsigned CSEiterCount; // counts the # of iteration for the CSE dataflow unsigned CFiterCount; // counts the # of iteration for the Const Folding dataflow #endif // DATAFLOW_ITER #if MEASURE_BLOCK_SIZE size_t genFlowNodeSize; size_t genFlowNodeCnt; #endif // MEASURE_BLOCK_SIZE /*****************************************************************************/ // We keep track of methods we've already compiled. 
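// Usage sketch (inferred from the code above and from compShutdown below; the exact
// configuration surface and variable prefix may differ by runtime version): in a
// MEASURE_NOWAY build, each noway_assert occurrence is tallied through
// RecordNowayAssertGlobal(), and DisplayNowayAssertMap() dumps the totals at JIT
// shutdown. For example:
//
//   set COMPlus_JitMeasureNowayAssert=1
//   set COMPlus_JitMeasureNowayAssertFile=noway_counts.csv   (optional; output is appended)
//
// The dump is sorted by descending hit count and each row is "count, file, line, text".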
/***************************************************************************** * Declare the statics */ #ifdef DEBUG /* static */ LONG Compiler::s_compMethodsCount = 0; // to produce unique label names #endif #if MEASURE_MEM_ALLOC /* static */ bool Compiler::s_dspMemStats = false; #endif #ifndef PROFILING_SUPPORTED const bool Compiler::Options::compNoPInvokeInlineCB = false; #endif /***************************************************************************** * * One time initialization code */ /* static */ void Compiler::compStartup() { #if DISPLAY_SIZES grossVMsize = grossNCsize = totalNCsize = 0; #endif // DISPLAY_SIZES /* Initialize the table of tree node sizes */ GenTree::InitNodeSize(); #ifdef JIT32_GCENCODER // Initialize the GC encoder lookup table GCInfo::gcInitEncoderLookupTable(); #endif /* Initialize the emitter */ emitter::emitInit(); // Static vars of ValueNumStore ValueNumStore::InitValueNumStoreStatics(); compDisplayStaticSizes(jitstdout); } /***************************************************************************** * * One time finalization code */ /* static */ void Compiler::compShutdown() { if (s_pAltJitExcludeAssembliesList != nullptr) { s_pAltJitExcludeAssembliesList->~AssemblyNamesList2(); // call the destructor s_pAltJitExcludeAssembliesList = nullptr; } #ifdef DEBUG if (s_pJitDisasmIncludeAssembliesList != nullptr) { s_pJitDisasmIncludeAssembliesList->~AssemblyNamesList2(); // call the destructor s_pJitDisasmIncludeAssembliesList = nullptr; } #endif // DEBUG #if MEASURE_NOWAY DisplayNowayAssertMap(); #endif // MEASURE_NOWAY /* Shut down the emitter */ emitter::emitDone(); #if defined(DEBUG) || defined(INLINE_DATA) // Finish reading and/or writing inline xml if (JitConfig.JitInlineDumpXmlFile() != nullptr) { FILE* file = _wfopen(JitConfig.JitInlineDumpXmlFile(), W("a")); if (file != nullptr) { InlineStrategy::FinalizeXml(file); fclose(file); } else { InlineStrategy::FinalizeXml(); } } #endif // defined(DEBUG) || defined(INLINE_DATA) #if defined(DEBUG) || MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || DISPLAY_SIZES || CALL_ARG_STATS if (genMethodCnt == 0) { return; } #endif #if NODEBASH_STATS GenTree::ReportOperBashing(jitstdout); #endif // Where should we write our statistics output? FILE* fout = jitstdout; #ifdef FEATURE_JIT_METHOD_PERF if (compJitTimeLogFilename != nullptr) { FILE* jitTimeLogFile = _wfopen(compJitTimeLogFilename, W("a")); if (jitTimeLogFile != nullptr) { CompTimeSummaryInfo::s_compTimeSummary.Print(jitTimeLogFile); fclose(jitTimeLogFile); } } JitTimer::Shutdown(); #endif // FEATURE_JIT_METHOD_PERF #if COUNT_AST_OPERS // Add up all the counts so that we can show percentages of total unsigned totalCount = 0; for (unsigned op = 0; op < GT_COUNT; op++) { totalCount += GenTree::s_gtNodeCounts[op]; } if (totalCount > 0) { struct OperInfo { unsigned Count; unsigned Size; genTreeOps Oper; }; OperInfo opers[GT_COUNT]; for (unsigned op = 0; op < GT_COUNT; op++) { opers[op] = {GenTree::s_gtNodeCounts[op], GenTree::s_gtTrueSizes[op], static_cast<genTreeOps>(op)}; } jitstd::sort(opers, opers + ArrLen(opers), [](const OperInfo& l, const OperInfo& r) { // We'll be sorting in descending order. 
return l.Count >= r.Count; }); unsigned remainingCount = totalCount; unsigned remainingCountLarge = 0; unsigned remainingCountSmall = 0; unsigned countLarge = 0; unsigned countSmall = 0; fprintf(fout, "\nGenTree operator counts (approximate):\n\n"); for (OperInfo oper : opers) { unsigned size = oper.Size; unsigned count = oper.Count; double percentage = 100.0 * count / totalCount; if (size > TREE_NODE_SZ_SMALL) { countLarge += count; } else { countSmall += count; } // Let's not show anything below a threshold if (percentage >= 0.5) { fprintf(fout, " GT_%-17s %7u (%4.1lf%%) %3u bytes each\n", GenTree::OpName(oper.Oper), count, percentage, size); remainingCount -= count; } else { if (size > TREE_NODE_SZ_SMALL) { remainingCountLarge += count; } else { remainingCountSmall += count; } } } if (remainingCount > 0) { fprintf(fout, " All other GT_xxx ... %7u (%4.1lf%%) ... %4.1lf%% small + %4.1lf%% large\n", remainingCount, 100.0 * remainingCount / totalCount, 100.0 * remainingCountSmall / totalCount, 100.0 * remainingCountLarge / totalCount); } fprintf(fout, " -----------------------------------------------------\n"); fprintf(fout, " Total ....... %11u --ALL-- ... %4.1lf%% small + %4.1lf%% large\n", totalCount, 100.0 * countSmall / totalCount, 100.0 * countLarge / totalCount); fprintf(fout, "\n"); } #endif // COUNT_AST_OPERS #if DISPLAY_SIZES if (grossVMsize && grossNCsize) { fprintf(fout, "\n"); fprintf(fout, "--------------------------------------\n"); fprintf(fout, "Function and GC info size stats\n"); fprintf(fout, "--------------------------------------\n"); fprintf(fout, "[%7u VM, %8u %6s %4u%%] %s\n", grossVMsize, grossNCsize, Target::g_tgtCPUName, 100 * grossNCsize / grossVMsize, "Total (excluding GC info)"); fprintf(fout, "[%7u VM, %8u %6s %4u%%] %s\n", grossVMsize, totalNCsize, Target::g_tgtCPUName, 100 * totalNCsize / grossVMsize, "Total (including GC info)"); if (gcHeaderISize || gcHeaderNSize) { fprintf(fout, "\n"); fprintf(fout, "GC tables : [%7uI,%7uN] %7u byt (%u%% of IL, %u%% of %s).\n", gcHeaderISize + gcPtrMapISize, gcHeaderNSize + gcPtrMapNSize, totalNCsize - grossNCsize, 100 * (totalNCsize - grossNCsize) / grossVMsize, 100 * (totalNCsize - grossNCsize) / grossNCsize, Target::g_tgtCPUName); fprintf(fout, "GC headers : [%7uI,%7uN] %7u byt, [%4.1fI,%4.1fN] %4.1f byt/meth\n", gcHeaderISize, gcHeaderNSize, gcHeaderISize + gcHeaderNSize, (float)gcHeaderISize / (genMethodICnt + 0.001), (float)gcHeaderNSize / (genMethodNCnt + 0.001), (float)(gcHeaderISize + gcHeaderNSize) / genMethodCnt); fprintf(fout, "GC ptr maps : [%7uI,%7uN] %7u byt, [%4.1fI,%4.1fN] %4.1f byt/meth\n", gcPtrMapISize, gcPtrMapNSize, gcPtrMapISize + gcPtrMapNSize, (float)gcPtrMapISize / (genMethodICnt + 0.001), (float)gcPtrMapNSize / (genMethodNCnt + 0.001), (float)(gcPtrMapISize + gcPtrMapNSize) / genMethodCnt); } else { fprintf(fout, "\n"); fprintf(fout, "GC tables take up %u bytes (%u%% of instr, %u%% of %6s code).\n", totalNCsize - grossNCsize, 100 * (totalNCsize - grossNCsize) / grossVMsize, 100 * (totalNCsize - grossNCsize) / grossNCsize, Target::g_tgtCPUName); } #ifdef DEBUG #if DOUBLE_ALIGN fprintf(fout, "%u out of %u methods generated with double-aligned stack\n", Compiler::s_lvaDoubleAlignedProcsCount, genMethodCnt); #endif #endif } #endif // DISPLAY_SIZES #if CALL_ARG_STATS compDispCallArgStats(fout); #endif #if COUNT_BASIC_BLOCKS fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Basic block count frequency table:\n"); fprintf(fout, 
"--------------------------------------------------\n"); bbCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "IL method size frequency table for methods with a single basic block:\n"); fprintf(fout, "--------------------------------------------------\n"); bbOneBBSizeTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); #endif // COUNT_BASIC_BLOCKS #if COUNT_LOOPS fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Loop stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Total number of methods with loops is %5u\n", totalLoopMethods); fprintf(fout, "Total number of loops is %5u\n", totalLoopCount); fprintf(fout, "Maximum number of loops per method is %5u\n", maxLoopsPerMethod); fprintf(fout, "# of methods overflowing nat loop table is %5u\n", totalLoopOverflows); fprintf(fout, "Total number of 'unnatural' loops is %5u\n", totalUnnatLoopCount); fprintf(fout, "# of methods overflowing unnat loop limit is %5u\n", totalUnnatLoopOverflows); fprintf(fout, "Total number of loops with an iterator is %5u\n", iterLoopCount); fprintf(fout, "Total number of loops with a simple iterator is %5u\n", simpleTestLoopCount); fprintf(fout, "Total number of loops with a constant iterator is %5u\n", constIterLoopCount); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Loop count frequency table:\n"); fprintf(fout, "--------------------------------------------------\n"); loopCountTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Loop exit count frequency table:\n"); fprintf(fout, "--------------------------------------------------\n"); loopExitCountTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); #endif // COUNT_LOOPS #if DATAFLOW_ITER fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Total number of iterations in the CSE dataflow loop is %5u\n", CSEiterCount); fprintf(fout, "Total number of iterations in the CF dataflow loop is %5u\n", CFiterCount); #endif // DATAFLOW_ITER #if MEASURE_NODE_SIZE fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "GenTree node allocation stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Allocated %6I64u tree nodes (%7I64u bytes total, avg %4I64u bytes per method)\n", genNodeSizeStats.genTreeNodeCnt, genNodeSizeStats.genTreeNodeSize, genNodeSizeStats.genTreeNodeSize / genMethodCnt); fprintf(fout, "Allocated %7I64u bytes of unused tree node space (%3.2f%%)\n", genNodeSizeStats.genTreeNodeSize - genNodeSizeStats.genTreeNodeActualSize, (float)(100 * (genNodeSizeStats.genTreeNodeSize - genNodeSizeStats.genTreeNodeActualSize)) / genNodeSizeStats.genTreeNodeSize); fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Distribution of per-method GenTree node counts:\n"); genTreeNcntHist.dump(fout); fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Distribution of per-method GenTree node allocations (in bytes):\n"); genTreeNsizHist.dump(fout); #endif // MEASURE_NODE_SIZE #if MEASURE_BLOCK_SIZE fprintf(fout, "\n"); fprintf(fout, 
"---------------------------------------------------\n"); fprintf(fout, "BasicBlock and flowList/BasicBlockList allocation stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Allocated %6u basic blocks (%7u bytes total, avg %4u bytes per method)\n", BasicBlock::s_Count, BasicBlock::s_Size, BasicBlock::s_Size / genMethodCnt); fprintf(fout, "Allocated %6u flow nodes (%7u bytes total, avg %4u bytes per method)\n", genFlowNodeCnt, genFlowNodeSize, genFlowNodeSize / genMethodCnt); #endif // MEASURE_BLOCK_SIZE #if MEASURE_MEM_ALLOC if (s_dspMemStats) { fprintf(fout, "\nAll allocations:\n"); ArenaAllocator::dumpAggregateMemStats(jitstdout); fprintf(fout, "\nLargest method:\n"); ArenaAllocator::dumpMaxMemStats(jitstdout); fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Distribution of total memory allocated per method (in KB):\n"); memAllocHist.dump(fout); fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Distribution of total memory used per method (in KB):\n"); memUsedHist.dump(fout); } #endif // MEASURE_MEM_ALLOC #if LOOP_HOIST_STATS #ifdef DEBUG // Always display loop stats in retail if (JitConfig.DisplayLoopHoistStats() != 0) #endif // DEBUG { PrintAggregateLoopHoistStats(jitstdout); } #endif // LOOP_HOIST_STATS #if TRACK_ENREG_STATS if (JitConfig.JitEnregStats() != 0) { s_enregisterStats.Dump(fout); } #endif // TRACK_ENREG_STATS #if MEASURE_PTRTAB_SIZE fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "GC pointer table stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Reg pointer descriptor size (internal): %8u (avg %4u per method)\n", GCInfo::s_gcRegPtrDscSize, GCInfo::s_gcRegPtrDscSize / genMethodCnt); fprintf(fout, "Total pointer table size: %8u (avg %4u per method)\n", GCInfo::s_gcTotalPtrTabSize, GCInfo::s_gcTotalPtrTabSize / genMethodCnt); #endif // MEASURE_PTRTAB_SIZE #if MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || MEASURE_PTRTAB_SIZE || DISPLAY_SIZES if (genMethodCnt != 0) { fprintf(fout, "\n"); fprintf(fout, "A total of %6u methods compiled", genMethodCnt); #if DISPLAY_SIZES if (genMethodICnt || genMethodNCnt) { fprintf(fout, " (%u interruptible, %u non-interruptible)", genMethodICnt, genMethodNCnt); } #endif // DISPLAY_SIZES fprintf(fout, ".\n"); } #endif // MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || MEASURE_PTRTAB_SIZE || DISPLAY_SIZES #if EMITTER_STATS emitterStats(fout); #endif #if MEASURE_FATAL fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Fatal errors stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, " badCode: %u\n", fatal_badCode); fprintf(fout, " noWay: %u\n", fatal_noWay); fprintf(fout, " implLimitation: %u\n", fatal_implLimitation); fprintf(fout, " NOMEM: %u\n", fatal_NOMEM); fprintf(fout, " noWayAssertBody: %u\n", fatal_noWayAssertBody); #ifdef DEBUG fprintf(fout, " noWayAssertBodyArgs: %u\n", fatal_noWayAssertBodyArgs); #endif // DEBUG fprintf(fout, " NYI: %u\n", fatal_NYI); #endif // MEASURE_FATAL } /***************************************************************************** * Display static data structure sizes. 
*/ /* static */ void Compiler::compDisplayStaticSizes(FILE* fout) { #if MEASURE_NODE_SIZE GenTree::DumpNodeSizes(fout); #endif #if EMITTER_STATS emitterStaticStats(fout); #endif } /***************************************************************************** * * Constructor */ void Compiler::compInit(ArenaAllocator* pAlloc, CORINFO_METHOD_HANDLE methodHnd, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, InlineInfo* inlineInfo) { assert(pAlloc); compArenaAllocator = pAlloc; // Inlinee Compile object will only be allocated when needed for the 1st time. InlineeCompiler = nullptr; // Set the inline info. impInlineInfo = inlineInfo; info.compCompHnd = compHnd; info.compMethodHnd = methodHnd; info.compMethodInfo = methodInfo; #ifdef DEBUG bRangeAllowStress = false; #endif #if defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS // Initialize the method name and related info, as it is used early in determining whether to // apply stress modes, and which ones to apply. // Note that even allocating memory can invoke the stress mechanism, so ensure that both // 'compMethodName' and 'compFullName' are either null or valid before we allocate. // (The stress mode checks references these prior to checking bRangeAllowStress.) // info.compMethodName = nullptr; info.compClassName = nullptr; info.compFullName = nullptr; const char* classNamePtr; const char* methodName; methodName = eeGetMethodName(methodHnd, &classNamePtr); unsigned len = (unsigned)roundUp(strlen(classNamePtr) + 1); info.compClassName = getAllocator(CMK_DebugOnly).allocate<char>(len); info.compMethodName = methodName; strcpy_s((char*)info.compClassName, len, classNamePtr); info.compFullName = eeGetMethodFullName(methodHnd); info.compPerfScore = 0.0; info.compMethodSuperPMIIndex = g_jitHost->getIntConfigValue(W("SuperPMIMethodContextNumber"), -1); #endif // defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS #if defined(DEBUG) || defined(INLINE_DATA) info.compMethodHashPrivate = 0; #endif // defined(DEBUG) || defined(INLINE_DATA) #ifdef DEBUG // Opt-in to jit stress based on method hash ranges. // // Note the default (with JitStressRange not set) is that all // methods will be subject to stress. static ConfigMethodRange fJitStressRange; fJitStressRange.EnsureInit(JitConfig.JitStressRange()); assert(!fJitStressRange.Error()); bRangeAllowStress = fJitStressRange.Contains(info.compMethodHash()); #endif // DEBUG eeInfoInitialized = false; compDoAggressiveInlining = false; if (compIsForInlining()) { m_inlineStrategy = nullptr; compInlineResult = inlineInfo->inlineResult; } else { m_inlineStrategy = new (this, CMK_Inlining) InlineStrategy(this); compInlineResult = nullptr; } // Initialize this to the first phase to run. mostRecentlyActivePhase = PHASE_PRE_IMPORT; // Initially, no phase checks are active. activePhaseChecks = PhaseChecks::CHECK_NONE; #ifdef FEATURE_TRACELOGGING // Make sure JIT telemetry is initialized as soon as allocations can be made // but no later than a point where noway_asserts can be thrown. // 1. JIT telemetry could allocate some objects internally. // 2. NowayAsserts are tracked through telemetry. // Note: JIT telemetry could gather data when compiler is not fully initialized. // So you have to initialize the compiler variables you use for telemetry. 
assert((unsigned)PHASE_PRE_IMPORT == 0); info.compILCodeSize = 0; info.compMethodHnd = nullptr; compJitTelemetry.Initialize(this); #endif fgInit(); lvaInit(); if (!compIsForInlining()) { codeGen = getCodeGenerator(this); optInit(); hashBv::Init(this); compVarScopeMap = nullptr; // If this method were a real constructor for Compiler, these would // become method initializations. impPendingBlockMembers = JitExpandArray<BYTE>(getAllocator()); impSpillCliquePredMembers = JitExpandArray<BYTE>(getAllocator()); impSpillCliqueSuccMembers = JitExpandArray<BYTE>(getAllocator()); new (&genIPmappings, jitstd::placement_t()) jitstd::list<IPmappingDsc>(getAllocator(CMK_DebugInfo)); #ifdef DEBUG new (&genPreciseIPmappings, jitstd::placement_t()) jitstd::list<PreciseIPMapping>(getAllocator(CMK_DebugOnly)); #endif lvMemoryPerSsaData = SsaDefArray<SsaMemDef>(); // // Initialize all the per-method statistics gathering data structures. // optLoopsCloned = 0; #if LOOP_HOIST_STATS m_loopsConsidered = 0; m_curLoopHasHoistedExpression = false; m_loopsWithHoistedExpressions = 0; m_totalHoistedExpressions = 0; #endif // LOOP_HOIST_STATS #if MEASURE_NODE_SIZE genNodeSizeStatsPerFunc.Init(); #endif // MEASURE_NODE_SIZE } else { codeGen = nullptr; } compJmpOpUsed = false; compLongUsed = false; compTailCallUsed = false; compTailPrefixSeen = false; compLocallocSeen = false; compLocallocUsed = false; compLocallocOptimized = false; compQmarkRationalized = false; compQmarkUsed = false; compFloatingPointUsed = false; compSuppressedZeroInit = false; compNeedsGSSecurityCookie = false; compGSReorderStackLayout = false; compGeneratingProlog = false; compGeneratingEpilog = false; compLSRADone = false; compRationalIRForm = false; #ifdef DEBUG compCodeGenDone = false; opts.compMinOptsIsUsed = false; #endif opts.compMinOptsIsSet = false; // Used by fgFindJumpTargets for inlining heuristics. opts.instrCount = 0; // Used to track when we should consider running EarlyProp optMethodFlags = 0; optNoReturnCallCount = 0; #ifdef DEBUG m_nodeTestData = nullptr; m_loopHoistCSEClass = FIRST_LOOP_HOIST_CSE_CLASS; #endif m_switchDescMap = nullptr; m_blockToEHPreds = nullptr; m_fieldSeqStore = nullptr; m_zeroOffsetFieldMap = nullptr; m_arrayInfoMap = nullptr; m_refAnyClass = nullptr; for (MemoryKind memoryKind : allMemoryKinds()) { m_memorySsaMap[memoryKind] = nullptr; } #ifdef DEBUG if (!compIsForInlining()) { compDoComponentUnitTestsOnce(); } #endif // DEBUG vnStore = nullptr; m_opAsgnVarDefSsaNums = nullptr; m_nodeToLoopMemoryBlockMap = nullptr; fgSsaPassesCompleted = 0; fgVNPassesCompleted = 0; // check that HelperCallProperties are initialized assert(s_helperCallProperties.IsPure(CORINFO_HELP_GETSHARED_GCSTATIC_BASE)); assert(!s_helperCallProperties.IsPure(CORINFO_HELP_GETFIELDOBJ)); // quick sanity check // We start with the flow graph in tree-order fgOrder = FGOrderTree; m_classLayoutTable = nullptr; #ifdef FEATURE_SIMD m_simdHandleCache = nullptr; #endif // FEATURE_SIMD compUsesThrowHelper = false; } /***************************************************************************** * * Destructor */ void Compiler::compDone() { } void* Compiler::compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */ void** ppIndirection) /* OUT */ { void* addr; if (info.compMatchedVM) { addr = info.compCompHnd->getHelperFtn(ftnNum, ppIndirection); } else { // If we don't have a matched VM, we won't get valid results when asking for a helper function. 
addr = UlongToPtr(0xCA11CA11); // "callcall" } return addr; } unsigned Compiler::compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd) { var_types sigType = genActualType(JITtype2varType(cit)); unsigned sigSize; sigSize = genTypeSize(sigType); if (cit == CORINFO_TYPE_VALUECLASS) { sigSize = info.compCompHnd->getClassSize(clsHnd); } else if (cit == CORINFO_TYPE_REFANY) { sigSize = 2 * TARGET_POINTER_SIZE; } return sigSize; } #ifdef DEBUG static bool DidComponentUnitTests = false; void Compiler::compDoComponentUnitTestsOnce() { if (!JitConfig.RunComponentUnitTests()) { return; } if (!DidComponentUnitTests) { DidComponentUnitTests = true; ValueNumStore::RunTests(this); BitSetSupport::TestSuite(getAllocatorDebugOnly()); } } //------------------------------------------------------------------------ // compGetJitDefaultFill: // // Return Value: // An unsigned char value used to initizalize memory allocated by the JIT. // The default value is taken from COMPLUS_JitDefaultFill, if is not set // the value will be 0xdd. When JitStress is active a random value based // on the method hash is used. // // Notes: // Note that we can't use small values like zero, because we have some // asserts that can fire for such values. // // static unsigned char Compiler::compGetJitDefaultFill(Compiler* comp) { unsigned char defaultFill = (unsigned char)JitConfig.JitDefaultFill(); if (comp != nullptr && comp->compStressCompile(STRESS_GENERIC_VARN, 50)) { unsigned temp; temp = comp->info.compMethodHash(); temp = (temp >> 16) ^ temp; temp = (temp >> 8) ^ temp; temp = temp & 0xff; // asserts like this: assert(!IsUninitialized(stkLvl)); // mean that small values for defaultFill are problematic // so we make the value larger in that case. if (temp < 0x20) { temp |= 0x80; } // Make a misaligned pointer value to reduce probability of getting a valid value and firing // assert(!IsUninitialized(pointer)). temp |= 0x1; defaultFill = (unsigned char)temp; } return defaultFill; } #endif // DEBUG /*****************************************************************************/ #ifdef DEBUG /*****************************************************************************/ VarName Compiler::compVarName(regNumber reg, bool isFloatReg) { if (isFloatReg) { assert(genIsValidFloatReg(reg)); } else { assert(genIsValidReg(reg)); } if ((info.compVarScopesCount > 0) && compCurBB && opts.varNames) { unsigned lclNum; LclVarDsc* varDsc; /* Look for the matching register */ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { /* If the variable is not in a register, or not in the register we're looking for, quit. */ /* Also, if it is a compiler generated variable (i.e. slot# > info.compVarScopesCount), don't bother. 
*/ if ((varDsc->lvRegister != 0) && (varDsc->GetRegNum() == reg) && (varDsc->lvSlotNum < info.compVarScopesCount)) { /* check if variable in that register is live */ if (VarSetOps::IsMember(this, compCurLife, varDsc->lvVarIndex)) { /* variable is live - find the corresponding slot */ VarScopeDsc* varScope = compFindLocalVar(varDsc->lvSlotNum, compCurBB->bbCodeOffs, compCurBB->bbCodeOffsEnd); if (varScope) { return varScope->vsdName; } } } } } return nullptr; } const char* Compiler::compRegVarName(regNumber reg, bool displayVar, bool isFloatReg) { #ifdef TARGET_ARM isFloatReg = genIsValidFloatReg(reg); #endif if (displayVar && (reg != REG_NA)) { VarName varName = compVarName(reg, isFloatReg); if (varName) { const int NAME_VAR_REG_BUFFER_LEN = 4 + 256 + 1; static char nameVarReg[2][NAME_VAR_REG_BUFFER_LEN]; // to avoid overwriting the buffer when have 2 // consecutive calls before printing static int index = 0; // for circular index into the name array index = (index + 1) % 2; // circular reuse of index sprintf_s(nameVarReg[index], NAME_VAR_REG_BUFFER_LEN, "%s'%s'", getRegName(reg), VarNameToStr(varName)); return nameVarReg[index]; } } /* no debug info required or no variable in that register -> return standard name */ return getRegName(reg); } const char* Compiler::compRegNameForSize(regNumber reg, size_t size) { if (size == 0 || size >= 4) { return compRegVarName(reg, true); } // clang-format off static const char * sizeNames[][2] = { { "al", "ax" }, { "cl", "cx" }, { "dl", "dx" }, { "bl", "bx" }, #ifdef TARGET_AMD64 { "spl", "sp" }, // ESP { "bpl", "bp" }, // EBP { "sil", "si" }, // ESI { "dil", "di" }, // EDI { "r8b", "r8w" }, { "r9b", "r9w" }, { "r10b", "r10w" }, { "r11b", "r11w" }, { "r12b", "r12w" }, { "r13b", "r13w" }, { "r14b", "r14w" }, { "r15b", "r15w" }, #endif // TARGET_AMD64 }; // clang-format on assert(isByteReg(reg)); assert(genRegMask(reg) & RBM_BYTE_REGS); assert(size == 1 || size == 2); return sizeNames[reg][size - 1]; } const char* Compiler::compLocalVarName(unsigned varNum, unsigned offs) { unsigned i; VarScopeDsc* t; for (i = 0, t = info.compVarScopes; i < info.compVarScopesCount; i++, t++) { if (t->vsdVarNum != varNum) { continue; } if (offs >= t->vsdLifeBeg && offs < t->vsdLifeEnd) { return VarNameToStr(t->vsdName); } } return nullptr; } /*****************************************************************************/ #endif // DEBUG /*****************************************************************************/ void Compiler::compSetProcessor() { // // NOTE: This function needs to be kept in sync with EEJitManager::SetCpuInfo() in vm\codeman.cpp // const JitFlags& jitFlags = *opts.jitFlags; #if defined(TARGET_ARM) info.genCPU = CPU_ARM; #elif defined(TARGET_ARM64) info.genCPU = CPU_ARM64; #elif defined(TARGET_AMD64) info.genCPU = CPU_X64; #elif defined(TARGET_X86) if (jitFlags.IsSet(JitFlags::JIT_FLAG_TARGET_P4)) info.genCPU = CPU_X86_PENTIUM_4; else info.genCPU = CPU_X86; #endif // // Processor specific optimizations // CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_AMD64 opts.compUseCMOV = true; #elif defined(TARGET_X86) opts.compUseCMOV = jitFlags.IsSet(JitFlags::JIT_FLAG_USE_CMOV); #ifdef DEBUG if (opts.compUseCMOV) opts.compUseCMOV = !compStressCompile(STRESS_USE_CMOV, 50); #endif // DEBUG #endif // TARGET_X86 // The VM will set the ISA flags depending on actual hardware support // and any specified config switches specified by the user. The exception // here is for certain "artificial ISAs" such as Vector64/128/256 where they // don't actually exist. 
The JIT is in charge of adding those and ensuring // the total sum of flags is still valid. CORINFO_InstructionSetFlags instructionSetFlags = jitFlags.GetInstructionSetFlags(); opts.compSupportsISA = 0; opts.compSupportsISAReported = 0; opts.compSupportsISAExactly = 0; #if defined(TARGET_XARCH) instructionSetFlags.AddInstructionSet(InstructionSet_Vector128); instructionSetFlags.AddInstructionSet(InstructionSet_Vector256); #endif // TARGET_XARCH #if defined(TARGET_ARM64) instructionSetFlags.AddInstructionSet(InstructionSet_Vector64); instructionSetFlags.AddInstructionSet(InstructionSet_Vector128); #endif // TARGET_ARM64 instructionSetFlags = EnsureInstructionSetFlagsAreValid(instructionSetFlags); opts.setSupportedISAs(instructionSetFlags); #ifdef TARGET_XARCH if (!compIsForInlining()) { if (canUseVexEncoding()) { codeGen->GetEmitter()->SetUseVEXEncoding(true); // Assume each JITted method does not contain AVX instruction at first codeGen->GetEmitter()->SetContainsAVX(false); codeGen->GetEmitter()->SetContains256bitAVX(false); } } #endif // TARGET_XARCH } bool Compiler::notifyInstructionSetUsage(CORINFO_InstructionSet isa, bool supported) const { const char* isaString = InstructionSetToString(isa); JITDUMP("Notify VM instruction set (%s) %s be supported.\n", isaString, supported ? "must" : "must not"); return info.compCompHnd->notifyInstructionSetUsage(isa, supported); } #ifdef PROFILING_SUPPORTED // A Dummy routine to receive Enter/Leave/Tailcall profiler callbacks. // These are used when complus_JitEltHookEnabled=1 #ifdef TARGET_AMD64 void DummyProfilerELTStub(UINT_PTR ProfilerHandle, UINT_PTR callerSP) { return; } #else //! TARGET_AMD64 void DummyProfilerELTStub(UINT_PTR ProfilerHandle) { return; } #endif //! TARGET_AMD64 #endif // PROFILING_SUPPORTED bool Compiler::compShouldThrowOnNoway( #ifdef FEATURE_TRACELOGGING const char* filename, unsigned line #endif ) { #ifdef FEATURE_TRACELOGGING compJitTelemetry.NotifyNowayAssert(filename, line); #endif // In min opts, we don't want the noway assert to go through the exception // path. Instead we want it to just silently go through codegen for // compat reasons. return !opts.MinOpts(); } // ConfigInteger does not offer an option for decimal flags. Any numbers are interpreted as hex. // I could add the decimal option to ConfigInteger or I could write a function to reinterpret this // value as the user intended. unsigned ReinterpretHexAsDecimal(unsigned in) { // ex: in: 0x100 returns: 100 unsigned result = 0; unsigned index = 1; // default value if (in == INT_MAX) { return in; } while (in) { unsigned digit = in % 16; in >>= 4; assert(digit < 10); result += digit * index; index *= 10; } return result; } void Compiler::compInitOptions(JitFlags* jitFlags) { #ifdef UNIX_AMD64_ABI opts.compNeedToAlignFrame = false; #endif // UNIX_AMD64_ABI memset(&opts, 0, sizeof(opts)); if (compIsForInlining()) { // The following flags are lost when inlining. (They are removed in // Compiler::fgInvokeInlineeCompiler().) 
assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)); assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_PROF_ENTERLEAVE)); assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_EnC)); assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE)); assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_TRACK_TRANSITIONS)); } opts.jitFlags = jitFlags; opts.compFlags = CLFLG_MAXOPT; // Default value is for full optimization if (jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_CODE) || jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT) || jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)) { opts.compFlags = CLFLG_MINOPT; } // Don't optimize .cctors (except prejit) or if we're an inlinee else if (!jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && ((info.compFlags & FLG_CCTOR) == FLG_CCTOR) && !compIsForInlining()) { opts.compFlags = CLFLG_MINOPT; } // Default value is to generate a blend of size and speed optimizations // opts.compCodeOpt = BLENDED_CODE; // If the EE sets SIZE_OPT or if we are compiling a Class constructor // we will optimize for code size at the expense of speed // if (jitFlags->IsSet(JitFlags::JIT_FLAG_SIZE_OPT) || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR)) { opts.compCodeOpt = SMALL_CODE; } // // If the EE sets SPEED_OPT we will optimize for speed at the expense of code size // else if (jitFlags->IsSet(JitFlags::JIT_FLAG_SPEED_OPT) || (jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1) && !jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT))) { opts.compCodeOpt = FAST_CODE; assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_SIZE_OPT)); } //------------------------------------------------------------------------- opts.compDbgCode = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_CODE); opts.compDbgInfo = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_INFO); opts.compDbgEnC = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_EnC); #ifdef DEBUG opts.compJitAlignLoopAdaptive = JitConfig.JitAlignLoopAdaptive() == 1; opts.compJitAlignLoopBoundary = (unsigned short)JitConfig.JitAlignLoopBoundary(); opts.compJitAlignLoopMinBlockWeight = (unsigned short)JitConfig.JitAlignLoopMinBlockWeight(); opts.compJitAlignLoopForJcc = JitConfig.JitAlignLoopForJcc() == 1; opts.compJitAlignLoopMaxCodeSize = (unsigned short)JitConfig.JitAlignLoopMaxCodeSize(); opts.compJitHideAlignBehindJmp = JitConfig.JitHideAlignBehindJmp() == 1; #else opts.compJitAlignLoopAdaptive = true; opts.compJitAlignLoopBoundary = DEFAULT_ALIGN_LOOP_BOUNDARY; opts.compJitAlignLoopMinBlockWeight = DEFAULT_ALIGN_LOOP_MIN_BLOCK_WEIGHT; opts.compJitAlignLoopMaxCodeSize = DEFAULT_MAX_LOOPSIZE_FOR_ALIGN; opts.compJitHideAlignBehindJmp = true; #endif #ifdef TARGET_XARCH if (opts.compJitAlignLoopAdaptive) { // For adaptive alignment, padding limit is equal to the max instruction encoding // size which is 15 bytes. Hence (32 >> 1) - 1 = 15 bytes. opts.compJitAlignPaddingLimit = (opts.compJitAlignLoopBoundary >> 1) - 1; } else { // For non-adaptive alignment, padding limit is 1 less than the alignment boundary // specified. opts.compJitAlignPaddingLimit = opts.compJitAlignLoopBoundary - 1; } #elif TARGET_ARM64 if (opts.compJitAlignLoopAdaptive) { // For adaptive alignment, padding limit is same as specified by the alignment // boundary because all instructions are 4 bytes long. Hence (32 >> 1) = 16 bytes. opts.compJitAlignPaddingLimit = (opts.compJitAlignLoopBoundary >> 1); } else { // For non-adaptive, padding limit is same as specified by the alignment. 
opts.compJitAlignPaddingLimit = opts.compJitAlignLoopBoundary; } #endif assert(isPow2(opts.compJitAlignLoopBoundary)); #ifdef TARGET_ARM64 // The minimum encoding size for Arm64 is 4 bytes. assert(opts.compJitAlignLoopBoundary >= 4); #endif #if REGEN_SHORTCUTS || REGEN_CALLPAT // We never want to have debugging enabled when regenerating GC encoding patterns opts.compDbgCode = false; opts.compDbgInfo = false; opts.compDbgEnC = false; #endif compSetProcessor(); #ifdef DEBUG opts.dspOrder = false; // Optionally suppress inliner compiler instance dumping. // if (compIsForInlining()) { if (JitConfig.JitDumpInlinePhases() > 0) { verbose = impInlineInfo->InlinerCompiler->verbose; } else { verbose = false; } } else { verbose = false; codeGen->setVerbose(false); } verboseTrees = verbose && shouldUseVerboseTrees(); verboseSsa = verbose && shouldUseVerboseSsa(); asciiTrees = shouldDumpASCIITrees(); opts.dspDiffable = compIsForInlining() ? impInlineInfo->InlinerCompiler->opts.dspDiffable : false; #endif opts.altJit = false; #if defined(LATE_DISASM) && !defined(DEBUG) // For non-debug builds with the late disassembler built in, we currently always do late disassembly // (we have no way to determine when not to, since we don't have class/method names). // In the DEBUG case, this is initialized to false, below. opts.doLateDisasm = true; #endif #ifdef DEBUG const JitConfigValues::MethodSet* pfAltJit; if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { pfAltJit = &JitConfig.AltJitNgen(); } else { pfAltJit = &JitConfig.AltJit(); } if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT)) { if (pfAltJit->contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.altJit = true; } unsigned altJitLimit = ReinterpretHexAsDecimal(JitConfig.AltJitLimit()); if (altJitLimit > 0 && Compiler::jitTotalMethodCompiled >= altJitLimit) { opts.altJit = false; } } #else // !DEBUG const char* altJitVal; if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { altJitVal = JitConfig.AltJitNgen().list(); } else { altJitVal = JitConfig.AltJit().list(); } if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT)) { // In release mode, you either get all methods or no methods. You must use "*" as the parameter, or we ignore // it. You don't get to give a regular expression of methods to match. // (Partially, this is because we haven't computed and stored the method and class name except in debug, and it // might be expensive to do so.) if ((altJitVal != nullptr) && (strcmp(altJitVal, "*") == 0)) { opts.altJit = true; } } #endif // !DEBUG // Take care of COMPlus_AltJitExcludeAssemblies. if (opts.altJit) { // First, initialize the AltJitExcludeAssemblies list, but only do it once. if (!s_pAltJitExcludeAssembliesListInitialized) { const WCHAR* wszAltJitExcludeAssemblyList = JitConfig.AltJitExcludeAssemblies(); if (wszAltJitExcludeAssemblyList != nullptr) { // NOTE: The Assembly name list is allocated in the process heap, not in the no-release heap, which is // reclaimed // for every compilation. This is ok because we only allocate once, due to the static. s_pAltJitExcludeAssembliesList = new (HostAllocator::getHostAllocator()) AssemblyNamesList2(wszAltJitExcludeAssemblyList, HostAllocator::getHostAllocator()); } s_pAltJitExcludeAssembliesListInitialized = true; } if (s_pAltJitExcludeAssembliesList != nullptr) { // We have an exclusion list. See if this method is in an assembly that is on the list. 
// Note that we check this for every method, since we might inline across modules, and // if the inlinee module is on the list, we don't want to use the altjit for it. const char* methodAssemblyName = info.compCompHnd->getAssemblyName( info.compCompHnd->getModuleAssembly(info.compCompHnd->getClassModule(info.compClassHnd))); if (s_pAltJitExcludeAssembliesList->IsInList(methodAssemblyName)) { opts.altJit = false; } } } #ifdef DEBUG bool altJitConfig = !pfAltJit->isEmpty(); // If we have a non-empty AltJit config then we change all of these other // config values to refer only to the AltJit. Otherwise, a lot of COMPlus_* variables // would apply to both the altjit and the normal JIT, but we only care about // debugging the altjit if the COMPlus_AltJit configuration is set. // if (compIsForImportOnly() && (!altJitConfig || opts.altJit)) { if (JitConfig.JitImportBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { assert(!"JitImportBreak reached"); } } bool verboseDump = false; if (!altJitConfig || opts.altJit) { // We should only enable 'verboseDump' when we are actually compiling a matching method // and not enable it when we are just considering inlining a matching method. // if (!compIsForInlining()) { if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { if (JitConfig.NgenDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { verboseDump = true; } unsigned ngenHashDumpVal = (unsigned)JitConfig.NgenHashDump(); if ((ngenHashDumpVal != (DWORD)-1) && (ngenHashDumpVal == info.compMethodHash())) { verboseDump = true; } } else { if (JitConfig.JitDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { verboseDump = true; } unsigned jitHashDumpVal = (unsigned)JitConfig.JitHashDump(); if ((jitHashDumpVal != (DWORD)-1) && (jitHashDumpVal == info.compMethodHash())) { verboseDump = true; } } } } // Optionally suppress dumping Tier0 jit requests. // if (verboseDump && jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)) { verboseDump = (JitConfig.JitDumpTier0() > 0); } // Optionally suppress dumping except for a specific OSR jit request. // const int dumpAtOSROffset = JitConfig.JitDumpAtOSROffset(); if (verboseDump && (dumpAtOSROffset != -1)) { if (jitFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { verboseDump = (((IL_OFFSET)dumpAtOSROffset) == info.compILEntry); } else { verboseDump = false; } } if (verboseDump) { verbose = true; } #endif // DEBUG #ifdef FEATURE_SIMD #ifndef TARGET_ARM64 // Minimum bar for availing SIMD benefits is SSE2 on AMD64/x86. featureSIMD = jitFlags->IsSet(JitFlags::JIT_FLAG_FEATURE_SIMD); #endif setUsesSIMDTypes(false); #endif // FEATURE_SIMD lvaEnregEHVars = (compEnregLocals() && JitConfig.EnableEHWriteThru()); lvaEnregMultiRegVars = (compEnregLocals() && JitConfig.EnableMultiRegLocals()); if (compIsForImportOnly()) { return; } #if FEATURE_TAILCALL_OPT // By default opportunistic tail call optimization is enabled. // Recognition is done in the importer so this must be set for // inlinees as well. opts.compTailCallOpt = true; #endif // FEATURE_TAILCALL_OPT #if FEATURE_FASTTAILCALL // By default fast tail calls are enabled. 
opts.compFastTailCalls = true; #endif // FEATURE_FASTTAILCALL // Profile data // fgPgoSchema = nullptr; fgPgoData = nullptr; fgPgoSchemaCount = 0; fgPgoQueryResult = E_FAIL; fgPgoFailReason = nullptr; fgPgoSource = ICorJitInfo::PgoSource::Unknown; if (jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT)) { fgPgoQueryResult = info.compCompHnd->getPgoInstrumentationResults(info.compMethodHnd, &fgPgoSchema, &fgPgoSchemaCount, &fgPgoData, &fgPgoSource); // a failed result that also has a non-NULL fgPgoSchema // indicates that the ILSize for the method no longer matches // the ILSize for the method when profile data was collected. // // We will discard the IBC data in this case // if (FAILED(fgPgoQueryResult)) { fgPgoFailReason = (fgPgoSchema != nullptr) ? "No matching PGO data" : "No PGO data"; fgPgoData = nullptr; fgPgoSchema = nullptr; } // Optionally, disable use of profile data. // else if (JitConfig.JitDisablePgo() > 0) { fgPgoFailReason = "PGO data available, but JitDisablePgo > 0"; fgPgoQueryResult = E_FAIL; fgPgoData = nullptr; fgPgoSchema = nullptr; fgPgoDisabled = true; } #ifdef DEBUG // Optionally, enable use of profile data for only some methods. // else { static ConfigMethodRange JitEnablePgoRange; JitEnablePgoRange.EnsureInit(JitConfig.JitEnablePgoRange()); // Base this decision on the root method hash, so a method either sees all available // profile data (including that for inlinees), or none of it. // const unsigned hash = impInlineRoot()->info.compMethodHash(); if (!JitEnablePgoRange.Contains(hash)) { fgPgoFailReason = "PGO data available, but method hash NOT within JitEnablePgoRange"; fgPgoQueryResult = E_FAIL; fgPgoData = nullptr; fgPgoSchema = nullptr; fgPgoDisabled = true; } } // A successful result implies a non-NULL fgPgoSchema // if (SUCCEEDED(fgPgoQueryResult)) { assert(fgPgoSchema != nullptr); } // A failed result implies a NULL fgPgoSchema // see implementation of Compiler::fgHaveProfileData() // if (FAILED(fgPgoQueryResult)) { assert(fgPgoSchema == nullptr); } #endif } if (compIsForInlining()) { return; } // The rest of the opts fields that we initialize here // should only be used when we generate code for the method // They should not be used when importing or inlining CLANG_FORMAT_COMMENT_ANCHOR; #if FEATURE_TAILCALL_OPT opts.compTailCallLoopOpt = true; #endif // FEATURE_TAILCALL_OPT opts.genFPorder = true; opts.genFPopt = true; opts.instrCount = 0; opts.lvRefCount = 0; #ifdef PROFILING_SUPPORTED opts.compJitELTHookEnabled = false; #endif // PROFILING_SUPPORTED #if defined(TARGET_ARM64) // 0 is default: use the appropriate frame type based on the function. opts.compJitSaveFpLrWithCalleeSavedRegisters = 0; #endif // defined(TARGET_ARM64) #ifdef DEBUG opts.dspInstrs = false; opts.dspLines = false; opts.varNames = false; opts.dmpHex = false; opts.disAsm = false; opts.disAsmSpilled = false; opts.disDiffable = false; opts.disAddr = false; opts.disAlignment = false; opts.dspCode = false; opts.dspEHTable = false; opts.dspDebugInfo = false; opts.dspGCtbls = false; opts.disAsm2 = false; opts.dspUnwind = false; opts.compLongAddress = false; opts.optRepeat = false; #ifdef LATE_DISASM opts.doLateDisasm = false; #endif // LATE_DISASM compDebugBreak = false; // If we have a non-empty AltJit config then we change all of these other // config values to refer only to the AltJit. 
// if (!altJitConfig || opts.altJit) { if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { if ((JitConfig.NgenOrder() & 1) == 1) { opts.dspOrder = true; } if (JitConfig.NgenGCDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspGCtbls = true; } if (JitConfig.NgenDisasm().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.disAsm = true; } if (JitConfig.NgenDisasm().contains("SPILLED", nullptr, nullptr)) { opts.disAsmSpilled = true; } if (JitConfig.NgenUnwindDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspUnwind = true; } if (JitConfig.NgenEHDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspEHTable = true; } if (JitConfig.NgenDebugDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspDebugInfo = true; } } else { bool disEnabled = true; // Setup assembly name list for disassembly, if not already set up. if (!s_pJitDisasmIncludeAssembliesListInitialized) { const WCHAR* assemblyNameList = JitConfig.JitDisasmAssemblies(); if (assemblyNameList != nullptr) { s_pJitDisasmIncludeAssembliesList = new (HostAllocator::getHostAllocator()) AssemblyNamesList2(assemblyNameList, HostAllocator::getHostAllocator()); } s_pJitDisasmIncludeAssembliesListInitialized = true; } // If we have an assembly name list for disassembly, also check this method's assembly. if (s_pJitDisasmIncludeAssembliesList != nullptr && !s_pJitDisasmIncludeAssembliesList->IsEmpty()) { const char* assemblyName = info.compCompHnd->getAssemblyName( info.compCompHnd->getModuleAssembly(info.compCompHnd->getClassModule(info.compClassHnd))); if (!s_pJitDisasmIncludeAssembliesList->IsInList(assemblyName)) { disEnabled = false; } } if (disEnabled) { if ((JitConfig.JitOrder() & 1) == 1) { opts.dspOrder = true; } if (JitConfig.JitGCDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspGCtbls = true; } if (JitConfig.JitDisasm().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.disAsm = true; } if (JitConfig.JitDisasm().contains("SPILLED", nullptr, nullptr)) { opts.disAsmSpilled = true; } if (JitConfig.JitUnwindDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspUnwind = true; } if (JitConfig.JitEHDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspEHTable = true; } if (JitConfig.JitDebugDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspDebugInfo = true; } } } if (opts.disAsm && JitConfig.JitDisasmWithGC()) { opts.disasmWithGC = true; } #ifdef LATE_DISASM if (JitConfig.JitLateDisasm().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) opts.doLateDisasm = true; #endif // LATE_DISASM // This one applies to both Ngen/Jit Disasm output: COMPlus_JitDiffableDasm=1 if (JitConfig.DiffableDasm() != 0) { opts.disDiffable = true; opts.dspDiffable = true; } // This one applies to both Ngen/Jit Disasm output: COMPlus_JitDasmWithAddress=1 if (JitConfig.JitDasmWithAddress() != 0) { opts.disAddr = true; } if (JitConfig.JitDasmWithAlignmentBoundaries() != 0) { opts.disAlignment = true; } if (JitConfig.JitLongAddress() != 0) { opts.compLongAddress = true; } if (JitConfig.JitOptRepeat().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.optRepeat = true; } } if (verboseDump) { opts.dspCode 
= true; opts.dspEHTable = true; opts.dspGCtbls = true; opts.disAsm2 = true; opts.dspUnwind = true; verbose = true; verboseTrees = shouldUseVerboseTrees(); verboseSsa = shouldUseVerboseSsa(); codeGen->setVerbose(true); } treesBeforeAfterMorph = (JitConfig.TreesBeforeAfterMorph() == 1); morphNum = 0; // Initialize the morphed-trees counting. expensiveDebugCheckLevel = JitConfig.JitExpensiveDebugCheckLevel(); if (expensiveDebugCheckLevel == 0) { // If we're in a stress mode that modifies the flowgraph, make 1 the default. if (fgStressBBProf() || compStressCompile(STRESS_DO_WHILE_LOOPS, 30)) { expensiveDebugCheckLevel = 1; } } if (verbose) { printf("****** START compiling %s (MethodHash=%08x)\n", info.compFullName, info.compMethodHash()); printf("Generating code for %s %s\n", Target::g_tgtPlatformName(), Target::g_tgtCPUName); printf(""); // in our logic this causes a flush } if (JitConfig.JitBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { assert(!"JitBreak reached"); } unsigned jitHashBreakVal = (unsigned)JitConfig.JitHashBreak(); if ((jitHashBreakVal != (DWORD)-1) && (jitHashBreakVal == info.compMethodHash())) { assert(!"JitHashBreak reached"); } if (verbose || JitConfig.JitDebugBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args) || JitConfig.JitBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { compDebugBreak = true; } memset(compActiveStressModes, 0, sizeof(compActiveStressModes)); // Read function list, if not already read, and there exists such a list. if (!s_pJitFunctionFileInitialized) { const WCHAR* functionFileName = JitConfig.JitFunctionFile(); if (functionFileName != nullptr) { s_pJitMethodSet = new (HostAllocator::getHostAllocator()) MethodSet(functionFileName, HostAllocator::getHostAllocator()); } s_pJitFunctionFileInitialized = true; } #endif // DEBUG //------------------------------------------------------------------------- #ifdef DEBUG assert(!codeGen->isGCTypeFixed()); opts.compGcChecks = (JitConfig.JitGCChecks() != 0) || compStressCompile(STRESS_GENERIC_VARN, 5); #endif #if defined(DEBUG) && defined(TARGET_XARCH) enum { STACK_CHECK_ON_RETURN = 0x1, STACK_CHECK_ON_CALL = 0x2, STACK_CHECK_ALL = 0x3 }; DWORD dwJitStackChecks = JitConfig.JitStackChecks(); if (compStressCompile(STRESS_GENERIC_VARN, 5)) { dwJitStackChecks = STACK_CHECK_ALL; } opts.compStackCheckOnRet = (dwJitStackChecks & DWORD(STACK_CHECK_ON_RETURN)) != 0; #if defined(TARGET_X86) opts.compStackCheckOnCall = (dwJitStackChecks & DWORD(STACK_CHECK_ON_CALL)) != 0; #endif // defined(TARGET_X86) #endif // defined(DEBUG) && defined(TARGET_XARCH) #if MEASURE_MEM_ALLOC s_dspMemStats = (JitConfig.DisplayMemStats() != 0); #endif #ifdef PROFILING_SUPPORTED opts.compNoPInvokeInlineCB = jitFlags->IsSet(JitFlags::JIT_FLAG_PROF_NO_PINVOKE_INLINE); // Cache the profiler handle if (jitFlags->IsSet(JitFlags::JIT_FLAG_PROF_ENTERLEAVE)) { bool hookNeeded; bool indirected; info.compCompHnd->GetProfilingHandle(&hookNeeded, &compProfilerMethHnd, &indirected); compProfilerHookNeeded = !!hookNeeded; compProfilerMethHndIndirected = !!indirected; } else { compProfilerHookNeeded = false; compProfilerMethHnd = nullptr; compProfilerMethHndIndirected = false; } // Honour COMPlus_JitELTHookEnabled or STRESS_PROFILER_CALLBACKS stress mode // only if VM has not asked us to generate profiler hooks in the first place. // That is, override VM only if it hasn't asked for a profiler callback for this method. 
// Don't run this stress mode when pre-JITing, as we would need to emit a relocation // for the call to the fake ELT hook, which wouldn't make sense, as we can't store that // in the pre-JIT image. if (!compProfilerHookNeeded) { if ((JitConfig.JitELTHookEnabled() != 0) || (!jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && compStressCompile(STRESS_PROFILER_CALLBACKS, 5))) { opts.compJitELTHookEnabled = true; } } // TBD: Exclude PInvoke stubs if (opts.compJitELTHookEnabled) { compProfilerMethHnd = (void*)DummyProfilerELTStub; compProfilerMethHndIndirected = false; } #endif // PROFILING_SUPPORTED #if FEATURE_TAILCALL_OPT const WCHAR* strTailCallOpt = JitConfig.TailCallOpt(); if (strTailCallOpt != nullptr) { opts.compTailCallOpt = (UINT)_wtoi(strTailCallOpt) != 0; } if (JitConfig.TailCallLoopOpt() == 0) { opts.compTailCallLoopOpt = false; } #endif #if FEATURE_FASTTAILCALL if (JitConfig.FastTailCalls() == 0) { opts.compFastTailCalls = false; } #endif // FEATURE_FASTTAILCALL #ifdef CONFIGURABLE_ARM_ABI opts.compUseSoftFP = jitFlags->IsSet(JitFlags::JIT_FLAG_SOFTFP_ABI); unsigned int softFPConfig = opts.compUseSoftFP ? 2 : 1; unsigned int oldSoftFPConfig = InterlockedCompareExchange(&GlobalJitOptions::compUseSoftFPConfigured, softFPConfig, 0); if (oldSoftFPConfig != softFPConfig && oldSoftFPConfig != 0) { // There are no current scenarios where the abi can change during the lifetime of a process // that uses the JIT. If such a change occurs, either compFeatureHfa will need to change to a TLS static // or we will need to have some means to reset the flag safely. NO_WAY("SoftFP ABI setting changed during lifetime of process"); } GlobalJitOptions::compFeatureHfa = !opts.compUseSoftFP; #elif defined(ARM_SOFTFP) && defined(TARGET_ARM) // Armel is unconditionally enabled in the JIT. Verify that the VM side agrees. assert(jitFlags->IsSet(JitFlags::JIT_FLAG_SOFTFP_ABI)); #elif defined(TARGET_ARM) assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_SOFTFP_ABI)); #endif // CONFIGURABLE_ARM_ABI opts.compScopeInfo = opts.compDbgInfo; #ifdef LATE_DISASM codeGen->getDisAssembler().disOpenForLateDisAsm(info.compMethodName, info.compClassName, info.compMethodInfo->args.pSig); #endif //------------------------------------------------------------------------- opts.compReloc = jitFlags->IsSet(JitFlags::JIT_FLAG_RELOC); #ifdef DEBUG #if defined(TARGET_XARCH) // Whether encoding of absolute addr as PC-rel offset is enabled opts.compEnablePCRelAddr = (JitConfig.EnablePCRelAddr() != 0); #endif #endif // DEBUG opts.compProcedureSplitting = jitFlags->IsSet(JitFlags::JIT_FLAG_PROCSPLIT); #ifdef TARGET_ARM64 // TODO-ARM64-NYI: enable hot/cold splitting opts.compProcedureSplitting = false; #endif // TARGET_ARM64 #ifdef DEBUG opts.compProcedureSplittingEH = opts.compProcedureSplitting; #endif // DEBUG if (opts.compProcedureSplitting) { // Note that opts.compdbgCode is true under ngen for checked assemblies! opts.compProcedureSplitting = !opts.compDbgCode; #ifdef DEBUG // JitForceProcedureSplitting is used to force procedure splitting on checked assemblies. // This is useful for debugging on a checked build. Note that we still only do procedure // splitting in the zapper. if (JitConfig.JitForceProcedureSplitting().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.compProcedureSplitting = true; } // JitNoProcedureSplitting will always disable procedure splitting. 
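// (Illustrative usage, with a hypothetical method name: both of these are method-set style
// configs, so setting e.g. COMPlus_JitNoProcedureSplitting=Main on a checked build is the
// typical way to exercise the check below.)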
if (JitConfig.JitNoProcedureSplitting().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.compProcedureSplitting = false; } // // JitNoProcedureSplittingEH will disable procedure splitting in functions with EH. if (JitConfig.JitNoProcedureSplittingEH().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.compProcedureSplittingEH = false; } #endif } #ifdef DEBUG // Now, set compMaxUncheckedOffsetForNullObject for STRESS_NULL_OBJECT_CHECK if (compStressCompile(STRESS_NULL_OBJECT_CHECK, 30)) { compMaxUncheckedOffsetForNullObject = (size_t)JitConfig.JitMaxUncheckedOffset(); if (verbose) { printf("STRESS_NULL_OBJECT_CHECK: compMaxUncheckedOffsetForNullObject=0x%X\n", compMaxUncheckedOffsetForNullObject); } } if (verbose) { // If we are compiling for a specific tier, make that very obvious in the output. // Note that we don't expect multiple TIER flags to be set at one time, but there // is nothing preventing that. if (jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)) { printf("OPTIONS: Tier-0 compilation (set COMPlus_TieredCompilation=0 to disable)\n"); } if (jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1)) { printf("OPTIONS: Tier-1 compilation\n"); } if (compSwitchedToOptimized) { printf("OPTIONS: Tier-0 compilation, switched to FullOpts\n"); } if (compSwitchedToMinOpts) { printf("OPTIONS: Tier-1/FullOpts compilation, switched to MinOpts\n"); } if (jitFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { printf("OPTIONS: OSR variant with entry point 0x%x\n", info.compILEntry); } printf("OPTIONS: compCodeOpt = %s\n", (opts.compCodeOpt == BLENDED_CODE) ? "BLENDED_CODE" : (opts.compCodeOpt == SMALL_CODE) ? "SMALL_CODE" : (opts.compCodeOpt == FAST_CODE) ? "FAST_CODE" : "UNKNOWN_CODE"); printf("OPTIONS: compDbgCode = %s\n", dspBool(opts.compDbgCode)); printf("OPTIONS: compDbgInfo = %s\n", dspBool(opts.compDbgInfo)); printf("OPTIONS: compDbgEnC = %s\n", dspBool(opts.compDbgEnC)); printf("OPTIONS: compProcedureSplitting = %s\n", dspBool(opts.compProcedureSplitting)); printf("OPTIONS: compProcedureSplittingEH = %s\n", dspBool(opts.compProcedureSplittingEH)); if (jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT) && fgHaveProfileData()) { printf("OPTIONS: optimized using %s profile data\n", pgoSourceToString(fgPgoSource)); } if (fgPgoFailReason != nullptr) { printf("OPTIONS: %s\n", fgPgoFailReason); } if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { printf("OPTIONS: Jit invoked for ngen\n"); } } #endif #ifdef PROFILING_SUPPORTED #ifdef UNIX_AMD64_ABI if (compIsProfilerHookNeeded()) { opts.compNeedToAlignFrame = true; } #endif // UNIX_AMD64_ABI #endif #if defined(DEBUG) && defined(TARGET_ARM64) if ((s_pJitMethodSet == nullptr) || s_pJitMethodSet->IsActiveMethod(info.compFullName, info.compMethodHash())) { opts.compJitSaveFpLrWithCalleeSavedRegisters = JitConfig.JitSaveFpLrWithCalleeSavedRegisters(); } #endif // defined(DEBUG) && defined(TARGET_ARM64) } #ifdef DEBUG bool Compiler::compJitHaltMethod() { /* This method returns true when we use an INS_BREAKPOINT to allow us to step into the generated native code */ /* Note that this these two "Jit" environment variables also work for ngen images */ if (JitConfig.JitHalt().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { return true; } /* Use this Hash variant when there are a lot of method with the same name and different signatures */ unsigned fJitHashHaltVal = (unsigned)JitConfig.JitHashHalt(); if ((fJitHashHaltVal != (unsigned)-1) && (fJitHashHaltVal == info.compMethodHash())) { return 
true; } return false; } /***************************************************************************** * Should we use a "stress-mode" for the given stressArea. We have different * areas to allow the areas to be mixed in different combinations in * different methods. * 'weight' indicates how often (as a percentage) the area should be stressed. * It should reflect the usefulness:overhead ratio. */ const LPCWSTR Compiler::s_compStressModeNames[STRESS_COUNT + 1] = { #define STRESS_MODE(mode) W("STRESS_") W(#mode), STRESS_MODES #undef STRESS_MODE }; //------------------------------------------------------------------------ // compStressCompile: determine if a stress mode should be enabled // // Arguments: // stressArea - stress mode to possibly enable // weight - percent of time this mode should be turned on // (range 0 to 100); weight 0 effectively disables // // Returns: // true if this stress mode is enabled // // Notes: // Methods may be excluded from stress via name or hash. // // Particular stress modes may be disabled or forcibly enabled. // // With JitStress=2, some stress modes are enabled regardless of weight; // these modes are the ones after COUNT_VARN in the enumeration. // // For other modes or for nonzero JitStress values, stress will be // enabled selectively for roughly weight% of methods. // bool Compiler::compStressCompile(compStressArea stressArea, unsigned weight) { // This can be called early, before info is fully set up. if ((info.compMethodName == nullptr) || (info.compFullName == nullptr)) { return false; } // Inlinees defer to the root method for stress, so that we can // more easily isolate methods that cause stress failures. if (compIsForInlining()) { return impInlineRoot()->compStressCompile(stressArea, weight); } const bool doStress = compStressCompileHelper(stressArea, weight); if (doStress && !compActiveStressModes[stressArea]) { if (verbose) { printf("\n\n*** JitStress: %ws ***\n\n", s_compStressModeNames[stressArea]); } compActiveStressModes[stressArea] = 1; } return doStress; } //------------------------------------------------------------------------ // compStressCompileHelper: helper to determine if a stress mode should be enabled // // Arguments: // stressArea - stress mode to possibly enable // weight - percent of time this mode should be turned on // (range 0 to 100); weight 0 effectively disables // // Returns: // true if this stress mode is enabled // // Notes: // See compStressCompile // bool Compiler::compStressCompileHelper(compStressArea stressArea, unsigned weight) { if (!bRangeAllowStress) { return false; } if (!JitConfig.JitStressOnly().isEmpty() && !JitConfig.JitStressOnly().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { return false; } // Does user explicitly prevent using this STRESS_MODE through the command line? const WCHAR* strStressModeNamesNot = JitConfig.JitStressModeNamesNot(); if ((strStressModeNamesNot != nullptr) && (wcsstr(strStressModeNamesNot, s_compStressModeNames[stressArea]) != nullptr)) { return false; } // Does user explicitly set this STRESS_MODE through the command line? const WCHAR* strStressModeNames = JitConfig.JitStressModeNames(); if (strStressModeNames != nullptr) { if (wcsstr(strStressModeNames, s_compStressModeNames[stressArea]) != nullptr) { return true; } // This stress mode name did not match anything in the stress // mode allowlist. If user has requested only enable mode, // don't allow this stress mode to turn on. 
const bool onlyEnableMode = JitConfig.JitStressModeNamesOnly() != 0; if (onlyEnableMode) { return false; } } // 0: No stress (Except when explicitly set in complus_JitStressModeNames) // !=2: Vary stress. Performance will be slightly/moderately degraded // 2: Check-all stress. Performance will be REALLY horrible const int stressLevel = getJitStressLevel(); assert(weight <= MAX_STRESS_WEIGHT); // Check for boundary conditions if (stressLevel == 0 || weight == 0) { return false; } // Should we allow unlimited stress ? if ((stressArea > STRESS_COUNT_VARN) && (stressLevel == 2)) { return true; } if (weight == MAX_STRESS_WEIGHT) { return true; } // Get a hash which can be compared with 'weight' assert(stressArea != 0); const unsigned hash = (info.compMethodHash() ^ stressArea ^ stressLevel) % MAX_STRESS_WEIGHT; assert(hash < MAX_STRESS_WEIGHT && weight <= MAX_STRESS_WEIGHT); return (hash < weight); } //------------------------------------------------------------------------ // compPromoteFewerStructs: helper to determine if the local // should not be promoted under a stress mode. // // Arguments: // lclNum - local number to test // // Returns: // true if this local should not be promoted. // // Notes: // Reject ~50% of the potential promotions if STRESS_PROMOTE_FEWER_STRUCTS is active. // bool Compiler::compPromoteFewerStructs(unsigned lclNum) { bool rejectThisPromo = false; const bool promoteLess = compStressCompile(STRESS_PROMOTE_FEWER_STRUCTS, 50); if (promoteLess) { rejectThisPromo = (((info.compMethodHash() ^ lclNum) & 1) == 0); } return rejectThisPromo; } #endif // DEBUG void Compiler::compInitDebuggingInfo() { #ifdef DEBUG if (verbose) { printf("*************** In compInitDebuggingInfo() for %s\n", info.compFullName); } #endif /*------------------------------------------------------------------------- * * Get hold of the local variable records, if there are any */ info.compVarScopesCount = 0; if (opts.compScopeInfo) { eeGetVars(); } compInitVarScopeMap(); if (opts.compScopeInfo || opts.compDbgCode) { compInitScopeLists(); } if (opts.compDbgCode && (info.compVarScopesCount > 0)) { /* Create a new empty basic block. fgExtendDbgLifetimes() may add initialization of variables which are in scope right from the start of the (real) first BB (and therefore artificially marked as alive) into this block. */ fgEnsureFirstBBisScratch(); fgNewStmtAtEnd(fgFirstBB, gtNewNothingNode()); JITDUMP("Debuggable code - Add new %s to perform initialization of variables\n", fgFirstBB->dspToString()); } /*------------------------------------------------------------------------- * * Read the stmt-offsets table and the line-number table */ info.compStmtOffsetsImplicit = ICorDebugInfo::NO_BOUNDARIES; // We can only report debug info for EnC at places where the stack is empty. // Actually, at places where there are not live temps. Else, we won't be able // to map between the old and the new versions correctly as we won't have // any info for the live temps. 
assert(!opts.compDbgEnC || !opts.compDbgInfo || 0 == (info.compStmtOffsetsImplicit & ~ICorDebugInfo::STACK_EMPTY_BOUNDARIES)); info.compStmtOffsetsCount = 0; if (opts.compDbgInfo) { /* Get hold of the line# records, if there are any */ eeGetStmtOffsets(); #ifdef DEBUG if (verbose) { printf("info.compStmtOffsetsCount = %d\n", info.compStmtOffsetsCount); printf("info.compStmtOffsetsImplicit = %04Xh", info.compStmtOffsetsImplicit); if (info.compStmtOffsetsImplicit) { printf(" ( "); if (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) { printf("STACK_EMPTY "); } if (info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) { printf("NOP "); } if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) { printf("CALL_SITE "); } printf(")"); } printf("\n"); IL_OFFSET* pOffs = info.compStmtOffsets; for (unsigned i = 0; i < info.compStmtOffsetsCount; i++, pOffs++) { printf("%02d) IL_%04Xh\n", i, *pOffs); } } #endif } } void Compiler::compSetOptimizationLevel() { bool theMinOptsValue; #pragma warning(suppress : 4101) unsigned jitMinOpts; if (compIsForInlining()) { theMinOptsValue = impInlineInfo->InlinerCompiler->opts.MinOpts(); goto _SetMinOpts; } theMinOptsValue = false; if (opts.compFlags == CLFLG_MINOPT) { JITLOG((LL_INFO100, "CLFLG_MINOPT set for method %s\n", info.compFullName)); theMinOptsValue = true; } #ifdef DEBUG jitMinOpts = JitConfig.JitMinOpts(); if (!theMinOptsValue && (jitMinOpts > 0)) { // jitTotalMethodCompiled does not include the method that is being compiled now, so make +1. unsigned methodCount = Compiler::jitTotalMethodCompiled + 1; unsigned methodCountMask = methodCount & 0xFFF; unsigned kind = (jitMinOpts & 0xF000000) >> 24; switch (kind) { default: if (jitMinOpts <= methodCount) { if (verbose) { printf(" Optimizations disabled by JitMinOpts and methodCount\n"); } theMinOptsValue = true; } break; case 0xD: { unsigned firstMinopts = (jitMinOpts >> 12) & 0xFFF; unsigned secondMinopts = (jitMinOpts >> 0) & 0xFFF; if ((firstMinopts == methodCountMask) || (secondMinopts == methodCountMask)) { if (verbose) { printf("0xD: Optimizations disabled by JitMinOpts and methodCountMask\n"); } theMinOptsValue = true; } } break; case 0xE: { unsigned startMinopts = (jitMinOpts >> 12) & 0xFFF; unsigned endMinopts = (jitMinOpts >> 0) & 0xFFF; if ((startMinopts <= methodCountMask) && (endMinopts >= methodCountMask)) { if (verbose) { printf("0xE: Optimizations disabled by JitMinOpts and methodCountMask\n"); } theMinOptsValue = true; } } break; case 0xF: { unsigned bitsZero = (jitMinOpts >> 12) & 0xFFF; unsigned bitsOne = (jitMinOpts >> 0) & 0xFFF; if (((methodCountMask & bitsOne) == bitsOne) && ((~methodCountMask & bitsZero) == bitsZero)) { if (verbose) { printf("0xF: Optimizations disabled by JitMinOpts and methodCountMask\n"); } theMinOptsValue = true; } } break; } } if (!theMinOptsValue) { if (JitConfig.JitMinOptsName().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { theMinOptsValue = true; } } #if 0 // The code in this #if can be used to debug optimization issues according to method hash. // To use, uncomment, rebuild and set environment variables minoptshashlo and minoptshashhi. 
#ifdef DEBUG unsigned methHash = info.compMethodHash(); char* lostr = getenv("minoptshashlo"); unsigned methHashLo = 0; if (lostr != nullptr) { sscanf_s(lostr, "%x", &methHashLo); char* histr = getenv("minoptshashhi"); unsigned methHashHi = UINT32_MAX; if (histr != nullptr) { sscanf_s(histr, "%x", &methHashHi); if (methHash >= methHashLo && methHash <= methHashHi) { printf("MinOpts for method %s, hash = %08x.\n", info.compFullName, methHash); printf(""); // in our logic this causes a flush theMinOptsValue = true; } } } #endif #endif if (compStressCompile(STRESS_MIN_OPTS, 5)) { theMinOptsValue = true; } // For PREJIT we never drop down to MinOpts // unless unless CLFLG_MINOPT is set else if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { if ((unsigned)JitConfig.JitMinOptsCodeSize() < info.compILCodeSize) { JITLOG((LL_INFO10, "IL Code Size exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsInstrCount() < opts.instrCount) { JITLOG((LL_INFO10, "IL instruction count exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsBbCount() < fgBBcount) { JITLOG((LL_INFO10, "Basic Block count exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsLvNumCount() < lvaCount) { JITLOG((LL_INFO10, "Local Variable Num count exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsLvRefCount() < opts.lvRefCount) { JITLOG((LL_INFO10, "Local Variable Ref count exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } if (theMinOptsValue == true) { JITLOG((LL_INFO10000, "IL Code Size,Instr %4d,%4d, Basic Block count %3d, Local Variable Num,Ref count " "%3d,%3d for method %s\n", info.compILCodeSize, opts.instrCount, fgBBcount, lvaCount, opts.lvRefCount, info.compFullName)); if (JitConfig.JitBreakOnMinOpts() != 0) { assert(!"MinOpts enabled"); } } } #else // !DEBUG // Retail check if we should force Minopts due to the complexity of the method // For PREJIT we never drop down to MinOpts // unless unless CLFLG_MINOPT is set if (!theMinOptsValue && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && ((DEFAULT_MIN_OPTS_CODE_SIZE < info.compILCodeSize) || (DEFAULT_MIN_OPTS_INSTR_COUNT < opts.instrCount) || (DEFAULT_MIN_OPTS_BB_COUNT < fgBBcount) || (DEFAULT_MIN_OPTS_LV_NUM_COUNT < lvaCount) || (DEFAULT_MIN_OPTS_LV_REF_COUNT < opts.lvRefCount))) { theMinOptsValue = true; } #endif // DEBUG JITLOG((LL_INFO10000, "IL Code Size,Instr %4d,%4d, Basic Block count %3d, Local Variable Num,Ref count %3d,%3d for method %s\n", info.compILCodeSize, opts.instrCount, fgBBcount, lvaCount, opts.lvRefCount, info.compFullName)); #if 0 // The code in this #if has been useful in debugging loop cloning issues, by // enabling selective enablement of the loop cloning optimization according to // method hash. #ifdef DEBUG if (!theMinOptsValue) { unsigned methHash = info.compMethodHash(); char* lostr = getenv("opthashlo"); unsigned methHashLo = 0; if (lostr != NULL) { sscanf_s(lostr, "%x", &methHashLo); // methHashLo = (unsigned(atoi(lostr)) << 2); // So we don't have to use negative numbers. } char* histr = getenv("opthashhi"); unsigned methHashHi = UINT32_MAX; if (histr != NULL) { sscanf_s(histr, "%x", &methHashHi); // methHashHi = (unsigned(atoi(histr)) << 2); // So we don't have to use negative numbers. 
} if (methHash < methHashLo || methHash > methHashHi) { theMinOptsValue = true; } else { printf("Doing optimization in in %s (0x%x).\n", info.compFullName, methHash); } } #endif #endif _SetMinOpts: // Set the MinOpts value opts.SetMinOpts(theMinOptsValue); // Notify the VM if MinOpts is being used when not requested if (theMinOptsValue && !compIsForInlining() && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT) && !opts.compDbgCode) { info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_SWITCHED_TO_MIN_OPT); opts.jitFlags->Clear(JitFlags::JIT_FLAG_TIER1); compSwitchedToMinOpts = true; } #ifdef DEBUG if (verbose && !compIsForInlining()) { printf("OPTIONS: opts.MinOpts() == %s\n", opts.MinOpts() ? "true" : "false"); } #endif /* Control the optimizations */ if (opts.OptimizationDisabled()) { opts.compFlags &= ~CLFLG_MAXOPT; opts.compFlags |= CLFLG_MINOPT; } if (!compIsForInlining()) { codeGen->setFramePointerRequired(false); codeGen->setFrameRequired(false); if (opts.OptimizationDisabled()) { codeGen->setFrameRequired(true); } #if !defined(TARGET_AMD64) // The VM sets JitFlags::JIT_FLAG_FRAMED for two reasons: (1) the COMPlus_JitFramed variable is set, or // (2) the function is marked "noinline". The reason for #2 is that people mark functions // noinline to ensure the show up on in a stack walk. But for AMD64, we don't need a frame // pointer for the frame to show up in stack walk. if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_FRAMED)) codeGen->setFrameRequired(true); #endif if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { // The JIT doesn't currently support loop alignment for prejitted images. // (The JIT doesn't know the final address of the code, hence // it can't align code based on unknown addresses.) codeGen->SetAlignLoops(false); // loop alignment not supported for prejitted code } else { codeGen->SetAlignLoops(JitConfig.JitAlignLoops() == 1); } } #if TARGET_ARM // A single JitStress=1 Linux ARM32 test fails when we expand virtual calls early // JIT\HardwareIntrinsics\General\Vector128_1\Vector128_1_ro // opts.compExpandCallsEarly = (JitConfig.JitExpandCallsEarly() == 2); #else opts.compExpandCallsEarly = (JitConfig.JitExpandCallsEarly() != 0); #endif fgCanRelocateEHRegions = true; } #ifdef TARGET_ARMARCH // Function compRsvdRegCheck: // given a curState to use for calculating the total frame size // it will return true if the REG_OPT_RSVD should be reserved so // that it can be use to form large offsets when accessing stack // based LclVar including both incoming and out going argument areas. // // The method advances the frame layout state to curState by calling // lvaFrameSize(curState). // bool Compiler::compRsvdRegCheck(FrameLayoutState curState) { // Always do the layout even if returning early. Callers might // depend on us to do the layout. unsigned frameSize = lvaFrameSize(curState); JITDUMP("\n" "compRsvdRegCheck\n" " frame size = %6d\n" " compArgSize = %6d\n", frameSize, compArgSize); if (opts.MinOpts()) { // Have a recovery path in case we fail to reserve REG_OPT_RSVD and go // over the limit of SP and FP offset ranges due to large // temps. JITDUMP(" Returning true (MinOpts)\n\n"); return true; } unsigned calleeSavedRegMaxSz = CALLEE_SAVED_REG_MAXSZ; if (compFloatingPointUsed) { calleeSavedRegMaxSz += CALLEE_SAVED_FLOAT_MAXSZ; } calleeSavedRegMaxSz += REGSIZE_BYTES; // we always push LR. 
See genPushCalleeSavedRegisters noway_assert(frameSize >= calleeSavedRegMaxSz); #if defined(TARGET_ARM64) // TODO-ARM64-CQ: update this! JITDUMP(" Returning true (ARM64)\n\n"); return true; // just always assume we'll need it, for now #else // TARGET_ARM // frame layout: // // ... high addresses ... // frame contents size // ------------------- ------------------------ // inArgs compArgSize (includes prespill) // caller SP ---> // prespill // LR REGSIZE_BYTES // R11 ---> R11 REGSIZE_BYTES // callee saved regs CALLEE_SAVED_REG_MAXSZ (32 bytes) // optional saved fp regs CALLEE_SAVED_FLOAT_MAXSZ (64 bytes) // lclSize // incl. TEMPS MAX_SPILL_TEMP_SIZE // incl. outArgs // SP ---> // ... low addresses ... // // When codeGen->isFramePointerRequired is true, R11 will be established as a frame pointer. // We can then use R11 to access incoming args with positive offsets, and LclVars with // negative offsets. // // In functions with EH, in the non-funclet (or main) region, even though we will have a // frame pointer, we can use SP with positive offsets to access any or all locals or arguments // that we can reach with SP-relative encodings. The funclet region might require the reserved // register, since it must use offsets from R11 to access the parent frame. unsigned maxR11PositiveEncodingOffset = compFloatingPointUsed ? 0x03FC : 0x0FFF; JITDUMP(" maxR11PositiveEncodingOffset = %6d\n", maxR11PositiveEncodingOffset); // Floating point load/store instructions (VLDR/VSTR) can address up to -0x3FC from R11, but we // don't know if there are either no integer locals, or if we don't need large negative offsets // for the integer locals, so we must use the integer max negative offset, which is a // smaller (absolute value) number. unsigned maxR11NegativeEncodingOffset = 0x00FF; // This is a negative offset from R11. JITDUMP(" maxR11NegativeEncodingOffset = %6d\n", maxR11NegativeEncodingOffset); // -1 because otherwise we are computing the address just beyond the last argument, which we don't need to do. unsigned maxR11PositiveOffset = compArgSize + (2 * REGSIZE_BYTES) - 1; JITDUMP(" maxR11PositiveOffset = %6d\n", maxR11PositiveOffset); // The value is positive, but represents a negative offset from R11. // frameSize includes callee-saved space for R11 and LR, which are at non-negative offsets from R11 // (+0 and +4, respectively), so don't include those in the max possible negative offset. assert(frameSize >= (2 * REGSIZE_BYTES)); unsigned maxR11NegativeOffset = frameSize - (2 * REGSIZE_BYTES); JITDUMP(" maxR11NegativeOffset = %6d\n", maxR11NegativeOffset); if (codeGen->isFramePointerRequired()) { if (maxR11NegativeOffset > maxR11NegativeEncodingOffset) { JITDUMP(" Returning true (frame required and maxR11NegativeOffset)\n\n"); return true; } if (maxR11PositiveOffset > maxR11PositiveEncodingOffset) { JITDUMP(" Returning true (frame required and maxR11PositiveOffset)\n\n"); return true; } } // Now consider the SP based frame case. Note that we will use SP based offsets to access the stack in R11 based // frames in the non-funclet main code area. unsigned maxSPPositiveEncodingOffset = compFloatingPointUsed ? 0x03FC : 0x0FFF; JITDUMP(" maxSPPositiveEncodingOffset = %6d\n", maxSPPositiveEncodingOffset); // -1 because otherwise we are computing the address just beyond the last argument, which we don't need to do. 
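// (A purely illustrative calculation with made-up values: assuming compFloatingPointUsed is
// false, compArgSize = 0x10 and frameSize = 0x1200, then maxR11PositiveOffset above is
// 0x10 + 2*4 - 1 = 0x17, while maxSPPositiveOffset computed just below is
// 0x10 + 0x1200 - 1 = 0x120F. That exceeds even the 0x0FFF integer encoding limit, so with
// these numbers compRsvdRegCheck would return true and REG_OPT_RSVD would be reserved.)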
assert(compArgSize + frameSize > 0); unsigned maxSPPositiveOffset = compArgSize + frameSize - 1; if (codeGen->isFramePointerUsed()) { // We have a frame pointer, so we can use it to access part of the stack, even if SP can't reach those parts. // We will still generate SP-relative offsets if SP can reach. // First, check that the stack between R11 and SP can be fully reached, either via negative offset from FP // or positive offset from SP. Don't count stored R11 or LR, which are reached from positive offsets from FP. unsigned maxSPLocalsCombinedOffset = frameSize - (2 * REGSIZE_BYTES) - 1; JITDUMP(" maxSPLocalsCombinedOffset = %6d\n", maxSPLocalsCombinedOffset); if (maxSPLocalsCombinedOffset > maxSPPositiveEncodingOffset) { // Can R11 help? unsigned maxRemainingLocalsCombinedOffset = maxSPLocalsCombinedOffset - maxSPPositiveEncodingOffset; JITDUMP(" maxRemainingLocalsCombinedOffset = %6d\n", maxRemainingLocalsCombinedOffset); if (maxRemainingLocalsCombinedOffset > maxR11NegativeEncodingOffset) { JITDUMP(" Returning true (frame pointer exists; R11 and SP can't reach entire stack between them)\n\n"); return true; } // Otherwise, yes, we can address the remaining parts of the locals frame with negative offsets from R11. } // Check whether either R11 or SP can access the arguments. if ((maxR11PositiveOffset > maxR11PositiveEncodingOffset) && (maxSPPositiveOffset > maxSPPositiveEncodingOffset)) { JITDUMP(" Returning true (frame pointer exists; R11 and SP can't reach all arguments)\n\n"); return true; } } else { if (maxSPPositiveOffset > maxSPPositiveEncodingOffset) { JITDUMP(" Returning true (no frame pointer exists; SP can't reach all of frame)\n\n"); return true; } } // We won't need to reserve REG_OPT_RSVD. // JITDUMP(" Returning false\n\n"); return false; #endif // TARGET_ARM } #endif // TARGET_ARMARCH //------------------------------------------------------------------------ // compGetTieringName: get a string describing tiered compilation settings // for this method // // Arguments: // wantShortName - true if a short name is ok (say for using in file names) // // Returns: // String describing tiering decisions for this method, including cases // where the jit codegen will differ from what the runtime requested. // const char* Compiler::compGetTieringName(bool wantShortName) const { const bool tier0 = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0); const bool tier1 = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1); assert(!tier0 || !tier1); // We don't expect multiple TIER flags to be set at one time. if (tier0) { return "Tier0"; } else if (tier1) { if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { return "Tier1-OSR"; } else { return "Tier1"; } } else if (opts.OptimizationEnabled()) { if (compSwitchedToOptimized) { return wantShortName ? "Tier0-FullOpts" : "Tier-0 switched to FullOpts"; } else { return "FullOpts"; } } else if (opts.MinOpts()) { if (compSwitchedToMinOpts) { if (compSwitchedToOptimized) { return wantShortName ? "Tier0-FullOpts-MinOpts" : "Tier-0 switched to FullOpts, then to MinOpts"; } else { return wantShortName ? "Tier0-MinOpts" : "Tier-0 switched MinOpts"; } } else { return "MinOpts"; } } else if (opts.compDbgCode) { return "Debug"; } else { return wantShortName ? 
"Unknown" : "Unknown optimization level"; } } //------------------------------------------------------------------------ // compGetStressMessage: get a string describing jitstress capability // for this method // // Returns: // An empty string if stress is not enabled, else a string describing // if this method is subject to stress or is excluded by name or hash. // const char* Compiler::compGetStressMessage() const { // Add note about stress where appropriate const char* stressMessage = ""; #ifdef DEBUG // Is stress enabled via mode name or level? if ((JitConfig.JitStressModeNames() != nullptr) || (getJitStressLevel() > 0)) { // Is the method being jitted excluded from stress via range? if (bRangeAllowStress) { // Or is it excluded via name? if (!JitConfig.JitStressOnly().isEmpty() || !JitConfig.JitStressOnly().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { // Not excluded -- stress can happen stressMessage = " JitStress"; } else { stressMessage = " NoJitStress(Only)"; } } else { stressMessage = " NoJitStress(Range)"; } } #endif // DEBUG return stressMessage; } void Compiler::compFunctionTraceStart() { #ifdef DEBUG if (compIsForInlining()) { return; } if ((JitConfig.JitFunctionTrace() != 0) && !opts.disDiffable) { LONG newJitNestingLevel = InterlockedIncrement(&Compiler::jitNestingLevel); if (newJitNestingLevel <= 0) { printf("{ Illegal nesting level %d }\n", newJitNestingLevel); } for (LONG i = 0; i < newJitNestingLevel - 1; i++) { printf(" "); } printf("{ Start Jitting Method %4d %s (MethodHash=%08x) %s\n", Compiler::jitTotalMethodCompiled, info.compFullName, info.compMethodHash(), compGetTieringName()); /* } editor brace matching workaround for this printf */ } #endif // DEBUG } void Compiler::compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI) { #ifdef DEBUG assert(!compIsForInlining()); if ((JitConfig.JitFunctionTrace() != 0) && !opts.disDiffable) { LONG newJitNestingLevel = InterlockedDecrement(&Compiler::jitNestingLevel); if (newJitNestingLevel < 0) { printf("{ Illegal nesting level %d }\n", newJitNestingLevel); } for (LONG i = 0; i < newJitNestingLevel; i++) { printf(" "); } // Note: that is incorrect if we are compiling several methods at the same time. unsigned methodNumber = Compiler::jitTotalMethodCompiled - 1; /* { editor brace-matching workaround for following printf */ printf("} Jitted Method %4d at" FMT_ADDR "method %s size %08x%s%s\n", methodNumber, DBG_ADDR(methodCodePtr), info.compFullName, methodCodeSize, isNYI ? " NYI" : (compIsForImportOnly() ? " import only" : ""), opts.altJit ? 
" altjit" : ""); } #endif // DEBUG } //------------------------------------------------------------------------ // BeginPhase: begin execution of a phase // // Arguments: // phase - the phase that is about to begin // void Compiler::BeginPhase(Phases phase) { mostRecentlyActivePhase = phase; } //------------------------------------------------------------------------ // EndPhase: finish execution of a phase // // Arguments: // phase - the phase that has just finished // void Compiler::EndPhase(Phases phase) { #if defined(FEATURE_JIT_METHOD_PERF) if (pCompJitTimer != nullptr) { pCompJitTimer->EndPhase(this, phase); } #endif mostRecentlyActivePhase = phase; } //------------------------------------------------------------------------ // compCompile: run phases needed for compilation // // Arguments: // methodCodePtr [OUT] - address of generated code // methodCodeSize [OUT] - size of the generated code (hot + cold setions) // compileFlags [IN] - flags controlling jit behavior // // Notes: // This is the most interesting 'toplevel' function in the JIT. It goes through the operations of // importing, morphing, optimizations and code generation. This is called from the EE through the // code:CILJit::compileMethod function. // // For an overview of the structure of the JIT, see: // https://github.com/dotnet/runtime/blob/main/docs/design/coreclr/jit/ryujit-overview.md // // Also called for inlinees, though they will only be run through the first few phases. // void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags) { // Prepare for importation // auto preImportPhase = [this]() { if (compIsForInlining()) { // Notify root instance that an inline attempt is about to import IL impInlineRoot()->m_inlineStrategy->NoteImport(); } hashBv::Init(this); VarSetOps::AssignAllowUninitRhs(this, compCurLife, VarSetOps::UninitVal()); // The temp holding the secret stub argument is used by fgImport() when importing the intrinsic. if (info.compPublishStubParam) { assert(lvaStubArgumentVar == BAD_VAR_NUM); lvaStubArgumentVar = lvaGrabTempWithImplicitUse(false DEBUGARG("stub argument")); lvaGetDesc(lvaStubArgumentVar)->lvType = TYP_I_IMPL; // TODO-CQ: there is no need to mark it as doNotEnreg. There are no stores for this local // before codegen so liveness and LSRA mark it as "liveIn" and always allocate a stack slot for it. // However, it would be better to process it like other argument locals and keep it in // a reg for the whole method without spilling to the stack when possible. lvaSetVarDoNotEnregister(lvaStubArgumentVar DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr)); } }; DoPhase(this, PHASE_PRE_IMPORT, preImportPhase); compFunctionTraceStart(); // Incorporate profile data. // // Note: the importer is sensitive to block weights, so this has // to happen before importation. // DoPhase(this, PHASE_INCPROFILE, &Compiler::fgIncorporateProfileData); // If we're going to instrument code, we may need to prepare before // we import. // if (compileFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { DoPhase(this, PHASE_IBCPREP, &Compiler::fgPrepareToInstrumentMethod); } // Import: convert the instrs in each basic block to a tree based intermediate representation // DoPhase(this, PHASE_IMPORTATION, &Compiler::fgImport); // Expand any patchpoints // DoPhase(this, PHASE_PATCHPOINTS, &Compiler::fgTransformPatchpoints); // If instrumenting, add block and class probes. 
// if (compileFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { DoPhase(this, PHASE_IBCINSTR, &Compiler::fgInstrumentMethod); } // Transform indirect calls that require control flow expansion. // DoPhase(this, PHASE_INDXCALL, &Compiler::fgTransformIndirectCalls); // PostImportPhase: cleanup inlinees // auto postImportPhase = [this]() { // If this is a viable inline candidate if (compIsForInlining() && !compDonotInline()) { // Filter out unimported BBs in the inlinee // fgPostImportationCleanup(); // Update type of return spill temp if we have gathered // better info when importing the inlinee, and the return // spill temp is single def. if (fgNeedReturnSpillTemp()) { CORINFO_CLASS_HANDLE retExprClassHnd = impInlineInfo->retExprClassHnd; if (retExprClassHnd != nullptr) { LclVarDsc* returnSpillVarDsc = lvaGetDesc(lvaInlineeReturnSpillTemp); if (returnSpillVarDsc->lvSingleDef) { lvaUpdateClass(lvaInlineeReturnSpillTemp, retExprClassHnd, impInlineInfo->retExprClassHndIsExact); } } } } }; DoPhase(this, PHASE_POST_IMPORT, postImportPhase); // If we're importing for inlining, we're done. if (compIsForInlining()) { #ifdef FEATURE_JIT_METHOD_PERF if (pCompJitTimer != nullptr) { #if MEASURE_CLRAPI_CALLS EndPhase(PHASE_CLR_API); #endif pCompJitTimer->Terminate(this, CompTimeSummaryInfo::s_compTimeSummary, false); } #endif return; } // At this point in the phase list, all the inlinee phases have // been run, and inlinee compiles have exited, so we should only // get this far if we are jitting the root method. noway_assert(!compIsForInlining()); // Maybe the caller was not interested in generating code if (compIsForImportOnly()) { compFunctionTraceEnd(nullptr, 0, false); return; } #if !FEATURE_EH // If we aren't yet supporting EH in a compiler bring-up, remove as many EH handlers as possible, so // we can pass tests that contain try/catch EH, but don't actually throw any exceptions. fgRemoveEH(); #endif // !FEATURE_EH // We could allow ESP frames. Just need to reserve space for // pushing EBP if the method becomes an EBP-frame after an edit. // Note that requiring a EBP Frame disallows double alignment. Thus if we change this // we either have to disallow double alignment for E&C some other way or handle it in EETwain. if (opts.compDbgEnC) { codeGen->setFramePointerRequired(true); // We don't care about localloc right now. If we do support it, // EECodeManager::FixContextForEnC() needs to handle it smartly // in case the localloc was actually executed. // // compLocallocUsed = true; } // Start phases that are broadly called morphing, and includes // global morph, as well as other phases that massage the trees so // that we can generate code out of them. // auto morphInitPhase = [this]() { // Initialize the BlockSet epoch NewBasicBlockEpoch(); fgOutgoingArgTemps = nullptr; // Insert call to class constructor as the first basic block if // we were asked to do so. 
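// (Clarifying note: CORINFO_INITCLASS_USE_HELPER indicates the VM wants an explicit class
// initialization helper call rather than relying on implicit triggers; fgInitThisClass()
// below builds that helper call tree.)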
if (info.compCompHnd->initClass(nullptr /* field */, nullptr /* method */, impTokenLookupContextHandle /* context */) & CORINFO_INITCLASS_USE_HELPER) { fgEnsureFirstBBisScratch(); fgNewStmtAtBeg(fgFirstBB, fgInitThisClass()); } #ifdef DEBUG if (opts.compGcChecks) { for (unsigned i = 0; i < info.compArgsCount; i++) { if (lvaGetDesc(i)->TypeGet() == TYP_REF) { // confirm that the argument is a GC pointer (for debugging (GC stress)) GenTree* op = gtNewLclvNode(i, TYP_REF); GenTreeCall::Use* args = gtNewCallArgs(op); op = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_VOID, args); fgEnsureFirstBBisScratch(); fgNewStmtAtEnd(fgFirstBB, op); if (verbose) { printf("\ncompGcChecks tree:\n"); gtDispTree(op); } } } } #endif // DEBUG #if defined(DEBUG) && defined(TARGET_XARCH) if (opts.compStackCheckOnRet) { lvaReturnSpCheck = lvaGrabTempWithImplicitUse(false DEBUGARG("ReturnSpCheck")); lvaSetVarDoNotEnregister(lvaReturnSpCheck, DoNotEnregisterReason::ReturnSpCheck); lvaGetDesc(lvaReturnSpCheck)->lvType = TYP_I_IMPL; } #endif // defined(DEBUG) && defined(TARGET_XARCH) #if defined(DEBUG) && defined(TARGET_X86) if (opts.compStackCheckOnCall) { lvaCallSpCheck = lvaGrabTempWithImplicitUse(false DEBUGARG("CallSpCheck")); lvaGetDesc(lvaCallSpCheck)->lvType = TYP_I_IMPL; } #endif // defined(DEBUG) && defined(TARGET_X86) // Update flow graph after importation. // Removes un-imported blocks, trims EH, and ensures correct OSR entry flow. // fgPostImportationCleanup(); }; DoPhase(this, PHASE_MORPH_INIT, morphInitPhase); #ifdef DEBUG // Inliner could add basic blocks. Check that the flowgraph data is up-to-date fgDebugCheckBBlist(false, false); #endif // DEBUG // Inline callee methods into this root method // DoPhase(this, PHASE_MORPH_INLINE, &Compiler::fgInline); // Record "start" values for post-inlining cycles and elapsed time. RecordStateAtEndOfInlining(); // Transform each GT_ALLOCOBJ node into either an allocation helper call or // local variable allocation on the stack. 
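// (Clarifying sketch of what ObjectAllocator does, not code taken from it: when its escape
// analysis proves an allocation site never escapes the method, the GT_ALLOCOBJ is rewritten
// into the address of a stack-allocated temp with the object's layout; otherwise it becomes
// an ordinary allocation helper call. Stack allocation is only attempted at all when
// compObjectStackAllocation() and optimizations are both enabled, as checked just below.)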
ObjectAllocator objectAllocator(this); // PHASE_ALLOCATE_OBJECTS if (compObjectStackAllocation() && opts.OptimizationEnabled()) { objectAllocator.EnableObjectStackAllocation(); } objectAllocator.Run(); // Add any internal blocks/trees we may need // DoPhase(this, PHASE_MORPH_ADD_INTERNAL, &Compiler::fgAddInternal); // Remove empty try regions // DoPhase(this, PHASE_EMPTY_TRY, &Compiler::fgRemoveEmptyTry); // Remove empty finally regions // DoPhase(this, PHASE_EMPTY_FINALLY, &Compiler::fgRemoveEmptyFinally); // Streamline chains of finally invocations // DoPhase(this, PHASE_MERGE_FINALLY_CHAINS, &Compiler::fgMergeFinallyChains); // Clone code in finallys to reduce overhead for non-exceptional paths // DoPhase(this, PHASE_CLONE_FINALLY, &Compiler::fgCloneFinally); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Update finally target flags after EH optimizations // DoPhase(this, PHASE_UPDATE_FINALLY_FLAGS, &Compiler::fgUpdateFinallyTargetFlags); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #if DEBUG if (lvaEnregEHVars) { unsigned methHash = info.compMethodHash(); char* lostr = getenv("JitEHWTHashLo"); unsigned methHashLo = 0; bool dump = false; if (lostr != nullptr) { sscanf_s(lostr, "%x", &methHashLo); dump = true; } char* histr = getenv("JitEHWTHashHi"); unsigned methHashHi = UINT32_MAX; if (histr != nullptr) { sscanf_s(histr, "%x", &methHashHi); dump = true; } if (methHash < methHashLo || methHash > methHashHi) { lvaEnregEHVars = false; } else if (dump) { printf("Enregistering EH Vars for method %s, hash = 0x%x.\n", info.compFullName, info.compMethodHash()); printf(""); // flush } } if (lvaEnregMultiRegVars) { unsigned methHash = info.compMethodHash(); char* lostr = getenv("JitMultiRegHashLo"); unsigned methHashLo = 0; bool dump = false; if (lostr != nullptr) { sscanf_s(lostr, "%x", &methHashLo); dump = true; } char* histr = getenv("JitMultiRegHashHi"); unsigned methHashHi = UINT32_MAX; if (histr != nullptr) { sscanf_s(histr, "%x", &methHashHi); dump = true; } if (methHash < methHashLo || methHash > methHashHi) { lvaEnregMultiRegVars = false; } else if (dump) { printf("Enregistering MultiReg Vars for method %s, hash = 0x%x.\n", info.compFullName, info.compMethodHash()); printf(""); // flush } } #endif // Compute bbNum, bbRefs and bbPreds // // This is the first time full (not cheap) preds will be computed. // And, if we have profile data, we can now check integrity. // // From this point on the flowgraph information such as bbNum, // bbRefs or bbPreds has to be kept updated. // auto computePredsPhase = [this]() { JITDUMP("\nRenumbering the basic blocks for fgComputePred\n"); fgRenumberBlocks(); noway_assert(!fgComputePredsDone); fgComputePreds(); }; DoPhase(this, PHASE_COMPUTE_PREDS, computePredsPhase); // Now that we have pred lists, do some flow-related optimizations // if (opts.OptimizationEnabled()) { // Merge common throw blocks // DoPhase(this, PHASE_MERGE_THROWS, &Compiler::fgTailMergeThrows); // Run an early flow graph simplification pass // auto earlyUpdateFlowGraphPhase = [this]() { constexpr bool doTailDup = false; fgUpdateFlowGraph(doTailDup); }; DoPhase(this, PHASE_EARLY_UPDATE_FLOW_GRAPH, earlyUpdateFlowGraphPhase); } // Promote struct locals // auto promoteStructsPhase = [this]() { // For x64 and ARM64 we need to mark irregular parameters lvaRefCountState = RCS_EARLY; fgResetImplicitByRefRefCount(); fgPromoteStructs(); }; DoPhase(this, PHASE_PROMOTE_STRUCTS, promoteStructsPhase); // Figure out what locals are address-taken. 
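// (Clarifying note: a local is "address exposed" when its address can escape the JIT's view --
// an illustrative case is a local whose address is passed to a call -- and such locals must
// stay on the stack rather than being enregistered.)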
// DoPhase(this, PHASE_STR_ADRLCL, &Compiler::fgMarkAddressExposedLocals); // Run a simple forward substitution pass. // DoPhase(this, PHASE_FWD_SUB, &Compiler::fgForwardSub); // Apply the type update to implicit byref parameters; also choose (based on address-exposed // analysis) which implicit byref promotions to keep (requires copy to initialize) or discard. // DoPhase(this, PHASE_MORPH_IMPBYREF, &Compiler::fgRetypeImplicitByRefArgs); #ifdef DEBUG // Now that locals have address-taken and implicit byref marked, we can safely apply stress. lvaStressLclFld(); fgStress64RsltMul(); #endif // DEBUG // Morph the trees in all the blocks of the method // auto morphGlobalPhase = [this]() { unsigned prevBBCount = fgBBcount; fgMorphBlocks(); // Fix any LclVar annotations on discarded struct promotion temps for implicit by-ref args fgMarkDemotedImplicitByRefArgs(); lvaRefCountState = RCS_INVALID; #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) if (fgNeedToAddFinallyTargetBits) { // We previously wiped out the BBF_FINALLY_TARGET bits due to some morphing; add them back. fgAddFinallyTargetFlags(); fgNeedToAddFinallyTargetBits = false; } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Decide the kind of code we want to generate fgSetOptions(); fgExpandQmarkNodes(); #ifdef DEBUG compCurBB = nullptr; #endif // DEBUG // If we needed to create any new BasicBlocks then renumber the blocks if (fgBBcount > prevBBCount) { fgRenumberBlocks(); } // We can now enable all phase checking activePhaseChecks = PhaseChecks::CHECK_ALL; }; DoPhase(this, PHASE_MORPH_GLOBAL, morphGlobalPhase); // GS security checks for unsafe buffers // auto gsPhase = [this]() { unsigned prevBBCount = fgBBcount; if (getNeedsGSSecurityCookie()) { gsGSChecksInitCookie(); if (compGSReorderStackLayout) { gsCopyShadowParams(); } // If we needed to create any new BasicBlocks then renumber the blocks if (fgBBcount > prevBBCount) { fgRenumberBlocks(); } } else { JITDUMP("No GS security needed\n"); } }; DoPhase(this, PHASE_GS_COOKIE, gsPhase); // Compute the block and edge weights // DoPhase(this, PHASE_COMPUTE_EDGE_WEIGHTS, &Compiler::fgComputeBlockAndEdgeWeights); #if defined(FEATURE_EH_FUNCLETS) // Create funclets from the EH handlers. // DoPhase(this, PHASE_CREATE_FUNCLETS, &Compiler::fgCreateFunclets); #endif // FEATURE_EH_FUNCLETS if (opts.OptimizationEnabled()) { // Invert loops // DoPhase(this, PHASE_INVERT_LOOPS, &Compiler::optInvertLoops); // Optimize block order // DoPhase(this, PHASE_OPTIMIZE_LAYOUT, &Compiler::optOptimizeLayout); // Compute reachability sets and dominators. // DoPhase(this, PHASE_COMPUTE_REACHABILITY, &Compiler::fgComputeReachability); // Scale block weights and mark run rarely blocks. // DoPhase(this, PHASE_SET_BLOCK_WEIGHTS, &Compiler::optSetBlockWeights); // Discover and classify natural loops (e.g. mark iterative loops as such). Also marks loop blocks // and sets bbWeight to the loop nesting levels. // DoPhase(this, PHASE_FIND_LOOPS, &Compiler::optFindLoopsPhase); // Clone loops with optimization opportunities, and choose one based on dynamic condition evaluation. // DoPhase(this, PHASE_CLONE_LOOPS, &Compiler::optCloneLoops); // Unroll loops // DoPhase(this, PHASE_UNROLL_LOOPS, &Compiler::optUnrollLoops); // Clear loop table info that is not used after this point, and might become invalid. 
// DoPhase(this, PHASE_CLEAR_LOOP_INFO, &Compiler::optClearLoopIterInfo); } #ifdef DEBUG fgDebugCheckLinks(); #endif // Create the variable table (and compute variable ref counts) // DoPhase(this, PHASE_MARK_LOCAL_VARS, &Compiler::lvaMarkLocalVars); // IMPORTANT, after this point, locals are ref counted. // However, ref counts are not kept incrementally up to date. assert(lvaLocalVarRefCounted()); if (opts.OptimizationEnabled()) { // Optimize boolean conditions // DoPhase(this, PHASE_OPTIMIZE_BOOLS, &Compiler::optOptimizeBools); // optOptimizeBools() might have changed the number of blocks; the dominators/reachability might be bad. } // Figure out the order in which operators are to be evaluated // DoPhase(this, PHASE_FIND_OPER_ORDER, &Compiler::fgFindOperOrder); // Weave the tree lists. Anyone who modifies the tree shapes after // this point is responsible for calling fgSetStmtSeq() to keep the // nodes properly linked. // This can create GC poll calls, and create new BasicBlocks (without updating dominators/reachability). // DoPhase(this, PHASE_SET_BLOCK_ORDER, &Compiler::fgSetBlockOrder); // At this point we know if we are fully interruptible or not if (opts.OptimizationEnabled()) { bool doSsa = true; bool doEarlyProp = true; bool doValueNum = true; bool doLoopHoisting = true; bool doCopyProp = true; bool doBranchOpt = true; bool doAssertionProp = true; bool doRangeAnalysis = true; int iterations = 1; #if defined(OPT_CONFIG) doSsa = (JitConfig.JitDoSsa() != 0); doEarlyProp = doSsa && (JitConfig.JitDoEarlyProp() != 0); doValueNum = doSsa && (JitConfig.JitDoValueNumber() != 0); doLoopHoisting = doValueNum && (JitConfig.JitDoLoopHoisting() != 0); doCopyProp = doValueNum && (JitConfig.JitDoCopyProp() != 0); doBranchOpt = doValueNum && (JitConfig.JitDoRedundantBranchOpts() != 0); doAssertionProp = doValueNum && (JitConfig.JitDoAssertionProp() != 0); doRangeAnalysis = doAssertionProp && (JitConfig.JitDoRangeAnalysis() != 0); if (opts.optRepeat) { iterations = JitConfig.JitOptRepeatCount(); } #endif // defined(OPT_CONFIG) while (iterations > 0) { if (doSsa) { // Build up SSA form for the IR // DoPhase(this, PHASE_BUILD_SSA, &Compiler::fgSsaBuild); } if (doEarlyProp) { // Propagate array length and rewrite getType() method call // DoPhase(this, PHASE_EARLY_PROP, &Compiler::optEarlyProp); } if (doValueNum) { // Value number the trees // DoPhase(this, PHASE_VALUE_NUMBER, &Compiler::fgValueNumber); } if (doLoopHoisting) { // Hoist invariant code out of loops // DoPhase(this, PHASE_HOIST_LOOP_CODE, &Compiler::optHoistLoopCode); } if (doCopyProp) { // Perform VN based copy propagation // DoPhase(this, PHASE_VN_COPY_PROP, &Compiler::optVnCopyProp); } if (doBranchOpt) { DoPhase(this, PHASE_OPTIMIZE_BRANCHES, &Compiler::optRedundantBranches); } // Remove common sub-expressions // DoPhase(this, PHASE_OPTIMIZE_VALNUM_CSES, &Compiler::optOptimizeCSEs); if (doAssertionProp) { // Assertion propagation // DoPhase(this, PHASE_ASSERTION_PROP_MAIN, &Compiler::optAssertionPropMain); } if (doRangeAnalysis) { auto rangePhase = [this]() { RangeCheck rc(this); rc.OptimizeRangeChecks(); }; // Bounds check elimination via range analysis // DoPhase(this, PHASE_OPTIMIZE_INDEX_CHECKS, rangePhase); } if (fgModified) { // update the flowgraph if we modified it during the optimization phase // auto optUpdateFlowGraphPhase = [this]() { constexpr bool doTailDup = false; fgUpdateFlowGraph(doTailDup); }; DoPhase(this, PHASE_OPT_UPDATE_FLOW_GRAPH, optUpdateFlowGraphPhase); // Recompute the edge weight if we have modified the 
flow graph // DoPhase(this, PHASE_COMPUTE_EDGE_WEIGHTS2, &Compiler::fgComputeEdgeWeights); } // Iterate if requested, resetting annotations first. if (--iterations == 0) { break; } ResetOptAnnotations(); RecomputeLoopInfo(); } } // Insert GC Polls DoPhase(this, PHASE_INSERT_GC_POLLS, &Compiler::fgInsertGCPolls); // Determine start of cold region if we are hot/cold splitting // DoPhase(this, PHASE_DETERMINE_FIRST_COLD_BLOCK, &Compiler::fgDetermineFirstColdBlock); #ifdef DEBUG fgDebugCheckLinks(compStressCompile(STRESS_REMORPH_TREES, 50)); // Stash the current estimate of the function's size if necessary. if (verbose) { compSizeEstimate = 0; compCycleEstimate = 0; for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { compSizeEstimate += stmt->GetCostSz(); compCycleEstimate += stmt->GetCostEx(); } } } #endif // rationalize trees Rationalizer rat(this); // PHASE_RATIONALIZE rat.Run(); // Here we do "simple lowering". When the RyuJIT backend works for all // platforms, this will be part of the more general lowering phase. For now, though, we do a separate // pass of "final lowering." We must do this before (final) liveness analysis, because this creates // range check throw blocks, in which the liveness must be correct. // DoPhase(this, PHASE_SIMPLE_LOWERING, &Compiler::fgSimpleLowering); // Enable this to gather statistical data such as // call and register argument info, flowgraph and loop info, etc. compJitStats(); #ifdef TARGET_ARM if (compLocallocUsed) { // We reserve REG_SAVED_LOCALLOC_SP to store SP on entry for stack unwinding codeGen->regSet.rsMaskResvd |= RBM_SAVED_LOCALLOC_SP; } #endif // TARGET_ARM // Assign registers to variables, etc. /////////////////////////////////////////////////////////////////////////////// // Dominator and reachability sets are no longer valid. They haven't been // maintained up to here, and shouldn't be used (unless recomputed). /////////////////////////////////////////////////////////////////////////////// fgDomsComputed = false; // Create LinearScan before Lowering, so that Lowering can call LinearScan methods // for determining whether locals are register candidates and (for xarch) whether // a node is a containable memory op. m_pLinearScan = getLinearScanAllocator(this); // Lower // m_pLowering = new (this, CMK_LSRA) Lowering(this, m_pLinearScan); // PHASE_LOWERING m_pLowering->Run(); if (!compMacOsArm64Abi()) { // Set stack levels; this information is necessary for x86 // but on other platforms it is used only in asserts. // TODO: do not run it in release on other platforms, see https://github.com/dotnet/runtime/issues/42673. StackLevelSetter stackLevelSetter(this); stackLevelSetter.Run(); } // We can not add any new tracked variables after this point. 
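// (Clarifying note: register allocation sizes its per-tracked-variable data structures from
// the tracked count, so the tracked set has to be frozen here before LSRA runs.)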
lvaTrackedFixed = true; // Now that lowering is completed we can proceed to perform register allocation // auto linearScanPhase = [this]() { m_pLinearScan->doLinearScan(); }; DoPhase(this, PHASE_LINEAR_SCAN, linearScanPhase); // Copied from rpPredictRegUse() SetFullPtrRegMapRequired(codeGen->GetInterruptible() || !codeGen->isFramePointerUsed()); #if FEATURE_LOOP_ALIGN // Place loop alignment instructions DoPhase(this, PHASE_ALIGN_LOOPS, &Compiler::placeLoopAlignInstructions); #endif // Generate code codeGen->genGenerateCode(methodCodePtr, methodCodeSize); #if TRACK_LSRA_STATS if (JitConfig.DisplayLsraStats() == 2) { m_pLinearScan->dumpLsraStatsCsv(jitstdout); } #endif // TRACK_LSRA_STATS // We're done -- set the active phase to the last phase // (which isn't really a phase) mostRecentlyActivePhase = PHASE_POST_EMIT; #ifdef FEATURE_JIT_METHOD_PERF if (pCompJitTimer) { #if MEASURE_CLRAPI_CALLS EndPhase(PHASE_CLR_API); #else EndPhase(PHASE_POST_EMIT); #endif pCompJitTimer->Terminate(this, CompTimeSummaryInfo::s_compTimeSummary, true); } #endif // Generate PatchpointInfo generatePatchpointInfo(); RecordStateAtEndOfCompilation(); #ifdef FEATURE_TRACELOGGING compJitTelemetry.NotifyEndOfCompilation(); #endif #if defined(DEBUG) ++Compiler::jitTotalMethodCompiled; #endif // defined(DEBUG) compFunctionTraceEnd(*methodCodePtr, *methodCodeSize, false); JITDUMP("Method code size: %d\n", (unsigned)(*methodCodeSize)); #if FUNC_INFO_LOGGING if (compJitFuncInfoFile != nullptr) { assert(!compIsForInlining()); #ifdef DEBUG // We only have access to info.compFullName in DEBUG builds. fprintf(compJitFuncInfoFile, "%s\n", info.compFullName); #elif FEATURE_SIMD fprintf(compJitFuncInfoFile, " %s\n", eeGetMethodFullName(info.compMethodHnd)); #endif fprintf(compJitFuncInfoFile, ""); // in our logic this causes a flush } #endif // FUNC_INFO_LOGGING } #if FEATURE_LOOP_ALIGN //------------------------------------------------------------------------ // placeLoopAlignInstructions: Iterate over all the blocks and determine // the best position to place the 'align' instruction. Inserting 'align' // instructions after an unconditional branch is preferred over inserting // in the block before the loop. In case there are multiple blocks // having 'jmp', the one that has lower weight is preferred. // If the block having 'jmp' is hotter than the block before the loop, // the align will still be placed after 'jmp' because the processor should // be smart enough to not fetch extra instruction beyond jmp. // void Compiler::placeLoopAlignInstructions() { if (loopAlignCandidates == 0) { return; } int loopsToProcess = loopAlignCandidates; JITDUMP("Inside placeLoopAlignInstructions for %d loops.\n", loopAlignCandidates); // Add align only if there were any loops that needed alignment weight_t minBlockSoFar = BB_MAX_WEIGHT; BasicBlock* bbHavingAlign = nullptr; BasicBlock::loopNumber currentAlignedLoopNum = BasicBlock::NOT_IN_LOOP; if ((fgFirstBB != nullptr) && fgFirstBB->isLoopAlign()) { // Adding align instruction in prolog is not supported // hence just remove that loop from our list. loopsToProcess--; } for (BasicBlock* const block : Blocks()) { if (currentAlignedLoopNum != BasicBlock::NOT_IN_LOOP) { // We've been processing blocks within an aligned loop. Are we out of that loop now? 
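        // (Illustrative, with made-up block and loop numbers: if BB10 ends in a 'jmp' but lives
        //  inside an aligned loop L1, it is not recorded as a candidate; the align for a later
        //  loop L2 must land either on a 'jmp' block outside L1 or on the block immediately
        //  preceding L2's head. The check below resets the tracking once the walk leaves the
        //  aligned loop.)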
if (currentAlignedLoopNum != block->bbNatLoopNum) { currentAlignedLoopNum = BasicBlock::NOT_IN_LOOP; } } // If there is a unconditional jump (which is not part of callf/always pair) if (opts.compJitHideAlignBehindJmp && (block->bbJumpKind == BBJ_ALWAYS) && !block->isBBCallAlwaysPairTail()) { // Track the lower weight blocks if (block->bbWeight < minBlockSoFar) { if (currentAlignedLoopNum == BasicBlock::NOT_IN_LOOP) { // Ok to insert align instruction in this block because it is not part of any aligned loop. minBlockSoFar = block->bbWeight; bbHavingAlign = block; JITDUMP(FMT_BB ", bbWeight=" FMT_WT " ends with unconditional 'jmp' \n", block->bbNum, block->bbWeight); } } } if ((block->bbNext != nullptr) && (block->bbNext->isLoopAlign())) { // If jmp was not found, then block before the loop start is where align instruction will be added. if (bbHavingAlign == nullptr) { bbHavingAlign = block; JITDUMP("Marking " FMT_BB " before the loop with BBF_HAS_ALIGN for loop at " FMT_BB "\n", block->bbNum, block->bbNext->bbNum); } else { JITDUMP("Marking " FMT_BB " that ends with unconditional jump with BBF_HAS_ALIGN for loop at " FMT_BB "\n", bbHavingAlign->bbNum, block->bbNext->bbNum); } bbHavingAlign->bbFlags |= BBF_HAS_ALIGN; minBlockSoFar = BB_MAX_WEIGHT; bbHavingAlign = nullptr; currentAlignedLoopNum = block->bbNext->bbNatLoopNum; if (--loopsToProcess == 0) { break; } } } assert(loopsToProcess == 0); } #endif //------------------------------------------------------------------------ // generatePatchpointInfo: allocate and fill in patchpoint info data, // and report it to the VM // void Compiler::generatePatchpointInfo() { if (!doesMethodHavePatchpoints() && !doesMethodHavePartialCompilationPatchpoints()) { // Nothing to report return; } // Patchpoints are only found in Tier0 code, which is unoptimized, and so // should always have frame pointer. assert(codeGen->isFramePointerUsed()); // Allocate patchpoint info storage from runtime, and fill in initial bits of data. const unsigned patchpointInfoSize = PatchpointInfo::ComputeSize(info.compLocalsCount); PatchpointInfo* const patchpointInfo = (PatchpointInfo*)info.compCompHnd->allocateArray(patchpointInfoSize); // Patchpoint offsets always refer to "virtual frame offsets". // // For x64 this falls out because Tier0 frames are always FP frames, and so the FP-relative // offset is what we want. // // For arm64, if the frame pointer is not at the top of the frame, we need to adjust the // offset. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_AMD64) // We add +TARGET_POINTER_SIZE here is to account for the slot that Jit_Patchpoint // creates when it simulates calling the OSR method (the "pseudo return address" slot). // This is effectively a new slot at the bottom of the Tier0 frame. // const int totalFrameSize = codeGen->genTotalFrameSize() + TARGET_POINTER_SIZE; const int offsetAdjust = 0; #elif defined(TARGET_ARM64) // SP is not manipulated by calls so no frame size adjustment needed. // Local Offsets may need adjusting, if FP is at bottom of frame. // const int totalFrameSize = codeGen->genTotalFrameSize(); const int offsetAdjust = codeGen->genSPtoFPdelta() - totalFrameSize; #else NYI("patchpoint info generation"); const int offsetAdjust = 0; const int totalFrameSize = 0; #endif patchpointInfo->Initialize(info.compLocalsCount, totalFrameSize); JITDUMP("--OSR--- Total Frame Size %d, local offset adjust is %d\n", patchpointInfo->TotalFrameSize(), offsetAdjust); // We record offsets for all the "locals" here. 
Could restrict // this to just the IL locals with some extra logic, and save a bit of space, // but would need to adjust all consumers, too. for (unsigned lclNum = 0; lclNum < info.compLocalsCount; lclNum++) { LclVarDsc* const varDsc = lvaGetDesc(lclNum); // We expect all these to have stack homes, and be FP relative assert(varDsc->lvOnFrame); assert(varDsc->lvFramePointerBased); // Record FramePtr relative offset (no localloc yet) patchpointInfo->SetOffset(lclNum, varDsc->GetStackOffset() + offsetAdjust); // Note if IL stream contained an address-of that potentially leads to exposure. // This bit of IL may be skipped by OSR partial importation. if (varDsc->lvHasLdAddrOp) { patchpointInfo->SetIsExposed(lclNum); } JITDUMP("--OSR-- V%02u is at virtual offset %d%s\n", lclNum, patchpointInfo->Offset(lclNum), patchpointInfo->IsExposed(lclNum) ? " (exposed)" : ""); } // Special offsets // if (lvaReportParamTypeArg()) { const int offset = lvaCachedGenericContextArgOffset(); patchpointInfo->SetGenericContextArgOffset(offset + offsetAdjust); JITDUMP("--OSR-- cached generic context virtual offset is %d\n", patchpointInfo->GenericContextArgOffset()); } if (lvaKeepAliveAndReportThis()) { const int offset = lvaCachedGenericContextArgOffset(); patchpointInfo->SetKeptAliveThisOffset(offset + offsetAdjust); JITDUMP("--OSR-- kept-alive this virtual offset is %d\n", patchpointInfo->KeptAliveThisOffset()); } if (compGSReorderStackLayout) { assert(lvaGSSecurityCookie != BAD_VAR_NUM); LclVarDsc* const varDsc = lvaGetDesc(lvaGSSecurityCookie); patchpointInfo->SetSecurityCookieOffset(varDsc->GetStackOffset() + offsetAdjust); JITDUMP("--OSR-- security cookie V%02u virtual offset is %d\n", lvaGSSecurityCookie, patchpointInfo->SecurityCookieOffset()); } if (lvaMonAcquired != BAD_VAR_NUM) { LclVarDsc* const varDsc = lvaGetDesc(lvaMonAcquired); patchpointInfo->SetMonitorAcquiredOffset(varDsc->GetStackOffset() + offsetAdjust); JITDUMP("--OSR-- monitor acquired V%02u virtual offset is %d\n", lvaMonAcquired, patchpointInfo->MonitorAcquiredOffset()); } #if defined(TARGET_AMD64) // Record callee save registers. // Currently only needed for x64. // regMaskTP rsPushRegs = codeGen->regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED; rsPushRegs |= RBM_FPBASE; patchpointInfo->SetCalleeSaveRegisters((uint64_t)rsPushRegs); JITDUMP("--OSR-- Tier0 callee saves: "); JITDUMPEXEC(dspRegMask((regMaskTP)patchpointInfo->CalleeSaveRegisters())); JITDUMP("\n"); #endif // Register this with the runtime. info.compCompHnd->setPatchpointInfo(patchpointInfo); } //------------------------------------------------------------------------ // ResetOptAnnotations: Clear annotations produced during global optimizations. // // Notes: // The intent of this method is to clear any information typically assumed // to be set only once; it is used between iterations when JitOptRepeat is // in effect. void Compiler::ResetOptAnnotations() { assert(opts.optRepeat); assert(JitConfig.JitOptRepeatCount() > 0); fgResetForSsa(); vnStore = nullptr; m_opAsgnVarDefSsaNums = nullptr; m_blockToEHPreds = nullptr; fgSsaPassesCompleted = 0; fgVNPassesCompleted = 0; for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { for (GenTree* const tree : stmt->TreeList()) { tree->ClearVN(); tree->ClearAssertion(); tree->gtCSEnum = NO_CSE; } } } } //------------------------------------------------------------------------ // RecomputeLoopInfo: Recompute loop annotations between opt-repeat iterations. 
// // Notes: // The intent of this method is to update loop structure annotations, and those // they depend on; these annotations may have become stale during optimization, // and need to be up-to-date before running another iteration of optimizations. // void Compiler::RecomputeLoopInfo() { assert(opts.optRepeat); assert(JitConfig.JitOptRepeatCount() > 0); // Recompute reachability sets, dominators, and loops. optResetLoopInfo(); fgDomsComputed = false; fgComputeReachability(); optSetBlockWeights(); // Rebuild the loop tree annotations themselves optFindLoops(); } /*****************************************************************************/ void Compiler::ProcessShutdownWork(ICorStaticInfo* statInfo) { } /*****************************************************************************/ #ifdef DEBUG void* forceFrameJIT; // used to force to frame &useful for fastchecked debugging bool Compiler::skipMethod() { static ConfigMethodRange fJitRange; fJitRange.EnsureInit(JitConfig.JitRange()); assert(!fJitRange.Error()); // Normally JitConfig.JitRange() is null, we don't want to skip // jitting any methods. // // So, the logic below relies on the fact that a null range string // passed to ConfigMethodRange represents the set of all methods. if (!fJitRange.Contains(info.compMethodHash())) { return true; } if (JitConfig.JitExclude().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { return true; } if (!JitConfig.JitInclude().isEmpty() && !JitConfig.JitInclude().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { return true; } return false; } #endif /*****************************************************************************/ int Compiler::compCompile(CORINFO_MODULE_HANDLE classPtr, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags) { // compInit should have set these already. noway_assert(info.compMethodInfo != nullptr); noway_assert(info.compCompHnd != nullptr); noway_assert(info.compMethodHnd != nullptr); #ifdef FEATURE_JIT_METHOD_PERF static bool checkedForJitTimeLog = false; pCompJitTimer = nullptr; if (!checkedForJitTimeLog) { // Call into VM to get the config strings. FEATURE_JIT_METHOD_PERF is enabled for // retail builds. Do not call the regular Config helper here as it would pull // in a copy of the config parser into the clrjit.dll. InterlockedCompareExchangeT(&Compiler::compJitTimeLogFilename, (LPCWSTR)info.compCompHnd->getJitTimeLogFilename(), NULL); // At a process or module boundary clear the file and start afresh. JitTimer::PrintCsvHeader(); checkedForJitTimeLog = true; } if ((Compiler::compJitTimeLogFilename != nullptr) || (JitTimeLogCsv() != nullptr)) { pCompJitTimer = JitTimer::Create(this, info.compMethodInfo->ILCodeSize); } #endif // FEATURE_JIT_METHOD_PERF #ifdef DEBUG Compiler* me = this; forceFrameJIT = (void*)&me; // let us see the this pointer in fastchecked build // set this early so we can use it without relying on random memory values verbose = compIsForInlining() ? 
impInlineInfo->InlinerCompiler->verbose : false; #endif #if FUNC_INFO_LOGGING LPCWSTR tmpJitFuncInfoFilename = JitConfig.JitFuncInfoFile(); if (tmpJitFuncInfoFilename != nullptr) { LPCWSTR oldFuncInfoFileName = InterlockedCompareExchangeT(&compJitFuncInfoFilename, tmpJitFuncInfoFilename, NULL); if (oldFuncInfoFileName == nullptr) { assert(compJitFuncInfoFile == nullptr); compJitFuncInfoFile = _wfopen(compJitFuncInfoFilename, W("a")); if (compJitFuncInfoFile == nullptr) { #if defined(DEBUG) && !defined(HOST_UNIX) // no 'perror' in the PAL perror("Failed to open JitFuncInfoLogFile"); #endif // defined(DEBUG) && !defined(HOST_UNIX) } } } #endif // FUNC_INFO_LOGGING // if (s_compMethodsCount==0) setvbuf(jitstdout, NULL, _IONBF, 0); if (compIsForInlining()) { compileFlags->Clear(JitFlags::JIT_FLAG_OSR); info.compILEntry = 0; info.compPatchpointInfo = nullptr; } else if (compileFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { // Fetch OSR info from the runtime info.compPatchpointInfo = info.compCompHnd->getOSRInfo(&info.compILEntry); assert(info.compPatchpointInfo != nullptr); } #if defined(TARGET_ARM64) compFrameInfo = {0}; #endif virtualStubParamInfo = new (this, CMK_Unknown) VirtualStubParamInfo(IsTargetAbi(CORINFO_CORERT_ABI)); // compMatchedVM is set to true if both CPU/ABI and OS are matching the execution engine requirements // // Do we have a matched VM? Or are we "abusing" the VM to help us do JIT work (such as using an x86 native VM // with an ARM-targeting "altjit"). // Match CPU/ABI for compMatchedVM info.compMatchedVM = IMAGE_FILE_MACHINE_TARGET == info.compCompHnd->getExpectedTargetArchitecture(); // Match OS for compMatchedVM CORINFO_EE_INFO* eeInfo = eeGetEEInfo(); #ifdef TARGET_OS_RUNTIMEDETERMINED noway_assert(TargetOS::OSSettingConfigured); #endif if (TargetOS::IsMacOS) { info.compMatchedVM = info.compMatchedVM && (eeInfo->osType == CORINFO_MACOS); } else if (TargetOS::IsUnix) { if (TargetArchitecture::IsX64) { // MacOS x64 uses the Unix jit variant in crossgen2, not a special jit info.compMatchedVM = info.compMatchedVM && ((eeInfo->osType == CORINFO_UNIX) || (eeInfo->osType == CORINFO_MACOS)); } else { info.compMatchedVM = info.compMatchedVM && (eeInfo->osType == CORINFO_UNIX); } } else if (TargetOS::IsWindows) { info.compMatchedVM = info.compMatchedVM && (eeInfo->osType == CORINFO_WINNT); } // If we are not compiling for a matched VM, then we are getting JIT flags that don't match our target // architecture. The two main examples here are an ARM targeting altjit hosted on x86 and an ARM64 // targeting altjit hosted on x64. (Though with cross-bitness work, the host doesn't necessarily need // to be of the same bitness.) In these cases, we need to fix up the JIT flags to be appropriate for // the target, as the VM's expected target may overlap bit flags with different meaning to our target. // Note that it might be better to do this immediately when setting the JIT flags in CILJit::compileMethod() // (when JitFlags::SetFromFlags() is called), but this is close enough. (To move this logic to // CILJit::compileMethod() would require moving the info.compMatchedVM computation there as well.) if (!info.compMatchedVM) { #if defined(TARGET_ARM) // Currently nothing needs to be done. There are no ARM flags that conflict with other flags. #endif // defined(TARGET_ARM) #if defined(TARGET_ARM64) // The x86/x64 architecture capabilities flags overlap with the ARM64 ones. Set a reasonable architecture // target default. 
Currently this is disabling all ARM64 architecture features except FP and SIMD, but this // should be altered to possibly enable all of them, when they are known to all work. CORINFO_InstructionSetFlags defaultArm64Flags; defaultArm64Flags.AddInstructionSet(InstructionSet_ArmBase); defaultArm64Flags.AddInstructionSet(InstructionSet_AdvSimd); defaultArm64Flags.Set64BitInstructionSetVariants(); compileFlags->SetInstructionSetFlags(defaultArm64Flags); #endif // defined(TARGET_ARM64) } compMaxUncheckedOffsetForNullObject = eeGetEEInfo()->maxUncheckedOffsetForNullObject; // Set the context for token lookup. if (compIsForInlining()) { impTokenLookupContextHandle = impInlineInfo->tokenLookupContextHandle; assert(impInlineInfo->inlineCandidateInfo->clsHandle == info.compCompHnd->getMethodClass(info.compMethodHnd)); info.compClassHnd = impInlineInfo->inlineCandidateInfo->clsHandle; assert(impInlineInfo->inlineCandidateInfo->clsAttr == info.compCompHnd->getClassAttribs(info.compClassHnd)); // printf("%x != %x\n", impInlineInfo->inlineCandidateInfo->clsAttr, // info.compCompHnd->getClassAttribs(info.compClassHnd)); info.compClassAttr = impInlineInfo->inlineCandidateInfo->clsAttr; } else { impTokenLookupContextHandle = METHOD_BEING_COMPILED_CONTEXT(); info.compClassHnd = info.compCompHnd->getMethodClass(info.compMethodHnd); info.compClassAttr = info.compCompHnd->getClassAttribs(info.compClassHnd); } #ifdef DEBUG if (JitConfig.EnableExtraSuperPmiQueries()) { // This call to getClassModule/getModuleAssembly/getAssemblyName fails in crossgen2 due to these // APIs being unimplemented. So disable this extra info for pre-jit mode. See // https://github.com/dotnet/runtime/issues/48888. // // Ditto for some of the class name queries for generic params. // if (!compileFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { // Get the assembly name, to aid finding any particular SuperPMI method context function (void)info.compCompHnd->getAssemblyName( info.compCompHnd->getModuleAssembly(info.compCompHnd->getClassModule(info.compClassHnd))); // Fetch class names for the method's generic parameters. // CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(info.compMethodHnd, &sig, nullptr); const unsigned classInst = sig.sigInst.classInstCount; if (classInst > 0) { for (unsigned i = 0; i < classInst; i++) { eeGetClassName(sig.sigInst.classInst[i]); } } const unsigned methodInst = sig.sigInst.methInstCount; if (methodInst > 0) { for (unsigned i = 0; i < methodInst; i++) { eeGetClassName(sig.sigInst.methInst[i]); } } } } #endif // DEBUG info.compProfilerCallback = false; // Assume false until we are told to hook this method. #ifdef DEBUG if (!compIsForInlining()) { JitTls::GetLogEnv()->setCompiler(this); } // Have we been told to be more selective in our Jitting? 
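    // (Illustrative note on the DEBUG-only filters consulted by skipMethod(): JitRange limits
    //  jitting to methods whose hash falls in the configured ranges, while JitExclude/JitInclude
    //  filter by method and class name. A hypothetical setup, exact syntax not verified here:
    //
    //      set COMPlus_JitRange=0x10000000-0x20000000
    //      set COMPlus_JitExclude=Program:Main
    //
    //  Anything filtered out takes the skip path below; for an inline candidate that simply
    //  becomes a CALLEE_MARKED_AS_SKIPPED observation.)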
if (skipMethod()) { if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLEE_MARKED_AS_SKIPPED); } return CORJIT_SKIPPED; } #endif // DEBUG // Set this before the first 'BADCODE' // Skip verification where possible assert(compileFlags->IsSet(JitFlags::JIT_FLAG_SKIP_VERIFICATION)); /* Setup an error trap */ struct Param { Compiler* pThis; CORINFO_MODULE_HANDLE classPtr; COMP_HANDLE compHnd; CORINFO_METHOD_INFO* methodInfo; void** methodCodePtr; uint32_t* methodCodeSize; JitFlags* compileFlags; int result; } param; param.pThis = this; param.classPtr = classPtr; param.compHnd = info.compCompHnd; param.methodInfo = info.compMethodInfo; param.methodCodePtr = methodCodePtr; param.methodCodeSize = methodCodeSize; param.compileFlags = compileFlags; param.result = CORJIT_INTERNALERROR; setErrorTrap(info.compCompHnd, Param*, pParam, &param) // ERROR TRAP: Start normal block { pParam->result = pParam->pThis->compCompileHelper(pParam->classPtr, pParam->compHnd, pParam->methodInfo, pParam->methodCodePtr, pParam->methodCodeSize, pParam->compileFlags); } finallyErrorTrap() // ERROR TRAP: The following block handles errors { /* Cleanup */ if (compIsForInlining()) { goto DoneCleanUp; } /* Tell the emitter that we're done with this function */ GetEmitter()->emitEndCG(); DoneCleanUp: compDone(); } endErrorTrap() // ERROR TRAP: End return param.result; } #if defined(DEBUG) || defined(INLINE_DATA) //------------------------------------------------------------------------ // compMethodHash: get hash code for currently jitted method // // Returns: // Hash based on method's full name // unsigned Compiler::Info::compMethodHash() const { if (compMethodHashPrivate == 0) { // compMethodHashPrivate = compCompHnd->getMethodHash(compMethodHnd); assert(compFullName != nullptr); assert(*compFullName != 0); COUNT_T hash = HashStringA(compFullName); // Use compFullName to generate the hash, as it contains the signature // and return type compMethodHashPrivate = hash; } return compMethodHashPrivate; } //------------------------------------------------------------------------ // compMethodHash: get hash code for specified method // // Arguments: // methodHnd - method of interest // // Returns: // Hash based on method's full name // unsigned Compiler::compMethodHash(CORINFO_METHOD_HANDLE methodHnd) { // If this is the root method, delegate to the caching version // if (methodHnd == info.compMethodHnd) { return info.compMethodHash(); } // Else compute from scratch. Might consider caching this too. 
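    // (Illustrative note: method hashes are what the config ranges elsewhere in the jit match
    //  against, e.g. the OSR gating code later in this file does roughly:
    //
    //      const unsigned hash = impInlineRoot()->info.compMethodHash();
    //      if (!JitEnableOsrRange.Contains(hash)) { /* OSR disabled for this method */ }
    //
    //  which is presumably why a stable, name-based hash is preferred when the full name is
    //  available.)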
//
    unsigned    methodHash = 0;
    const char* calleeName = eeGetMethodFullName(methodHnd);

    if (calleeName != nullptr)
    {
        methodHash = HashStringA(calleeName);
    }
    else
    {
        methodHash = info.compCompHnd->getMethodHash(methodHnd);
    }

    return methodHash;
}

#endif // defined(DEBUG) || defined(INLINE_DATA)

void Compiler::compCompileFinish()
{
#if defined(DEBUG) || MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || DISPLAY_SIZES || CALL_ARG_STATS
    genMethodCnt++;
#endif

#if MEASURE_MEM_ALLOC
    {
        compArenaAllocator->finishMemStats();
        memAllocHist.record((unsigned)((compArenaAllocator->getTotalBytesAllocated() + 1023) / 1024));
        memUsedHist.record((unsigned)((compArenaAllocator->getTotalBytesUsed() + 1023) / 1024));
    }

#ifdef DEBUG
    if (s_dspMemStats || verbose)
    {
        printf("\nAllocations for %s (MethodHash=%08x)\n", info.compFullName, info.compMethodHash());
        compArenaAllocator->dumpMemStats(jitstdout);
    }
#endif // DEBUG
#endif // MEASURE_MEM_ALLOC

#if LOOP_HOIST_STATS
    AddLoopHoistStats();
#endif // LOOP_HOIST_STATS

#if MEASURE_NODE_SIZE
    genTreeNcntHist.record(static_cast<unsigned>(genNodeSizeStatsPerFunc.genTreeNodeCnt));
    genTreeNsizHist.record(static_cast<unsigned>(genNodeSizeStatsPerFunc.genTreeNodeSize));
#endif

#if defined(DEBUG)
    // Small methods should fit in ArenaAllocator::getDefaultPageSize(), or else
    // we should bump up ArenaAllocator::getDefaultPageSize()

    if ((info.compILCodeSize <= 32) &&     // Is it a reasonably small method?
        (info.compNativeCodeSize < 512) && // Some trivial methods generate huge native code. e.g. pushing a single huge
                                           // struct
        (impInlinedCodeSize <= 128) &&     // Is the inlining reasonably bounded?
                                           // Small methods cannot meaningfully have a big number of locals
                                           // or arguments. We always track arguments at the start of
                                           // the prolog which requires memory
        (info.compLocalsCount <= 32) && (!opts.MinOpts()) && // We may have too many local variables, etc
        (getJitStressLevel() == 0) &&                        // We need extra memory for stress
        !opts.optRepeat &&                                   // We need extra memory to repeat opts
        !compArenaAllocator->bypassHostAllocator() && // ArenaAllocator::getDefaultPageSize() is artificially low for
                                                      // DirectAlloc
        // Factor of 2x is because data-structures are bigger under DEBUG
        (compArenaAllocator->getTotalBytesAllocated() > (2 * ArenaAllocator::getDefaultPageSize())) &&
        // RyuJIT backend needs memory tuning! TODO-Cleanup: remove this case when memory tuning is complete.
        (compArenaAllocator->getTotalBytesAllocated() > (10 * ArenaAllocator::getDefaultPageSize())) &&
        !verbose) // We allocate lots of memory to convert sets to strings for JitDump
    {
        genSmallMethodsNeedingExtraMemoryCnt++;

        // Less than 1% of all methods should run into this.
        // We cannot be more strict as there are always degenerate cases where we
        // would need extra memory (like huge structs as locals - see lvaSetStruct()).
assert((genMethodCnt < 500) || (genSmallMethodsNeedingExtraMemoryCnt < (genMethodCnt / 100))); } #endif // DEBUG #if defined(DEBUG) || defined(INLINE_DATA) m_inlineStrategy->DumpData(); if (JitConfig.JitInlineDumpXmlFile() != nullptr) { FILE* file = _wfopen(JitConfig.JitInlineDumpXmlFile(), W("a")); if (file != nullptr) { m_inlineStrategy->DumpXml(file); fclose(file); } else { m_inlineStrategy->DumpXml(); } } else { m_inlineStrategy->DumpXml(); } #endif #ifdef DEBUG if (opts.dspOrder) { // mdMethodDef __stdcall CEEInfo::getMethodDefFromMethod(CORINFO_METHOD_HANDLE hMethod) mdMethodDef currentMethodToken = info.compCompHnd->getMethodDefFromMethod(info.compMethodHnd); static bool headerPrinted = false; if (!headerPrinted) { // clang-format off headerPrinted = true; printf(" | Profiled | Method | Method has | calls | Num |LclV |AProp| CSE | Perf |bytes | %3s codesize| \n", Target::g_tgtCPUName); printf(" mdToken | CNT | RGN | Hash | EH | FRM | LOOP | NRM | IND | BBs | Cnt | Cnt | Cnt | Score | IL | HOT | CLD | method name \n"); printf("---------+------+------+----------+----+-----+------+-----+-----+-----+-----+-----+-----+---------+------+-------+-----+\n"); // 06001234 | 1234 | HOT | 0f1e2d3c | EH | ebp | LOOP | 15 | 6 | 12 | 17 | 12 | 8 | 1234.56 | 145 | 1234 | 123 | System.Example(int) // clang-format on } printf("%08X | ", currentMethodToken); if (fgHaveProfileData()) { if (fgCalledCount < 1000) { printf("%4.0f | ", fgCalledCount); } else if (fgCalledCount < 1000000) { printf("%3.0fK | ", fgCalledCount / 1000); } else { printf("%3.0fM | ", fgCalledCount / 1000000); } } else { printf(" | "); } CorInfoRegionKind regionKind = info.compMethodInfo->regionKind; if (opts.altJit) { printf("ALT | "); } else if (regionKind == CORINFO_REGION_NONE) { printf(" | "); } else if (regionKind == CORINFO_REGION_HOT) { printf(" HOT | "); } else if (regionKind == CORINFO_REGION_COLD) { printf("COLD | "); } else if (regionKind == CORINFO_REGION_JIT) { printf(" JIT | "); } else { printf("UNKN | "); } printf("%08x | ", info.compMethodHash()); if (compHndBBtabCount > 0) { printf("EH | "); } else { printf(" | "); } if (rpFrameType == FT_EBP_FRAME) { printf("%3s | ", STR_FPBASE); } else if (rpFrameType == FT_ESP_FRAME) { printf("%3s | ", STR_SPBASE); } #if DOUBLE_ALIGN else if (rpFrameType == FT_DOUBLE_ALIGN_FRAME) { printf("dbl | "); } #endif else // (rpFrameType == FT_NOT_SET) { printf("??? 
| "); } if (fgHasLoops) { printf("LOOP |"); } else { printf(" |"); } printf(" %3d |", optCallCount); printf(" %3d |", optIndirectCallCount); printf(" %3d |", fgBBcountAtCodegen); printf(" %3d |", lvaCount); if (opts.MinOpts()) { printf(" MinOpts |"); } else { printf(" %3d |", optAssertionCount); printf(" %3d |", optCSEcount); } if (info.compPerfScore < 9999.995) { printf(" %7.2f |", info.compPerfScore); } else { printf(" %7.0f |", info.compPerfScore); } printf(" %4d |", info.compMethodInfo->ILCodeSize); printf(" %5d |", info.compTotalHotCodeSize); printf(" %3d |", info.compTotalColdCodeSize); printf(" %s\n", eeGetMethodFullName(info.compMethodHnd)); printf(""); // in our logic this causes a flush } if (verbose) { printf("****** DONE compiling %s\n", info.compFullName); printf(""); // in our logic this causes a flush } #if TRACK_ENREG_STATS for (unsigned i = 0; i < lvaCount; ++i) { const LclVarDsc* varDsc = lvaGetDesc(i); if (varDsc->lvRefCnt() != 0) { s_enregisterStats.RecordLocal(varDsc); } } #endif // TRACK_ENREG_STATS // Only call _DbgBreakCheck when we are jitting, not when we are ngen-ing // For ngen the int3 or breakpoint instruction will be right at the // start of the ngen method and we will stop when we execute it. // if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { if (compJitHaltMethod()) { #if !defined(HOST_UNIX) // TODO-UNIX: re-enable this when we have an OS that supports a pop-up dialog // Don't do an assert, but just put up the dialog box so we get just-in-time debugger // launching. When you hit 'retry' it will continue and naturally stop at the INT 3 // that the JIT put in the code _DbgBreakCheck(__FILE__, __LINE__, "JitHalt"); #endif } } #endif // DEBUG } #ifdef PSEUDORANDOM_NOP_INSERTION // this is zlib adler32 checksum. source came from windows base #define BASE 65521L // largest prime smaller than 65536 #define NMAX 5552 // NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 #define DO1(buf, i) \ { \ s1 += buf[i]; \ s2 += s1; \ } #define DO2(buf, i) \ DO1(buf, i); \ DO1(buf, i + 1); #define DO4(buf, i) \ DO2(buf, i); \ DO2(buf, i + 2); #define DO8(buf, i) \ DO4(buf, i); \ DO4(buf, i + 4); #define DO16(buf) \ DO8(buf, 0); \ DO8(buf, 8); unsigned adler32(unsigned adler, char* buf, unsigned int len) { unsigned int s1 = adler & 0xffff; unsigned int s2 = (adler >> 16) & 0xffff; int k; if (buf == NULL) return 1L; while (len > 0) { k = len < NMAX ? 
len : NMAX; len -= k; while (k >= 16) { DO16(buf); buf += 16; k -= 16; } if (k != 0) do { s1 += *buf++; s2 += s1; } while (--k); s1 %= BASE; s2 %= BASE; } return (s2 << 16) | s1; } #endif unsigned getMethodBodyChecksum(_In_z_ char* code, int size) { #ifdef PSEUDORANDOM_NOP_INSERTION return adler32(0, code, size); #else return 0; #endif } int Compiler::compCompileHelper(CORINFO_MODULE_HANDLE classPtr, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags) { CORINFO_METHOD_HANDLE methodHnd = info.compMethodHnd; info.compCode = methodInfo->ILCode; info.compILCodeSize = methodInfo->ILCodeSize; info.compILImportSize = 0; if (info.compILCodeSize == 0) { BADCODE("code size is zero"); } if (compIsForInlining()) { #ifdef DEBUG unsigned methAttr_Old = impInlineInfo->inlineCandidateInfo->methAttr; unsigned methAttr_New = info.compCompHnd->getMethodAttribs(info.compMethodHnd); unsigned flagsToIgnore = CORINFO_FLG_DONT_INLINE | CORINFO_FLG_FORCEINLINE; assert((methAttr_Old & (~flagsToIgnore)) == (methAttr_New & (~flagsToIgnore))); #endif info.compFlags = impInlineInfo->inlineCandidateInfo->methAttr; compInlineContext = impInlineInfo->inlineContext; } else { info.compFlags = info.compCompHnd->getMethodAttribs(info.compMethodHnd); #ifdef PSEUDORANDOM_NOP_INSERTION info.compChecksum = getMethodBodyChecksum((char*)methodInfo->ILCode, methodInfo->ILCodeSize); #endif compInlineContext = m_inlineStrategy->GetRootContext(); } compSwitchedToOptimized = false; compSwitchedToMinOpts = false; // compInitOptions will set the correct verbose flag. compInitOptions(compileFlags); if (!compIsForInlining() && !opts.altJit && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT)) { // We're an altjit, but the COMPlus_AltJit configuration did not say to compile this method, // so skip it. return CORJIT_SKIPPED; } #ifdef DEBUG if (verbose) { printf("IL to import:\n"); dumpILRange(info.compCode, info.compILCodeSize); } #endif // Check for COMPlus_AggressiveInlining if (JitConfig.JitAggressiveInlining()) { compDoAggressiveInlining = true; } if (compDoAggressiveInlining) { info.compFlags |= CORINFO_FLG_FORCEINLINE; } #ifdef DEBUG // Check for ForceInline stress. 
if (compStressCompile(STRESS_FORCE_INLINE, 0)) { info.compFlags |= CORINFO_FLG_FORCEINLINE; } if (compIsForInlining()) { JITLOG((LL_INFO100000, "\nINLINER impTokenLookupContextHandle for %s is 0x%p.\n", eeGetMethodFullName(info.compMethodHnd), dspPtr(impTokenLookupContextHandle))); } #endif // DEBUG impCanReimport = compStressCompile(STRESS_CHK_REIMPORT, 15); /* Initialize set a bunch of global values */ info.compScopeHnd = classPtr; info.compXcptnsCount = methodInfo->EHcount; info.compMaxStack = methodInfo->maxStack; compHndBBtab = nullptr; compHndBBtabCount = 0; compHndBBtabAllocCount = 0; info.compNativeCodeSize = 0; info.compTotalHotCodeSize = 0; info.compTotalColdCodeSize = 0; info.compClassProbeCount = 0; compHasBackwardJump = false; compHasBackwardJumpInHandler = false; #ifdef DEBUG compCurBB = nullptr; lvaTable = nullptr; // Reset node and block ID counter compGenTreeID = 0; compStatementID = 0; compBasicBlockID = 0; #endif /* Initialize emitter */ if (!compIsForInlining()) { codeGen->GetEmitter()->emitBegCG(this, compHnd); } info.compIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0; info.compPublishStubParam = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PUBLISH_SECRET_PARAM); info.compHasNextCallRetAddr = false; if (opts.IsReversePInvoke()) { bool unused; info.compCallConv = info.compCompHnd->getUnmanagedCallConv(methodInfo->ftn, nullptr, &unused); info.compArgOrder = Target::g_tgtUnmanagedArgOrder; } else { info.compCallConv = CorInfoCallConvExtension::Managed; info.compArgOrder = Target::g_tgtArgOrder; } info.compIsVarArgs = false; switch (methodInfo->args.getCallConv()) { case CORINFO_CALLCONV_NATIVEVARARG: case CORINFO_CALLCONV_VARARG: info.compIsVarArgs = true; break; default: break; } info.compRetNativeType = info.compRetType = JITtype2varType(methodInfo->args.retType); info.compUnmanagedCallCountWithGCTransition = 0; info.compLvFrameListRoot = BAD_VAR_NUM; info.compInitMem = ((methodInfo->options & CORINFO_OPT_INIT_LOCALS) != 0); /* Allocate the local variable table */ lvaInitTypeRef(); compInitDebuggingInfo(); // If are an altjit and have patchpoint info, we might need to tweak the frame size // so it's plausible for the altjit architecture. // if (!info.compMatchedVM && compileFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { assert(info.compLocalsCount == info.compPatchpointInfo->NumberOfLocals()); const int totalFrameSize = info.compPatchpointInfo->TotalFrameSize(); int frameSizeUpdate = 0; #if defined(TARGET_AMD64) if ((totalFrameSize % 16) != 8) { frameSizeUpdate = 8; } #elif defined(TARGET_ARM64) if ((totalFrameSize % 16) != 0) { frameSizeUpdate = 8; } #endif if (frameSizeUpdate != 0) { JITDUMP("Mismatched altjit + OSR -- updating tier0 frame size from %d to %d\n", totalFrameSize, totalFrameSize + frameSizeUpdate); // Allocate a local copy with altered frame size. // const unsigned patchpointInfoSize = PatchpointInfo::ComputeSize(info.compLocalsCount); PatchpointInfo* const newInfo = (PatchpointInfo*)getAllocator(CMK_Unknown).allocate<char>(patchpointInfoSize); newInfo->Initialize(info.compLocalsCount, totalFrameSize + frameSizeUpdate); newInfo->Copy(info.compPatchpointInfo); // Swap it in place. // info.compPatchpointInfo = newInfo; } } #ifdef DEBUG if (compIsForInlining()) { compBasicBlockID = impInlineInfo->InlinerCompiler->compBasicBlockID; } #endif const bool forceInline = !!(info.compFlags & CORINFO_FLG_FORCEINLINE); if (!compIsForInlining() && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { // We're prejitting the root method. 
We also will analyze it as // a potential inline candidate. InlineResult prejitResult(this, methodHnd, "prejit"); // Profile data allows us to avoid early "too many IL bytes" outs. prejitResult.NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, fgHaveSufficientProfileData()); // Do the initial inline screen. impCanInlineIL(methodHnd, methodInfo, forceInline, &prejitResult); // Temporarily install the prejitResult as the // compInlineResult so it's available to fgFindJumpTargets // and can accumulate more observations as the IL is // scanned. // // We don't pass prejitResult in as a parameter to avoid // potential aliasing confusion -- the other call to // fgFindBasicBlocks may have set up compInlineResult and // the code in fgFindJumpTargets references that data // member extensively. assert(compInlineResult == nullptr); assert(impInlineInfo == nullptr); compInlineResult = &prejitResult; // Find the basic blocks. We must do this regardless of // inlineability, since we are prejitting this method. // // This will also update the status of this method as // an inline candidate. fgFindBasicBlocks(); // Undo the temporary setup. assert(compInlineResult == &prejitResult); compInlineResult = nullptr; // If still a viable, discretionary inline, assess // profitability. if (prejitResult.IsDiscretionaryCandidate()) { prejitResult.DetermineProfitability(methodInfo); } m_inlineStrategy->NotePrejitDecision(prejitResult); // Handle the results of the inline analysis. if (prejitResult.IsFailure()) { // This method is a bad inlinee according to our // analysis. We will let the InlineResult destructor // mark it as noinline in the prejit image to save the // jit some work. // // This decision better not be context-dependent. assert(prejitResult.IsNever()); } else { // This looks like a viable inline candidate. Since // we're not actually inlining, don't report anything. prejitResult.SetReported(); } } else { // We are jitting the root method, or inlining. fgFindBasicBlocks(); // If we are doing OSR, update flow to initially reach the appropriate IL offset. // if (opts.IsOSR()) { fgFixEntryFlowForOSR(); } } // If we're inlining and the candidate is bad, bail out. if (compDonotInline()) { goto _Next; } // We may decide to optimize this method, // to avoid spending a long time stuck in Tier0 code. // if (fgCanSwitchToOptimized()) { // We only expect to be able to do this at Tier0. // assert(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)); // Normal tiering should bail us out of Tier0 tail call induced loops. // So keep these methods in Tier0 if we're gathering PGO data. // If we're not gathering PGO, then switch these to optimized to // minimize the number of tail call helper stubs we might need. // Reconsider this if/when we're able to share those stubs. // // Honor the config setting that tells the jit to // always optimize methods with loops. // // If neither of those apply, and OSR is enabled, the jit may still // decide to optimize, if there's something in the method that // OSR currently cannot handle, or we're optionally suppressing // OSR by method hash. 
// const char* reason = nullptr; if (compTailPrefixSeen && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { reason = "tail.call and not BBINSTR"; } else if (compHasBackwardJump && ((info.compFlags & CORINFO_FLG_DISABLE_TIER0_FOR_LOOPS) != 0)) { reason = "loop"; } if (compHasBackwardJump && (reason == nullptr) && (JitConfig.TC_OnStackReplacement() > 0)) { const char* noPatchpointReason = nullptr; bool canEscapeViaOSR = compCanHavePatchpoints(&reason); #ifdef DEBUG if (canEscapeViaOSR) { // Optionally disable OSR by method hash. This will force any // method that might otherwise get trapped in Tier0 to be optimized. // static ConfigMethodRange JitEnableOsrRange; JitEnableOsrRange.EnsureInit(JitConfig.JitEnableOsrRange()); const unsigned hash = impInlineRoot()->info.compMethodHash(); if (!JitEnableOsrRange.Contains(hash)) { canEscapeViaOSR = false; reason = "OSR disabled by JitEnableOsrRange"; } } #endif if (canEscapeViaOSR) { JITDUMP("\nOSR enabled for this method\n"); } else { JITDUMP("\nOSR disabled for this method: %s\n", noPatchpointReason); assert(reason != nullptr); } } if (reason != nullptr) { fgSwitchToOptimized(reason); } } compSetOptimizationLevel(); #if COUNT_BASIC_BLOCKS bbCntTable.record(fgBBcount); if (fgBBcount == 1) { bbOneBBSizeTable.record(methodInfo->ILCodeSize); } #endif // COUNT_BASIC_BLOCKS #ifdef DEBUG if (verbose) { printf("Basic block list for '%s'\n", info.compFullName); fgDispBasicBlocks(); } #endif #ifdef DEBUG /* Give the function a unique number */ if (opts.disAsm || verbose) { compMethodID = ~info.compMethodHash() & 0xffff; } else { compMethodID = InterlockedIncrement(&s_compMethodsCount); } #endif if (compIsForInlining()) { compInlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_BASIC_BLOCKS, fgBBcount); if (compInlineResult->IsFailure()) { goto _Next; } } #ifdef DEBUG if ((JitConfig.DumpJittedMethods() == 1) && !compIsForInlining()) { enum { BUFSIZE = 20 }; char osrBuffer[BUFSIZE] = {0}; if (opts.IsOSR()) { // Tiering name already includes "OSR", we just want the IL offset // sprintf_s(osrBuffer, BUFSIZE, " @0x%x", info.compILEntry); } printf("Compiling %4d %s::%s, IL size = %u, hash=0x%08x %s%s%s\n", Compiler::jitTotalMethodCompiled, info.compClassName, info.compMethodName, info.compILCodeSize, info.compMethodHash(), compGetTieringName(), osrBuffer, compGetStressMessage()); } if (compIsForInlining()) { compGenTreeID = impInlineInfo->InlinerCompiler->compGenTreeID; compStatementID = impInlineInfo->InlinerCompiler->compStatementID; } #endif compCompile(methodCodePtr, methodCodeSize, compileFlags); #ifdef DEBUG if (compIsForInlining()) { impInlineInfo->InlinerCompiler->compGenTreeID = compGenTreeID; impInlineInfo->InlinerCompiler->compStatementID = compStatementID; impInlineInfo->InlinerCompiler->compBasicBlockID = compBasicBlockID; } #endif _Next: if (compDonotInline()) { // Verify we have only one inline result in play. assert(impInlineInfo->inlineResult == compInlineResult); } if (!compIsForInlining()) { compCompileFinish(); // Did we just compile for a target architecture that the VM isn't expecting? If so, the VM // can't used the generated code (and we better be an AltJit!). if (!info.compMatchedVM) { return CORJIT_SKIPPED; } #ifdef DEBUG if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT) && JitConfig.RunAltJitCode() == 0) { return CORJIT_SKIPPED; } #endif // DEBUG } /* Success! 
*/

    return CORJIT_OK;
}

//------------------------------------------------------------------------
// compFindLocalVarLinear: Linear search for variable's scope containing offset.
//
// Arguments:
//      varNum    The variable number to search for in the array of scopes.
//      offs      The offset value which should occur within the life of the variable.
//
// Return Value:
//      VarScopeDsc* of a matching variable that contains the offset within its life
//      begin and life end, or nullptr when there is no match found.
//
// Description:
//      Linear search for matching variables with their life begin and end containing
//      the offset, returning nullptr if one couldn't be found.
//
// Note:
//      Usually called for scope count = 4. Could be called for values up to 8.
//
VarScopeDsc* Compiler::compFindLocalVarLinear(unsigned varNum, unsigned offs)
{
    for (unsigned i = 0; i < info.compVarScopesCount; i++)
    {
        VarScopeDsc* dsc = &info.compVarScopes[i];
        if ((dsc->vsdVarNum == varNum) && (dsc->vsdLifeBeg <= offs) && (dsc->vsdLifeEnd > offs))
        {
            return dsc;
        }
    }
    return nullptr;
}

//------------------------------------------------------------------------
// compFindLocalVar: Search for variable's scope containing offset.
//
// Arguments:
//      varNum    The variable number to search for in the array of scopes.
//      offs      The offset value which should occur within the life of the variable.
//
// Return Value:
//      VarScopeDsc* of a matching variable that contains the offset within its life
//      begin and life end, or NULL if one couldn't be found.
//
// Description:
//      Linear search for matching variables with their life begin and end containing
//      the offset only when the scope count is < MAX_LINEAR_FIND_LCL_SCOPELIST,
//      else use the hashtable lookup.
//
VarScopeDsc* Compiler::compFindLocalVar(unsigned varNum, unsigned offs)
{
    if (info.compVarScopesCount < MAX_LINEAR_FIND_LCL_SCOPELIST)
    {
        return compFindLocalVarLinear(varNum, offs);
    }
    else
    {
        VarScopeDsc* ret = compFindLocalVar(varNum, offs, offs);
        assert(ret == compFindLocalVarLinear(varNum, offs));
        return ret;
    }
}

//------------------------------------------------------------------------
// compFindLocalVar: Search for variable's scope containing offset.
//
// Arguments:
//      varNum     The variable number to search for in the array of scopes.
//      lifeBeg    The life begin of the variable's scope
//      lifeEnd    The life end of the variable's scope
//
// Return Value:
//      VarScopeDsc* of a matching variable that contains the offset within its life
//      begin and life end, or NULL if one couldn't be found.
//
// Description:
//      Following are the steps used:
//      1. Index into the hashtable using varNum.
//      2. Iterate through the linked list at index varNum to find a matching
//         var scope.
//
VarScopeDsc* Compiler::compFindLocalVar(unsigned varNum, unsigned lifeBeg, unsigned lifeEnd)
{
    assert(compVarScopeMap != nullptr);

    VarScopeMapInfo* info;

    if (compVarScopeMap->Lookup(varNum, &info))
    {
        VarScopeListNode* list = info->head;
        while (list != nullptr)
        {
            if ((list->data->vsdLifeBeg <= lifeBeg) && (list->data->vsdLifeEnd > lifeEnd))
            {
                return list->data;
            }
            list = list->next;
        }
    }

    return nullptr;
}

//-------------------------------------------------------------------------
// compInitVarScopeMap: Create a scope map so it can be looked up by varNum
//
// Description:
//      Map.K => Map.V :: varNum => List(ScopeDsc)
//
//      Create a scope map that can be indexed by varNum and can be iterated
//      on its values to look for a matching scope when given an offs or
//      lifeBeg and lifeEnd.
//
// Notes:
//      1.
Build the map only when we think linear search is slow, i.e., // MAX_LINEAR_FIND_LCL_SCOPELIST is large. // 2. Linked list preserves original array order. // void Compiler::compInitVarScopeMap() { if (info.compVarScopesCount < MAX_LINEAR_FIND_LCL_SCOPELIST) { return; } assert(compVarScopeMap == nullptr); compVarScopeMap = new (getAllocator()) VarNumToScopeDscMap(getAllocator()); // 599 prime to limit huge allocations; for ex: duplicated scopes on single var. compVarScopeMap->Reallocate(min(info.compVarScopesCount, 599)); for (unsigned i = 0; i < info.compVarScopesCount; ++i) { unsigned varNum = info.compVarScopes[i].vsdVarNum; VarScopeListNode* node = VarScopeListNode::Create(&info.compVarScopes[i], getAllocator()); // Index by varNum and if the list exists append "node" to the "list". VarScopeMapInfo* info; if (compVarScopeMap->Lookup(varNum, &info)) { info->tail->next = node; info->tail = node; } // Create a new list. else { info = VarScopeMapInfo::Create(node, getAllocator()); compVarScopeMap->Set(varNum, info); } } } struct genCmpLocalVarLifeBeg { bool operator()(const VarScopeDsc* elem1, const VarScopeDsc* elem2) { return elem1->vsdLifeBeg < elem2->vsdLifeBeg; } }; struct genCmpLocalVarLifeEnd { bool operator()(const VarScopeDsc* elem1, const VarScopeDsc* elem2) { return elem1->vsdLifeEnd < elem2->vsdLifeEnd; } }; inline void Compiler::compInitScopeLists() { if (info.compVarScopesCount == 0) { compEnterScopeList = compExitScopeList = nullptr; return; } // Populate the 'compEnterScopeList' and 'compExitScopeList' lists compEnterScopeList = new (this, CMK_DebugInfo) VarScopeDsc*[info.compVarScopesCount]; compExitScopeList = new (this, CMK_DebugInfo) VarScopeDsc*[info.compVarScopesCount]; for (unsigned i = 0; i < info.compVarScopesCount; i++) { compEnterScopeList[i] = compExitScopeList[i] = &info.compVarScopes[i]; } jitstd::sort(compEnterScopeList, compEnterScopeList + info.compVarScopesCount, genCmpLocalVarLifeBeg()); jitstd::sort(compExitScopeList, compExitScopeList + info.compVarScopesCount, genCmpLocalVarLifeEnd()); } void Compiler::compResetScopeLists() { if (info.compVarScopesCount == 0) { return; } assert(compEnterScopeList && compExitScopeList); compNextEnterScope = compNextExitScope = 0; } VarScopeDsc* Compiler::compGetNextEnterScope(unsigned offs, bool scan) { assert(info.compVarScopesCount); assert(compEnterScopeList && compExitScopeList); if (compNextEnterScope < info.compVarScopesCount) { assert(compEnterScopeList[compNextEnterScope]); unsigned nextEnterOff = compEnterScopeList[compNextEnterScope]->vsdLifeBeg; assert(scan || (offs <= nextEnterOff)); if (!scan) { if (offs == nextEnterOff) { return compEnterScopeList[compNextEnterScope++]; } } else { if (nextEnterOff <= offs) { return compEnterScopeList[compNextEnterScope++]; } } } return nullptr; } VarScopeDsc* Compiler::compGetNextExitScope(unsigned offs, bool scan) { assert(info.compVarScopesCount); assert(compEnterScopeList && compExitScopeList); if (compNextExitScope < info.compVarScopesCount) { assert(compExitScopeList[compNextExitScope]); unsigned nextExitOffs = compExitScopeList[compNextExitScope]->vsdLifeEnd; assert(scan || (offs <= nextExitOffs)); if (!scan) { if (offs == nextExitOffs) { return compExitScopeList[compNextExitScope++]; } } else { if (nextExitOffs <= offs) { return compExitScopeList[compNextExitScope++]; } } } return nullptr; } // The function will call the callback functions for scopes with boundaries // at instrs from the current status of the scope lists to 'offset', // ordered by instrs. 
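// (Illustrative example with made-up offsets: given scopes A = [0x00, 0x30) and B = [0x10, 0x20),
//  and a previous position of 0x08 where only A is open, a call with offset == 0x28 replays the
//  boundaries in instruction order:
//
//      enterScopeFn(inScope, B);   // B opens at 0x10
//      exitScopeFn(inScope, B);    // B closes at 0x20
//
//  leaving just A in 'inScope'; A's own exit at 0x30 lies beyond 'offset' and is not processed
//  yet.)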
void Compiler::compProcessScopesUntil(unsigned offset, VARSET_TP* inScope, void (Compiler::*enterScopeFn)(VARSET_TP* inScope, VarScopeDsc*), void (Compiler::*exitScopeFn)(VARSET_TP* inScope, VarScopeDsc*)) { assert(offset != BAD_IL_OFFSET); assert(inScope != nullptr); bool foundExit = false, foundEnter = true; VarScopeDsc* scope; VarScopeDsc* nextExitScope = nullptr; VarScopeDsc* nextEnterScope = nullptr; unsigned offs = offset, curEnterOffs = 0; goto START_FINDING_SCOPES; // We need to determine the scopes which are open for the current block. // This loop walks over the missing blocks between the current and the // previous block, keeping the enter and exit offsets in lockstep. do { foundExit = foundEnter = false; if (nextExitScope) { (this->*exitScopeFn)(inScope, nextExitScope); nextExitScope = nullptr; foundExit = true; } offs = nextEnterScope ? nextEnterScope->vsdLifeBeg : offset; while ((scope = compGetNextExitScope(offs, true)) != nullptr) { foundExit = true; if (!nextEnterScope || scope->vsdLifeEnd > nextEnterScope->vsdLifeBeg) { // We overshot the last found Enter scope. Save the scope for later // and find an entering scope nextExitScope = scope; break; } (this->*exitScopeFn)(inScope, scope); } if (nextEnterScope) { (this->*enterScopeFn)(inScope, nextEnterScope); curEnterOffs = nextEnterScope->vsdLifeBeg; nextEnterScope = nullptr; foundEnter = true; } offs = nextExitScope ? nextExitScope->vsdLifeEnd : offset; START_FINDING_SCOPES: while ((scope = compGetNextEnterScope(offs, true)) != nullptr) { foundEnter = true; if ((nextExitScope && scope->vsdLifeBeg >= nextExitScope->vsdLifeEnd) || (scope->vsdLifeBeg > curEnterOffs)) { // We overshot the last found exit scope. Save the scope for later // and find an exiting scope nextEnterScope = scope; break; } (this->*enterScopeFn)(inScope, scope); if (!nextExitScope) { curEnterOffs = scope->vsdLifeBeg; } } } while (foundExit || foundEnter); } #if defined(DEBUG) void Compiler::compDispScopeLists() { unsigned i; printf("Local variable scopes = %d\n", info.compVarScopesCount); if (info.compVarScopesCount) { printf(" \tVarNum \tLVNum \t Name \tBeg \tEnd\n"); } printf("Sorted by enter scope:\n"); for (i = 0; i < info.compVarScopesCount; i++) { VarScopeDsc* varScope = compEnterScopeList[i]; assert(varScope); printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh", i, varScope->vsdVarNum, varScope->vsdLVnum, VarNameToStr(varScope->vsdName) == nullptr ? "UNKNOWN" : VarNameToStr(varScope->vsdName), varScope->vsdLifeBeg, varScope->vsdLifeEnd); if (compNextEnterScope == i) { printf(" <-- next enter scope"); } printf("\n"); } printf("Sorted by exit scope:\n"); for (i = 0; i < info.compVarScopesCount; i++) { VarScopeDsc* varScope = compExitScopeList[i]; assert(varScope); printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh", i, varScope->vsdVarNum, varScope->vsdLVnum, VarNameToStr(varScope->vsdName) == nullptr ? "UNKNOWN" : VarNameToStr(varScope->vsdName), varScope->vsdLifeBeg, varScope->vsdLifeEnd); if (compNextExitScope == i) { printf(" <-- next exit scope"); } printf("\n"); } } void Compiler::compDispLocalVars() { printf("info.compVarScopesCount = %d\n", info.compVarScopesCount); if (info.compVarScopesCount > 0) { printf(" \tVarNum \tLVNum \t Name \tBeg \tEnd\n"); } for (unsigned i = 0; i < info.compVarScopesCount; i++) { VarScopeDsc* varScope = &info.compVarScopes[i]; printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh\n", i, varScope->vsdVarNum, varScope->vsdLVnum, VarNameToStr(varScope->vsdName) == nullptr ? 
"UNKNOWN" : VarNameToStr(varScope->vsdName), varScope->vsdLifeBeg, varScope->vsdLifeEnd); } } #endif // DEBUG /*****************************************************************************/ #if MEASURE_CLRAPI_CALLS struct WrapICorJitInfo : public ICorJitInfo { //------------------------------------------------------------------------ // WrapICorJitInfo::makeOne: allocate an instance of WrapICorJitInfo // // Arguments: // alloc - the allocator to get memory from for the instance // compile - the compiler instance // compHndRef - the ICorJitInfo handle from the EE; the caller's // copy may be replaced with a "wrapper" instance // // Return Value: // If the config flags indicate that ICorJitInfo should be wrapped, // we return the "wrapper" instance; otherwise we return "nullptr". static WrapICorJitInfo* makeOne(ArenaAllocator* alloc, Compiler* compiler, COMP_HANDLE& compHndRef /* INOUT */) { WrapICorJitInfo* wrap = nullptr; if (JitConfig.JitEECallTimingInfo() != 0) { // It's too early to use the default allocator, so we do this // in two steps to be safe (the constructor doesn't need to do // anything except fill in the vtable pointer, so we let the // compiler do it). void* inst = alloc->allocateMemory(roundUp(sizeof(WrapICorJitInfo))); if (inst != nullptr) { // If you get a build error here due to 'WrapICorJitInfo' being // an abstract class, it's very likely that the wrapper bodies // in ICorJitInfo_API_wrapper.hpp are no longer in sync with // the EE interface; please be kind and update the header file. wrap = new (inst, jitstd::placement_t()) WrapICorJitInfo(); wrap->wrapComp = compiler; // Save the real handle and replace it with our wrapped version. wrap->wrapHnd = compHndRef; compHndRef = wrap; } } return wrap; } private: Compiler* wrapComp; COMP_HANDLE wrapHnd; // the "real thing" public: #include "ICorJitInfo_API_wrapper.hpp" }; #endif // MEASURE_CLRAPI_CALLS /*****************************************************************************/ // Compile a single method int jitNativeCode(CORINFO_METHOD_HANDLE methodHnd, CORINFO_MODULE_HANDLE classPtr, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags, void* inlineInfoPtr) { // // A non-NULL inlineInfo means we are compiling the inlinee method. // InlineInfo* inlineInfo = (InlineInfo*)inlineInfoPtr; bool jitFallbackCompile = false; START: int result = CORJIT_INTERNALERROR; ArenaAllocator* pAlloc = nullptr; ArenaAllocator alloc; #if MEASURE_CLRAPI_CALLS WrapICorJitInfo* wrapCLR = nullptr; #endif if (inlineInfo) { // Use inliner's memory allocator when compiling the inlinee. 
pAlloc = inlineInfo->InlinerCompiler->compGetArenaAllocator(); } else { pAlloc = &alloc; } Compiler* pComp; pComp = nullptr; struct Param { Compiler* pComp; ArenaAllocator* pAlloc; bool jitFallbackCompile; CORINFO_METHOD_HANDLE methodHnd; CORINFO_MODULE_HANDLE classPtr; COMP_HANDLE compHnd; CORINFO_METHOD_INFO* methodInfo; void** methodCodePtr; uint32_t* methodCodeSize; JitFlags* compileFlags; InlineInfo* inlineInfo; #if MEASURE_CLRAPI_CALLS WrapICorJitInfo* wrapCLR; #endif int result; } param; param.pComp = nullptr; param.pAlloc = pAlloc; param.jitFallbackCompile = jitFallbackCompile; param.methodHnd = methodHnd; param.classPtr = classPtr; param.compHnd = compHnd; param.methodInfo = methodInfo; param.methodCodePtr = methodCodePtr; param.methodCodeSize = methodCodeSize; param.compileFlags = compileFlags; param.inlineInfo = inlineInfo; #if MEASURE_CLRAPI_CALLS param.wrapCLR = nullptr; #endif param.result = result; setErrorTrap(compHnd, Param*, pParamOuter, &param) { setErrorTrap(nullptr, Param*, pParam, pParamOuter) { if (pParam->inlineInfo) { // Lazily create the inlinee compiler object if (pParam->inlineInfo->InlinerCompiler->InlineeCompiler == nullptr) { pParam->inlineInfo->InlinerCompiler->InlineeCompiler = (Compiler*)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp))); } // Use the inlinee compiler object pParam->pComp = pParam->inlineInfo->InlinerCompiler->InlineeCompiler; #ifdef DEBUG // memset(pParam->pComp, 0xEE, sizeof(Compiler)); #endif } else { // Allocate create the inliner compiler object pParam->pComp = (Compiler*)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp))); } #if MEASURE_CLRAPI_CALLS pParam->wrapCLR = WrapICorJitInfo::makeOne(pParam->pAlloc, pParam->pComp, pParam->compHnd); #endif // push this compiler on the stack (TLS) pParam->pComp->prevCompiler = JitTls::GetCompiler(); JitTls::SetCompiler(pParam->pComp); // PREFIX_ASSUME gets turned into ASSERT_CHECK and we cannot have it here #if defined(_PREFAST_) || defined(_PREFIX_) PREFIX_ASSUME(pParam->pComp != NULL); #else assert(pParam->pComp != nullptr); #endif pParam->pComp->compInit(pParam->pAlloc, pParam->methodHnd, pParam->compHnd, pParam->methodInfo, pParam->inlineInfo); #ifdef DEBUG pParam->pComp->jitFallbackCompile = pParam->jitFallbackCompile; #endif // Now generate the code pParam->result = pParam->pComp->compCompile(pParam->classPtr, pParam->methodCodePtr, pParam->methodCodeSize, pParam->compileFlags); } finallyErrorTrap() { Compiler* pCompiler = pParamOuter->pComp; // If OOM is thrown when allocating memory for a pComp, we will end up here. // For this case, pComp and also pCompiler will be a nullptr // if (pCompiler != nullptr) { pCompiler->info.compCode = nullptr; // pop the compiler off the TLS stack only if it was linked above assert(JitTls::GetCompiler() == pCompiler); JitTls::SetCompiler(pCompiler->prevCompiler); } if (pParamOuter->inlineInfo == nullptr) { // Free up the allocator we were using pParamOuter->pAlloc->destroy(); } } endErrorTrap() } impJitErrorTrap() { // If we were looking at an inlinee.... if (inlineInfo != nullptr) { // Note that we failed to compile the inlinee, and that // there's no point trying to inline it again anywhere else. 
inlineInfo->inlineResult->NoteFatal(InlineObservation::CALLEE_COMPILATION_ERROR); } param.result = __errc; } endErrorTrap() result = param.result; if (!inlineInfo && (result == CORJIT_INTERNALERROR || result == CORJIT_RECOVERABLEERROR || result == CORJIT_IMPLLIMITATION) && !jitFallbackCompile) { // If we failed the JIT, reattempt with debuggable code. jitFallbackCompile = true; // Update the flags for 'safer' code generation. compileFlags->Set(JitFlags::JIT_FLAG_MIN_OPT); compileFlags->Clear(JitFlags::JIT_FLAG_SIZE_OPT); compileFlags->Clear(JitFlags::JIT_FLAG_SPEED_OPT); goto START; } return result; } #if defined(UNIX_AMD64_ABI) // GetTypeFromClassificationAndSizes: // Returns the type of the eightbyte accounting for the classification and size of the eightbyte. // // args: // classType: classification type // size: size of the eightbyte. // // static var_types Compiler::GetTypeFromClassificationAndSizes(SystemVClassificationType classType, int size) { var_types type = TYP_UNKNOWN; switch (classType) { case SystemVClassificationTypeInteger: if (size == 1) { type = TYP_BYTE; } else if (size <= 2) { type = TYP_SHORT; } else if (size <= 4) { type = TYP_INT; } else if (size <= 8) { type = TYP_LONG; } else { assert(false && "GetTypeFromClassificationAndSizes Invalid Integer classification type."); } break; case SystemVClassificationTypeIntegerReference: type = TYP_REF; break; case SystemVClassificationTypeIntegerByRef: type = TYP_BYREF; break; case SystemVClassificationTypeSSE: if (size <= 4) { type = TYP_FLOAT; } else if (size <= 8) { type = TYP_DOUBLE; } else { assert(false && "GetTypeFromClassificationAndSizes Invalid SSE classification type."); } break; default: assert(false && "GetTypeFromClassificationAndSizes Invalid classification type."); break; } return type; } //------------------------------------------------------------------- // GetEightByteType: Returns the type of eightbyte slot of a struct // // Arguments: // structDesc - struct classification description. // slotNum - eightbyte slot number for the struct. // // Return Value: // type of the eightbyte slot of the struct // // static var_types Compiler::GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, unsigned slotNum) { var_types eightByteType = TYP_UNDEF; unsigned len = structDesc.eightByteSizes[slotNum]; switch (structDesc.eightByteClassifications[slotNum]) { case SystemVClassificationTypeInteger: // See typelist.h for jit type definition. // All the types of size < 4 bytes are of jit type TYP_INT. 
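            // Illustrative example (not taken from the surrounding code): a struct such as
            //   struct S { int i; float f; };
            // occupies a single eightbyte whose merged classification is Integer, so this case maps it
            // to TYP_LONG; a lone 4-byte integer field would instead map to TYP_INT below.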
if (structDesc.eightByteSizes[slotNum] <= 4) { eightByteType = TYP_INT; } else if (structDesc.eightByteSizes[slotNum] <= 8) { eightByteType = TYP_LONG; } else { assert(false && "GetEightByteType Invalid Integer classification type."); } break; case SystemVClassificationTypeIntegerReference: assert(len == REGSIZE_BYTES); eightByteType = TYP_REF; break; case SystemVClassificationTypeIntegerByRef: assert(len == REGSIZE_BYTES); eightByteType = TYP_BYREF; break; case SystemVClassificationTypeSSE: if (structDesc.eightByteSizes[slotNum] <= 4) { eightByteType = TYP_FLOAT; } else if (structDesc.eightByteSizes[slotNum] <= 8) { eightByteType = TYP_DOUBLE; } else { assert(false && "GetEightByteType Invalid SSE classification type."); } break; default: assert(false && "GetEightByteType Invalid classification type."); break; } return eightByteType; } //------------------------------------------------------------------------------------------------------ // GetStructTypeOffset: Gets the type, size and offset of the eightbytes of a struct for System V systems. // // Arguments: // 'structDesc' - struct description // 'type0' - out param; returns the type of the first eightbyte. // 'type1' - out param; returns the type of the second eightbyte. // 'offset0' - out param; returns the offset of the first eightbyte. // 'offset1' - out param; returns the offset of the second eightbyte. // // static void Compiler::GetStructTypeOffset(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1) { *offset0 = structDesc.eightByteOffsets[0]; *offset1 = structDesc.eightByteOffsets[1]; *type0 = TYP_UNKNOWN; *type1 = TYP_UNKNOWN; // Set the first eightbyte data if (structDesc.eightByteCount >= 1) { *type0 = GetEightByteType(structDesc, 0); } // Set the second eight byte data if (structDesc.eightByteCount == 2) { *type1 = GetEightByteType(structDesc, 1); } } //------------------------------------------------------------------------------------------------------ // GetStructTypeOffset: Gets the type, size and offset of the eightbytes of a struct for System V systems. // // Arguments: // 'typeHnd' - type handle // 'type0' - out param; returns the type of the first eightbyte. // 'type1' - out param; returns the type of the second eightbyte. // 'offset0' - out param; returns the offset of the first eightbyte. // 'offset1' - out param; returns the offset of the second eightbyte. // void Compiler::GetStructTypeOffset(CORINFO_CLASS_HANDLE typeHnd, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1) { SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; eeGetSystemVAmd64PassStructInRegisterDescriptor(typeHnd, &structDesc); assert(structDesc.passedInRegisters); GetStructTypeOffset(structDesc, type0, type1, offset0, offset1); } #endif // defined(UNIX_AMD64_ABI) /*****************************************************************************/ /*****************************************************************************/ #ifdef DEBUG Compiler::NodeToIntMap* Compiler::FindReachableNodesInNodeTestData() { NodeToIntMap* reachable = new (getAllocatorDebugOnly()) NodeToIntMap(getAllocatorDebugOnly()); if (m_nodeTestData == nullptr) { return reachable; } // Otherwise, iterate. 
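    // (Walk every statement of every block; any tree that still has an entry in the node test data
    //  map is added to 'reachable'. Call nodes get extra handling because an annotation may live on
    //  the late-arg node rather than on the original argument.)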
for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->NonPhiStatements()) { for (GenTree* const tree : stmt->TreeList()) { TestLabelAndNum tlAndN; // For call nodes, translate late args to what they stand for. if (tree->OperGet() == GT_CALL) { GenTreeCall* call = tree->AsCall(); unsigned i = 0; for (GenTreeCall::Use& use : call->Args()) { if ((use.GetNode()->gtFlags & GTF_LATE_ARG) != 0) { // Find the corresponding late arg. GenTree* lateArg = call->fgArgInfo->GetArgNode(i); if (GetNodeTestData()->Lookup(lateArg, &tlAndN)) { reachable->Set(lateArg, 0); } } i++; } } if (GetNodeTestData()->Lookup(tree, &tlAndN)) { reachable->Set(tree, 0); } } } } return reachable; } void Compiler::TransferTestDataToNode(GenTree* from, GenTree* to) { TestLabelAndNum tlAndN; // We can't currently associate multiple annotations with a single node. // If we need to, we can fix this... // If the table is null, don't create it just to do the lookup, which would fail... if (m_nodeTestData != nullptr && GetNodeTestData()->Lookup(from, &tlAndN)) { assert(!GetNodeTestData()->Lookup(to, &tlAndN)); // We can't currently associate multiple annotations with a single node. // If we need to, we can fix this... TestLabelAndNum tlAndNTo; assert(!GetNodeTestData()->Lookup(to, &tlAndNTo)); GetNodeTestData()->Remove(from); GetNodeTestData()->Set(to, tlAndN); } } #endif // DEBUG /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX jvc XX XX XX XX Functions for the stand-alone version of the JIT . XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ void codeGeneratorCodeSizeBeg() { } /***************************************************************************** * * Used for counting pointer assignments. 
*/ /*****************************************************************************/ void codeGeneratorCodeSizeEnd() { } /***************************************************************************** * * Gather statistics - mainly used for the standalone * Enable various #ifdef's to get the information you need */ void Compiler::compJitStats() { #if CALL_ARG_STATS /* Method types and argument statistics */ compCallArgStats(); #endif // CALL_ARG_STATS } #if CALL_ARG_STATS /***************************************************************************** * * Gather statistics about method calls and arguments */ void Compiler::compCallArgStats() { unsigned argNum; unsigned argDWordNum; unsigned argLngNum; unsigned argFltNum; unsigned argDblNum; unsigned regArgNum; unsigned regArgDeferred; unsigned regArgTemp; unsigned regArgLclVar; unsigned regArgConst; unsigned argTempsThisMethod = 0; assert(fgStmtListThreaded); for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { for (GenTree* const call : stmt->TreeList()) { if (call->gtOper != GT_CALL) continue; argNum = regArgNum = regArgDeferred = regArgTemp = regArgConst = regArgLclVar = argDWordNum = argLngNum = argFltNum = argDblNum = 0; argTotalCalls++; if (call->AsCall()->gtCallThisArg == nullptr) { if (call->AsCall()->gtCallType == CT_HELPER) { argHelperCalls++; } else { argStaticCalls++; } } else { /* We have a 'this' pointer */ argDWordNum++; argNum++; regArgNum++; regArgDeferred++; argTotalObjPtr++; if (call->AsCall()->IsVirtual()) { /* virtual function */ argVirtualCalls++; } else { argNonVirtualCalls++; } } } } } argTempsCntTable.record(argTempsThisMethod); if (argMaxTempsPerMethod < argTempsThisMethod) { argMaxTempsPerMethod = argTempsThisMethod; } } /* static */ void Compiler::compDispCallArgStats(FILE* fout) { if (argTotalCalls == 0) return; fprintf(fout, "\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Call stats\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Total # of calls = %d, calls / method = %.3f\n\n", argTotalCalls, (float)argTotalCalls / genMethodCnt); fprintf(fout, "Percentage of helper calls = %4.2f %%\n", (float)(100 * argHelperCalls) / argTotalCalls); fprintf(fout, "Percentage of static calls = %4.2f %%\n", (float)(100 * argStaticCalls) / argTotalCalls); fprintf(fout, "Percentage of virtual calls = %4.2f %%\n", (float)(100 * argVirtualCalls) / argTotalCalls); fprintf(fout, "Percentage of non-virtual calls = %4.2f %%\n\n", (float)(100 * argNonVirtualCalls) / argTotalCalls); fprintf(fout, "Average # of arguments per call = %.2f%%\n\n", (float)argTotalArgs / argTotalCalls); fprintf(fout, "Percentage of DWORD arguments = %.2f %%\n", (float)(100 * argTotalDWordArgs) / argTotalArgs); fprintf(fout, "Percentage of LONG arguments = %.2f %%\n", (float)(100 * argTotalLongArgs) / argTotalArgs); fprintf(fout, "Percentage of FLOAT arguments = %.2f %%\n", (float)(100 * argTotalFloatArgs) / argTotalArgs); fprintf(fout, "Percentage of DOUBLE arguments = %.2f %%\n\n", (float)(100 * argTotalDoubleArgs) / argTotalArgs); if (argTotalRegArgs == 0) return; /* fprintf(fout, "Total deferred arguments = %d \n", argTotalDeferred); fprintf(fout, "Total temp arguments = %d \n\n", argTotalTemps); fprintf(fout, "Total 'this' arguments = %d \n", argTotalObjPtr); fprintf(fout, "Total local var arguments = %d \n", argTotalLclVar); fprintf(fout, "Total constant arguments = %d \n\n", argTotalConst); */ fprintf(fout, "\nRegister 
Arguments:\n\n"); fprintf(fout, "Percentage of deferred arguments = %.2f %%\n", (float)(100 * argTotalDeferred) / argTotalRegArgs); fprintf(fout, "Percentage of temp arguments = %.2f %%\n\n", (float)(100 * argTotalTemps) / argTotalRegArgs); fprintf(fout, "Maximum # of temps per method = %d\n\n", argMaxTempsPerMethod); fprintf(fout, "Percentage of ObjPtr arguments = %.2f %%\n", (float)(100 * argTotalObjPtr) / argTotalRegArgs); // fprintf(fout, "Percentage of global arguments = %.2f %%\n", (float)(100 * argTotalDWordGlobEf) / // argTotalRegArgs); fprintf(fout, "Percentage of constant arguments = %.2f %%\n", (float)(100 * argTotalConst) / argTotalRegArgs); fprintf(fout, "Percentage of lcl var arguments = %.2f %%\n\n", (float)(100 * argTotalLclVar) / argTotalRegArgs); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Argument count frequency table (includes ObjPtr):\n"); fprintf(fout, "--------------------------------------------------\n"); argCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "DWORD argument count frequency table (w/o LONG):\n"); fprintf(fout, "--------------------------------------------------\n"); argDWordCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Temps count frequency table (per method):\n"); fprintf(fout, "--------------------------------------------------\n"); argTempsCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); /* fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "DWORD argument count frequency table (w/ LONG):\n"); fprintf(fout, "--------------------------------------------------\n"); argDWordLngCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); */ } #endif // CALL_ARG_STATS // JIT time end to end, and by phases. 
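// The per-phase data below is gathered by JitTimer (one instance per compilation), folded into
// CompTimeInfo, and accumulated into the process-wide CompTimeSummaryInfo under a lock. A sketch of
// how the CSV output is typically enabled (the exact config prefix -- e.g. DOTNET_ or COMPlus_ --
// depends on the runtime build, so treat this as an assumption):
//
//   set DOTNET_JitTimeLogCsv=jittime.csv
//
// With that set, JitTimer::PrintCsvMethodStats() appends one row per compiled method.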
#ifdef FEATURE_JIT_METHOD_PERF // Static variables CritSecObject CompTimeSummaryInfo::s_compTimeSummaryLock; CompTimeSummaryInfo CompTimeSummaryInfo::s_compTimeSummary; #if MEASURE_CLRAPI_CALLS double JitTimer::s_cyclesPerSec = CachedCyclesPerSecond(); #endif #endif // FEATURE_JIT_METHOD_PERF #if defined(FEATURE_JIT_METHOD_PERF) || DUMP_FLOWGRAPHS || defined(FEATURE_TRACELOGGING) const char* PhaseNames[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) string_nm, #include "compphases.h" }; const char* PhaseEnums[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) #enum_nm, #include "compphases.h" }; const LPCWSTR PhaseShortNames[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) W(short_nm), #include "compphases.h" }; #endif // defined(FEATURE_JIT_METHOD_PERF) || DUMP_FLOWGRAPHS #ifdef FEATURE_JIT_METHOD_PERF bool PhaseHasChildren[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) hasChildren, #include "compphases.h" }; int PhaseParent[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) parent, #include "compphases.h" }; bool PhaseReportsIRSize[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) measureIR, #include "compphases.h" }; CompTimeInfo::CompTimeInfo(unsigned byteCodeBytes) : m_byteCodeBytes(byteCodeBytes) , m_totalCycles(0) , m_parentPhaseEndSlop(0) , m_timerFailure(false) #if MEASURE_CLRAPI_CALLS , m_allClrAPIcalls(0) , m_allClrAPIcycles(0) #endif { for (int i = 0; i < PHASE_NUMBER_OF; i++) { m_invokesByPhase[i] = 0; m_cyclesByPhase[i] = 0; #if MEASURE_CLRAPI_CALLS m_CLRinvokesByPhase[i] = 0; m_CLRcyclesByPhase[i] = 0; #endif } #if MEASURE_CLRAPI_CALLS assert(ArrLen(m_perClrAPIcalls) == API_ICorJitInfo_Names::API_COUNT); assert(ArrLen(m_perClrAPIcycles) == API_ICorJitInfo_Names::API_COUNT); assert(ArrLen(m_maxClrAPIcycles) == API_ICorJitInfo_Names::API_COUNT); for (int i = 0; i < API_ICorJitInfo_Names::API_COUNT; i++) { m_perClrAPIcalls[i] = 0; m_perClrAPIcycles[i] = 0; m_maxClrAPIcycles[i] = 0; } #endif } bool CompTimeSummaryInfo::IncludedInFilteredData(CompTimeInfo& info) { return false; // info.m_byteCodeBytes < 10; } //------------------------------------------------------------------------ // CompTimeSummaryInfo::AddInfo: Record timing info from one compile. // // Arguments: // info - The timing information to record. // includePhases - If "true", the per-phase info in "info" is valid, // which means that a "normal" compile has ended; if // the value is "false" we are recording the results // of a partial compile (typically an import-only run // on behalf of the inliner) in which case the phase // info is not valid and so we only record EE call // overhead. void CompTimeSummaryInfo::AddInfo(CompTimeInfo& info, bool includePhases) { if (info.m_timerFailure) { return; // Don't update if there was a failure. } CritSecHolder timeLock(s_compTimeSummaryLock); if (includePhases) { bool includeInFiltered = IncludedInFilteredData(info); m_numMethods++; // Update the totals and maxima. m_total.m_byteCodeBytes += info.m_byteCodeBytes; m_maximum.m_byteCodeBytes = max(m_maximum.m_byteCodeBytes, info.m_byteCodeBytes); m_total.m_totalCycles += info.m_totalCycles; m_maximum.m_totalCycles = max(m_maximum.m_totalCycles, info.m_totalCycles); #if MEASURE_CLRAPI_CALLS // Update the CLR-API values. 
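        // (m_total feeds the per-method averages printed by CompTimeSummaryInfo::Print(), while
        //  m_maximum records the most expensive single method seen for each metric.)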
m_total.m_allClrAPIcalls += info.m_allClrAPIcalls; m_maximum.m_allClrAPIcalls = max(m_maximum.m_allClrAPIcalls, info.m_allClrAPIcalls); m_total.m_allClrAPIcycles += info.m_allClrAPIcycles; m_maximum.m_allClrAPIcycles = max(m_maximum.m_allClrAPIcycles, info.m_allClrAPIcycles); #endif if (includeInFiltered) { m_numFilteredMethods++; m_filtered.m_byteCodeBytes += info.m_byteCodeBytes; m_filtered.m_totalCycles += info.m_totalCycles; m_filtered.m_parentPhaseEndSlop += info.m_parentPhaseEndSlop; } for (int i = 0; i < PHASE_NUMBER_OF; i++) { m_total.m_invokesByPhase[i] += info.m_invokesByPhase[i]; m_total.m_cyclesByPhase[i] += info.m_cyclesByPhase[i]; #if MEASURE_CLRAPI_CALLS m_total.m_CLRinvokesByPhase[i] += info.m_CLRinvokesByPhase[i]; m_total.m_CLRcyclesByPhase[i] += info.m_CLRcyclesByPhase[i]; #endif if (includeInFiltered) { m_filtered.m_invokesByPhase[i] += info.m_invokesByPhase[i]; m_filtered.m_cyclesByPhase[i] += info.m_cyclesByPhase[i]; #if MEASURE_CLRAPI_CALLS m_filtered.m_CLRinvokesByPhase[i] += info.m_CLRinvokesByPhase[i]; m_filtered.m_CLRcyclesByPhase[i] += info.m_CLRcyclesByPhase[i]; #endif } m_maximum.m_cyclesByPhase[i] = max(m_maximum.m_cyclesByPhase[i], info.m_cyclesByPhase[i]); #if MEASURE_CLRAPI_CALLS m_maximum.m_CLRcyclesByPhase[i] = max(m_maximum.m_CLRcyclesByPhase[i], info.m_CLRcyclesByPhase[i]); #endif } m_total.m_parentPhaseEndSlop += info.m_parentPhaseEndSlop; m_maximum.m_parentPhaseEndSlop = max(m_maximum.m_parentPhaseEndSlop, info.m_parentPhaseEndSlop); } #if MEASURE_CLRAPI_CALLS else { m_totMethods++; // Update the "global" CLR-API values. m_total.m_allClrAPIcalls += info.m_allClrAPIcalls; m_maximum.m_allClrAPIcalls = max(m_maximum.m_allClrAPIcalls, info.m_allClrAPIcalls); m_total.m_allClrAPIcycles += info.m_allClrAPIcycles; m_maximum.m_allClrAPIcycles = max(m_maximum.m_allClrAPIcycles, info.m_allClrAPIcycles); // Update the per-phase CLR-API values. m_total.m_invokesByPhase[PHASE_CLR_API] += info.m_allClrAPIcalls; m_maximum.m_invokesByPhase[PHASE_CLR_API] = max(m_maximum.m_perClrAPIcalls[PHASE_CLR_API], info.m_allClrAPIcalls); m_total.m_cyclesByPhase[PHASE_CLR_API] += info.m_allClrAPIcycles; m_maximum.m_cyclesByPhase[PHASE_CLR_API] = max(m_maximum.m_cyclesByPhase[PHASE_CLR_API], info.m_allClrAPIcycles); } for (int i = 0; i < API_ICorJitInfo_Names::API_COUNT; i++) { m_total.m_perClrAPIcalls[i] += info.m_perClrAPIcalls[i]; m_maximum.m_perClrAPIcalls[i] = max(m_maximum.m_perClrAPIcalls[i], info.m_perClrAPIcalls[i]); m_total.m_perClrAPIcycles[i] += info.m_perClrAPIcycles[i]; m_maximum.m_perClrAPIcycles[i] = max(m_maximum.m_perClrAPIcycles[i], info.m_perClrAPIcycles[i]); m_maximum.m_maxClrAPIcycles[i] = max(m_maximum.m_maxClrAPIcycles[i], info.m_maxClrAPIcycles[i]); } #endif } // Static LPCWSTR Compiler::compJitTimeLogFilename = nullptr; void CompTimeSummaryInfo::Print(FILE* f) { if (f == nullptr) { return; } // Otherwise... 
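    // (All counters above are raw cycle counts; CachedCyclesPerSecond() converts them to milliseconds
    //  for the report, and we bail out early if no high-frequency timer is available.)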
double countsPerSec = CachedCyclesPerSecond(); if (countsPerSec == 0.0) { fprintf(f, "Processor does not have a high-frequency timer.\n"); return; } double totTime_ms = 0.0; fprintf(f, "JIT Compilation time report:\n"); fprintf(f, " Compiled %d methods.\n", m_numMethods); if (m_numMethods != 0) { fprintf(f, " Compiled %d bytecodes total (%d max, %8.2f avg).\n", m_total.m_byteCodeBytes, m_maximum.m_byteCodeBytes, (double)m_total.m_byteCodeBytes / (double)m_numMethods); totTime_ms = ((double)m_total.m_totalCycles / countsPerSec) * 1000.0; fprintf(f, " Time: total: %10.3f Mcycles/%10.3f ms\n", ((double)m_total.m_totalCycles / 1000000.0), totTime_ms); fprintf(f, " max: %10.3f Mcycles/%10.3f ms\n", ((double)m_maximum.m_totalCycles) / 1000000.0, ((double)m_maximum.m_totalCycles / countsPerSec) * 1000.0); fprintf(f, " avg: %10.3f Mcycles/%10.3f ms\n", ((double)m_total.m_totalCycles) / 1000000.0 / (double)m_numMethods, totTime_ms / (double)m_numMethods); const char* extraHdr1 = ""; const char* extraHdr2 = ""; #if MEASURE_CLRAPI_CALLS bool extraInfo = (JitConfig.JitEECallTimingInfo() != 0); if (extraInfo) { extraHdr1 = " CLRs/meth % in CLR"; extraHdr2 = "-----------------------"; } #endif fprintf(f, "\n Total time by phases:\n"); fprintf(f, " PHASE inv/meth Mcycles time (ms) %% of total max (ms)%s\n", extraHdr1); fprintf(f, " ---------------------------------------------------------------------------------------%s\n", extraHdr2); // Ensure that at least the names array and the Phases enum have the same number of entries: assert(ArrLen(PhaseNames) == PHASE_NUMBER_OF); for (int i = 0; i < PHASE_NUMBER_OF; i++) { double phase_tot_ms = (((double)m_total.m_cyclesByPhase[i]) / countsPerSec) * 1000.0; double phase_max_ms = (((double)m_maximum.m_cyclesByPhase[i]) / countsPerSec) * 1000.0; #if MEASURE_CLRAPI_CALLS // Skip showing CLR API call info if we didn't collect any if (i == PHASE_CLR_API && !extraInfo) continue; #endif // Indent nested phases, according to depth. 
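            // (Two spaces of indentation per ancestor, found by walking the PhaseParent chain until
            //  the root, which is marked with a parent index of -1.)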
int ancPhase = PhaseParent[i]; while (ancPhase != -1) { fprintf(f, " "); ancPhase = PhaseParent[ancPhase]; } fprintf(f, " %-30s %6.2f %10.2f %9.3f %8.2f%% %8.3f", PhaseNames[i], ((double)m_total.m_invokesByPhase[i]) / ((double)m_numMethods), ((double)m_total.m_cyclesByPhase[i]) / 1000000.0, phase_tot_ms, (phase_tot_ms * 100.0 / totTime_ms), phase_max_ms); #if MEASURE_CLRAPI_CALLS if (extraInfo && i != PHASE_CLR_API) { double nest_tot_ms = (((double)m_total.m_CLRcyclesByPhase[i]) / countsPerSec) * 1000.0; double nest_percent = nest_tot_ms * 100.0 / totTime_ms; double calls_per_fn = ((double)m_total.m_CLRinvokesByPhase[i]) / ((double)m_numMethods); if (nest_percent > 0.1 || calls_per_fn > 10) fprintf(f, " %5.1f %8.2f%%", calls_per_fn, nest_percent); } #endif fprintf(f, "\n"); } // Show slop if it's over a certain percentage of the total double pslop_pct = 100.0 * m_total.m_parentPhaseEndSlop * 1000.0 / countsPerSec / totTime_ms; if (pslop_pct >= 1.0) { fprintf(f, "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles = " "%3.1f%% of total.\n\n", m_total.m_parentPhaseEndSlop / 1000000.0, pslop_pct); } } if (m_numFilteredMethods > 0) { fprintf(f, " Compiled %d methods that meet the filter requirement.\n", m_numFilteredMethods); fprintf(f, " Compiled %d bytecodes total (%8.2f avg).\n", m_filtered.m_byteCodeBytes, (double)m_filtered.m_byteCodeBytes / (double)m_numFilteredMethods); double totTime_ms = ((double)m_filtered.m_totalCycles / countsPerSec) * 1000.0; fprintf(f, " Time: total: %10.3f Mcycles/%10.3f ms\n", ((double)m_filtered.m_totalCycles / 1000000.0), totTime_ms); fprintf(f, " avg: %10.3f Mcycles/%10.3f ms\n", ((double)m_filtered.m_totalCycles) / 1000000.0 / (double)m_numFilteredMethods, totTime_ms / (double)m_numFilteredMethods); fprintf(f, " Total time by phases:\n"); fprintf(f, " PHASE inv/meth Mcycles time (ms) %% of total\n"); fprintf(f, " --------------------------------------------------------------------------------------\n"); // Ensure that at least the names array and the Phases enum have the same number of entries: assert(ArrLen(PhaseNames) == PHASE_NUMBER_OF); for (int i = 0; i < PHASE_NUMBER_OF; i++) { double phase_tot_ms = (((double)m_filtered.m_cyclesByPhase[i]) / countsPerSec) * 1000.0; // Indent nested phases, according to depth. 
int ancPhase = PhaseParent[i]; while (ancPhase != -1) { fprintf(f, " "); ancPhase = PhaseParent[ancPhase]; } fprintf(f, " %-30s %5.2f %10.2f %9.3f %8.2f%%\n", PhaseNames[i], ((double)m_filtered.m_invokesByPhase[i]) / ((double)m_numFilteredMethods), ((double)m_filtered.m_cyclesByPhase[i]) / 1000000.0, phase_tot_ms, (phase_tot_ms * 100.0 / totTime_ms)); } double fslop_ms = m_filtered.m_parentPhaseEndSlop * 1000.0 / countsPerSec; if (fslop_ms > 1.0) { fprintf(f, "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles = " "%3.1f%% of total.\n\n", m_filtered.m_parentPhaseEndSlop / 1000000.0, fslop_ms); } } #if MEASURE_CLRAPI_CALLS if (m_total.m_allClrAPIcalls > 0 && m_total.m_allClrAPIcycles > 0) { fprintf(f, "\n"); if (m_totMethods > 0) fprintf(f, " Imported %u methods.\n\n", m_numMethods + m_totMethods); fprintf(f, " CLR API # calls total time max time avg time %% " "of total\n"); fprintf(f, " -------------------------------------------------------------------------------"); fprintf(f, "---------------------\n"); static const char* APInames[] = { #define DEF_CLR_API(name) #name, #include "ICorJitInfo_API_names.h" }; unsigned shownCalls = 0; double shownMillis = 0.0; #ifdef DEBUG unsigned checkedCalls = 0; double checkedMillis = 0.0; #endif for (unsigned pass = 0; pass < 2; pass++) { for (unsigned i = 0; i < API_ICorJitInfo_Names::API_COUNT; i++) { unsigned calls = m_total.m_perClrAPIcalls[i]; if (calls == 0) continue; unsigned __int64 cycles = m_total.m_perClrAPIcycles[i]; double millis = 1000.0 * cycles / countsPerSec; // Don't show the small fry to keep the results manageable if (millis < 0.5) { // We always show the following API because it is always called // exactly once for each method and its body is the simplest one // possible (it just returns an integer constant), and therefore // it can be used to measure the overhead of adding the CLR API // timing code. Roughly speaking, on a 3GHz x64 box the overhead // per call should be around 40 ns when using RDTSC, compared to // about 140 ns when using GetThreadCycles() under Windows. if (i != API_ICorJitInfo_Names::API_getExpectedTargetArchitecture) continue; } // In the first pass we just compute the totals. 
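                // (shownCalls/shownMillis accumulated in pass 0 become the denominator for the
                //  "% of total" column printed in the second pass.)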
if (pass == 0) { shownCalls += m_total.m_perClrAPIcalls[i]; shownMillis += millis; continue; } unsigned __int32 maxcyc = m_maximum.m_maxClrAPIcycles[i]; double max_ms = 1000.0 * maxcyc / countsPerSec; fprintf(f, " %-40s", APInames[i]); // API name fprintf(f, " %8u %9.1f ms", calls, millis); // #calls, total time fprintf(f, " %8.1f ms %8.1f ns", max_ms, 1000000.0 * millis / calls); // max, avg time fprintf(f, " %5.1f%%\n", 100.0 * millis / shownMillis); // % of total #ifdef DEBUG checkedCalls += m_total.m_perClrAPIcalls[i]; checkedMillis += millis; #endif } } #ifdef DEBUG assert(checkedCalls == shownCalls); assert(checkedMillis == shownMillis); #endif if (shownCalls > 0 || shownMillis > 0) { fprintf(f, " -------------------------"); fprintf(f, "---------------------------------------------------------------------------\n"); fprintf(f, " Total for calls shown above %8u %10.1f ms", shownCalls, shownMillis); if (totTime_ms > 0.0) fprintf(f, " (%4.1lf%% of overall JIT time)", shownMillis * 100.0 / totTime_ms); fprintf(f, "\n"); } fprintf(f, "\n"); } #endif fprintf(f, "\n"); } JitTimer::JitTimer(unsigned byteCodeSize) : m_info(byteCodeSize) { #if MEASURE_CLRAPI_CALLS m_CLRcallInvokes = 0; m_CLRcallCycles = 0; #endif #ifdef DEBUG m_lastPhase = (Phases)-1; #if MEASURE_CLRAPI_CALLS m_CLRcallAPInum = -1; #endif #endif unsigned __int64 threadCurCycles; if (_our_GetThreadCycles(&threadCurCycles)) { m_start = threadCurCycles; m_curPhaseStart = threadCurCycles; } } void JitTimer::EndPhase(Compiler* compiler, Phases phase) { // Otherwise... // We re-run some phases currently, so this following assert doesn't work. // assert((int)phase > (int)m_lastPhase); // We should end phases in increasing order. unsigned __int64 threadCurCycles; if (_our_GetThreadCycles(&threadCurCycles)) { unsigned __int64 phaseCycles = (threadCurCycles - m_curPhaseStart); // If this is not a leaf phase, the assumption is that the last subphase must have just recently ended. // Credit the duration to "slop", the total of which should be very small. if (PhaseHasChildren[phase]) { m_info.m_parentPhaseEndSlop += phaseCycles; } else { // It is a leaf phase. Credit duration to it. m_info.m_invokesByPhase[phase]++; m_info.m_cyclesByPhase[phase] += phaseCycles; #if MEASURE_CLRAPI_CALLS // Record the CLR API timing info as well. m_info.m_CLRinvokesByPhase[phase] += m_CLRcallInvokes; m_info.m_CLRcyclesByPhase[phase] += m_CLRcallCycles; #endif // Credit the phase's ancestors, if any. int ancPhase = PhaseParent[phase]; while (ancPhase != -1) { m_info.m_cyclesByPhase[ancPhase] += phaseCycles; ancPhase = PhaseParent[ancPhase]; } #if MEASURE_CLRAPI_CALLS const Phases lastPhase = PHASE_CLR_API; #else const Phases lastPhase = PHASE_NUMBER_OF; #endif if (phase + 1 == lastPhase) { m_info.m_totalCycles = (threadCurCycles - m_start); } else { m_curPhaseStart = threadCurCycles; } } if ((JitConfig.JitMeasureIR() != 0) && PhaseReportsIRSize[phase]) { m_info.m_nodeCountAfterPhase[phase] = compiler->fgMeasureIR(); } else { m_info.m_nodeCountAfterPhase[phase] = 0; } } #ifdef DEBUG m_lastPhase = phase; #endif #if MEASURE_CLRAPI_CALLS m_CLRcallInvokes = 0; m_CLRcallCycles = 0; #endif } #if MEASURE_CLRAPI_CALLS //------------------------------------------------------------------------ // JitTimer::CLRApiCallEnter: Start the stopwatch for an EE call. // // Arguments: // apix - The API index - an "enum API_ICorJitInfo_Names" value. 
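//
// Notes:
//    Calls may not nest: the matching CLRApiCallLeave must be seen before the next CLRApiCallEnter
//    (both ends assert on m_CLRcallAPInum to enforce this).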
// void JitTimer::CLRApiCallEnter(unsigned apix) { assert(m_CLRcallAPInum == -1); // Nested calls not allowed m_CLRcallAPInum = apix; // If we can't get the cycles, we'll just ignore this call if (!_our_GetThreadCycles(&m_CLRcallStart)) m_CLRcallStart = 0; } //------------------------------------------------------------------------ // JitTimer::CLRApiCallLeave: compute / record time spent in an EE call. // // Arguments: // apix - The API's "enum API_ICorJitInfo_Names" value; this value // should match the value passed to the most recent call to // "CLRApiCallEnter" (i.e. these must come as matched pairs), // and they also may not nest. // void JitTimer::CLRApiCallLeave(unsigned apix) { // Make sure we're actually inside a measured CLR call. assert(m_CLRcallAPInum != -1); m_CLRcallAPInum = -1; // Ignore this one if we don't have a valid starting counter. if (m_CLRcallStart != 0) { if (JitConfig.JitEECallTimingInfo() != 0) { unsigned __int64 threadCurCycles; if (_our_GetThreadCycles(&threadCurCycles)) { // Compute the cycles spent in the call. threadCurCycles -= m_CLRcallStart; // Add the cycles to the 'phase' and bump its use count. m_info.m_cyclesByPhase[PHASE_CLR_API] += threadCurCycles; m_info.m_invokesByPhase[PHASE_CLR_API] += 1; // Add the values to the "per API" info. m_info.m_allClrAPIcycles += threadCurCycles; m_info.m_allClrAPIcalls += 1; m_info.m_perClrAPIcalls[apix] += 1; m_info.m_perClrAPIcycles[apix] += threadCurCycles; m_info.m_maxClrAPIcycles[apix] = max(m_info.m_maxClrAPIcycles[apix], (unsigned __int32)threadCurCycles); // Subtract the cycles from the enclosing phase by bumping its start time m_curPhaseStart += threadCurCycles; // Update the running totals. m_CLRcallInvokes += 1; m_CLRcallCycles += threadCurCycles; } } m_CLRcallStart = 0; } assert(m_CLRcallAPInum != -1); // No longer in this API call. m_CLRcallAPInum = -1; } #endif // MEASURE_CLRAPI_CALLS CritSecObject JitTimer::s_csvLock; // It's expensive to constantly open and close the file, so open it once and close it // when the process exits. This should be accessed under the s_csvLock. FILE* JitTimer::s_csvFile = nullptr; LPCWSTR Compiler::JitTimeLogCsv() { LPCWSTR jitTimeLogCsv = JitConfig.JitTimeLogCsv(); return jitTimeLogCsv; } void JitTimer::PrintCsvHeader() { LPCWSTR jitTimeLogCsv = Compiler::JitTimeLogCsv(); if (jitTimeLogCsv == nullptr) { return; } CritSecHolder csvLock(s_csvLock); if (s_csvFile == nullptr) { s_csvFile = _wfopen(jitTimeLogCsv, W("a")); } if (s_csvFile != nullptr) { // Seek to the end of the file s.t. 
`ftell` doesn't lie to us on Windows fseek(s_csvFile, 0, SEEK_END); // Write the header if the file is empty if (ftell(s_csvFile) == 0) { fprintf(s_csvFile, "\"Method Name\","); fprintf(s_csvFile, "\"Assembly or SPMI Index\","); fprintf(s_csvFile, "\"IL Bytes\","); fprintf(s_csvFile, "\"Basic Blocks\","); fprintf(s_csvFile, "\"Min Opts\","); fprintf(s_csvFile, "\"Loops\","); fprintf(s_csvFile, "\"Loops Cloned\","); #if FEATURE_LOOP_ALIGN #ifdef DEBUG fprintf(s_csvFile, "\"Alignment Candidates\","); fprintf(s_csvFile, "\"Loops Aligned\","); #endif // DEBUG #endif // FEATURE_LOOP_ALIGN for (int i = 0; i < PHASE_NUMBER_OF; i++) { fprintf(s_csvFile, "\"%s\",", PhaseNames[i]); if ((JitConfig.JitMeasureIR() != 0) && PhaseReportsIRSize[i]) { fprintf(s_csvFile, "\"Node Count After %s\",", PhaseNames[i]); } } InlineStrategy::DumpCsvHeader(s_csvFile); fprintf(s_csvFile, "\"Executable Code Bytes\","); fprintf(s_csvFile, "\"GC Info Bytes\","); fprintf(s_csvFile, "\"Total Bytes Allocated\","); fprintf(s_csvFile, "\"Total Cycles\","); fprintf(s_csvFile, "\"CPS\"\n"); fflush(s_csvFile); } } } void JitTimer::PrintCsvMethodStats(Compiler* comp) { LPCWSTR jitTimeLogCsv = Compiler::JitTimeLogCsv(); if (jitTimeLogCsv == nullptr) { return; } // eeGetMethodFullName uses locks, so don't enter crit sec before this call. #if defined(DEBUG) || defined(LATE_DISASM) // If we already have computed the name because for some reason we're generating the CSV // for a DEBUG build (presumably not for the time info), just re-use it. const char* methName = comp->info.compFullName; #else const char* methName = comp->eeGetMethodFullName(comp->info.compMethodHnd); #endif // Try and access the SPMI index to report in the data set. // // If the jit is not hosted under SPMI this will return the // default value of zero. // // Query the jit host directly here instead of going via the // config cache, since value will change for each method. 
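    // A non-zero SPMI index is reported in place of the assembly-name column below; otherwise the
    // assembly name is queried from the EE.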
int index = g_jitHost->getIntConfigValue(W("SuperPMIMethodContextNumber"), -1); CritSecHolder csvLock(s_csvLock); if (s_csvFile == nullptr) { return; } fprintf(s_csvFile, "\"%s\",", methName); if (index != 0) { fprintf(s_csvFile, "%d,", index); } else { const char* methodAssemblyName = comp->info.compCompHnd->getAssemblyName( comp->info.compCompHnd->getModuleAssembly(comp->info.compCompHnd->getClassModule(comp->info.compClassHnd))); fprintf(s_csvFile, "\"%s\",", methodAssemblyName); } fprintf(s_csvFile, "%u,", comp->info.compILCodeSize); fprintf(s_csvFile, "%u,", comp->fgBBcount); fprintf(s_csvFile, "%u,", comp->opts.MinOpts()); fprintf(s_csvFile, "%u,", comp->optLoopCount); fprintf(s_csvFile, "%u,", comp->optLoopsCloned); #if FEATURE_LOOP_ALIGN #ifdef DEBUG fprintf(s_csvFile, "%u,", comp->loopAlignCandidates); fprintf(s_csvFile, "%u,", comp->loopsAligned); #endif // DEBUG #endif // FEATURE_LOOP_ALIGN unsigned __int64 totCycles = 0; for (int i = 0; i < PHASE_NUMBER_OF; i++) { if (!PhaseHasChildren[i]) { totCycles += m_info.m_cyclesByPhase[i]; } fprintf(s_csvFile, "%I64u,", m_info.m_cyclesByPhase[i]); if ((JitConfig.JitMeasureIR() != 0) && PhaseReportsIRSize[i]) { fprintf(s_csvFile, "%u,", m_info.m_nodeCountAfterPhase[i]); } } comp->m_inlineStrategy->DumpCsvData(s_csvFile); fprintf(s_csvFile, "%u,", comp->info.compNativeCodeSize); fprintf(s_csvFile, "%Iu,", comp->compInfoBlkSize); fprintf(s_csvFile, "%Iu,", comp->compGetArenaAllocator()->getTotalBytesAllocated()); fprintf(s_csvFile, "%I64u,", m_info.m_totalCycles); fprintf(s_csvFile, "%f\n", CachedCyclesPerSecond()); fflush(s_csvFile); } // Perform process shutdown actions. // // static void JitTimer::Shutdown() { CritSecHolder csvLock(s_csvLock); if (s_csvFile != nullptr) { fclose(s_csvFile); } } // Completes the timing of the current method, and adds it to "sum". void JitTimer::Terminate(Compiler* comp, CompTimeSummaryInfo& sum, bool includePhases) { if (includePhases) { PrintCsvMethodStats(comp); } sum.AddInfo(m_info, includePhases); } #endif // FEATURE_JIT_METHOD_PERF #if LOOP_HOIST_STATS // Static fields. CritSecObject Compiler::s_loopHoistStatsLock; // Default constructor. unsigned Compiler::s_loopsConsidered = 0; unsigned Compiler::s_loopsWithHoistedExpressions = 0; unsigned Compiler::s_totalHoistedExpressions = 0; // static void Compiler::PrintAggregateLoopHoistStats(FILE* f) { fprintf(f, "\n"); fprintf(f, "---------------------------------------------------\n"); fprintf(f, "Loop hoisting stats\n"); fprintf(f, "---------------------------------------------------\n"); double pctWithHoisted = 0.0; if (s_loopsConsidered > 0) { pctWithHoisted = 100.0 * (double(s_loopsWithHoistedExpressions) / double(s_loopsConsidered)); } double exprsPerLoopWithExpr = 0.0; if (s_loopsWithHoistedExpressions > 0) { exprsPerLoopWithExpr = double(s_totalHoistedExpressions) / double(s_loopsWithHoistedExpressions); } fprintf(f, "Considered %d loops. 
Of these, we hoisted expressions out of %d (%6.2f%%).\n", s_loopsConsidered, s_loopsWithHoistedExpressions, pctWithHoisted); fprintf(f, " A total of %d expressions were hoisted, an average of %5.2f per loop-with-hoisted-expr.\n", s_totalHoistedExpressions, exprsPerLoopWithExpr); } void Compiler::AddLoopHoistStats() { CritSecHolder statsLock(s_loopHoistStatsLock); s_loopsConsidered += m_loopsConsidered; s_loopsWithHoistedExpressions += m_loopsWithHoistedExpressions; s_totalHoistedExpressions += m_totalHoistedExpressions; } void Compiler::PrintPerMethodLoopHoistStats() { double pctWithHoisted = 0.0; if (m_loopsConsidered > 0) { pctWithHoisted = 100.0 * (double(m_loopsWithHoistedExpressions) / double(m_loopsConsidered)); } double exprsPerLoopWithExpr = 0.0; if (m_loopsWithHoistedExpressions > 0) { exprsPerLoopWithExpr = double(m_totalHoistedExpressions) / double(m_loopsWithHoistedExpressions); } printf("Considered %d loops. Of these, we hoisted expressions out of %d (%5.2f%%).\n", m_loopsConsidered, m_loopsWithHoistedExpressions, pctWithHoisted); printf(" A total of %d expressions were hoisted, an average of %5.2f per loop-with-hoisted-expr.\n", m_totalHoistedExpressions, exprsPerLoopWithExpr); } #endif // LOOP_HOIST_STATS //------------------------------------------------------------------------ // RecordStateAtEndOfInlining: capture timing data (if enabled) after // inlining as completed. // // Note: // Records data needed for SQM and inlining data dumps. Should be // called after inlining is complete. (We do this after inlining // because this marks the last point at which the JIT is likely to // cause type-loading and class initialization). void Compiler::RecordStateAtEndOfInlining() { #if defined(DEBUG) || defined(INLINE_DATA) m_compCyclesAtEndOfInlining = 0; m_compTickCountAtEndOfInlining = 0; bool b = CycleTimer::GetThreadCyclesS(&m_compCyclesAtEndOfInlining); if (!b) { return; // We don't have a thread cycle counter. } m_compTickCountAtEndOfInlining = GetTickCount(); #endif // defined(DEBUG) || defined(INLINE_DATA) } //------------------------------------------------------------------------ // RecordStateAtEndOfCompilation: capture timing data (if enabled) after // compilation is completed. void Compiler::RecordStateAtEndOfCompilation() { #if defined(DEBUG) || defined(INLINE_DATA) // Common portion m_compCycles = 0; unsigned __int64 compCyclesAtEnd; bool b = CycleTimer::GetThreadCyclesS(&compCyclesAtEnd); if (!b) { return; // We don't have a thread cycle counter. } assert(compCyclesAtEnd >= m_compCyclesAtEndOfInlining); m_compCycles = compCyclesAtEnd - m_compCyclesAtEndOfInlining; #endif // defined(DEBUG) || defined(INLINE_DATA) } #if FUNC_INFO_LOGGING // static LPCWSTR Compiler::compJitFuncInfoFilename = nullptr; // static FILE* Compiler::compJitFuncInfoFile = nullptr; #endif // FUNC_INFO_LOGGING #ifdef DEBUG // dumpConvertedVarSet() dumps the varset bits that are tracked // variable indices, and we convert them to variable numbers, sort the variable numbers, and // print them as variable numbers. To do this, we use a temporary set indexed by // variable number. We can't use the "all varset" type because it is still size-limited, and might // not be big enough to handle all possible variable numbers. void dumpConvertedVarSet(Compiler* comp, VARSET_VALARG_TP vars) { BYTE* pVarNumSet; // trivial set: one byte per varNum, 0 means not in set, 1 means in set. 
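    // (Strategy: allocate one byte per local on the stack, mark the byte for each tracked-index
    //  member's lclNum, then walk the locals in lclNum order so the printed set comes out sorted.)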
size_t varNumSetBytes = comp->lvaCount * sizeof(BYTE); pVarNumSet = (BYTE*)_alloca(varNumSetBytes); memset(pVarNumSet, 0, varNumSetBytes); // empty the set VarSetOps::Iter iter(comp, vars); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { unsigned varNum = comp->lvaTrackedIndexToLclNum(varIndex); pVarNumSet[varNum] = 1; // This varNum is in the set } bool first = true; printf("{"); for (size_t varNum = 0; varNum < comp->lvaCount; varNum++) { if (pVarNumSet[varNum] == 1) { if (!first) { printf(" "); } printf("V%02u", varNum); first = false; } } printf("}"); } /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Debugging helpers XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ /* The following functions are intended to be called from the debugger, to dump * various data structures. * * The versions that start with 'c' take a Compiler* as the first argument. * The versions that start with 'd' use the tlsCompiler, so don't require a Compiler*. * * Summary: * cBlock, dBlock : Display a basic block (call fgTableDispBasicBlock()). * cBlocks, dBlocks : Display all the basic blocks of a function (call fgDispBasicBlocks()). * cBlocksV, dBlocksV : Display all the basic blocks of a function (call fgDispBasicBlocks(true)). * "V" means "verbose", and will dump all the trees. * cStmt, dStmt : Display a Statement (call gtDispStmt()). * cTree, dTree : Display a tree (call gtDispTree()). * cTreeLIR, dTreeLIR : Display a tree in LIR form (call gtDispLIRNode()). * cTrees, dTrees : Display all the trees in a function (call fgDumpTrees()). * cEH, dEH : Display the EH handler table (call fgDispHandlerTab()). * cVar, dVar : Display a local variable given its number (call lvaDumpEntry()). * cVarDsc, dVarDsc : Display a local variable given a LclVarDsc* (call lvaDumpEntry()). * cVars, dVars : Display the local variable table (call lvaTableDump()). * cVarsFinal, dVarsFinal : Display the local variable table (call lvaTableDump(FINAL_FRAME_LAYOUT)). * cBlockCheapPreds, dBlockCheapPreds : Display a block's cheap predecessors (call block->dspCheapPreds()). * cBlockPreds, dBlockPreds : Display a block's predecessors (call block->dspPreds()). * cBlockSuccs, dBlockSuccs : Display a block's successors (call block->dspSuccs(compiler)). * cReach, dReach : Display all block reachability (call fgDispReach()). * cDoms, dDoms : Display all block dominators (call fgDispDoms()). * cLiveness, dLiveness : Display per-block variable liveness (call fgDispBBLiveness()). * cCVarSet, dCVarSet : Display a "converted" VARSET_TP: the varset is assumed to be tracked variable * indices. These are converted to variable numbers and sorted. (Calls * dumpConvertedVarSet()). * cLoop, dLoop : Display the blocks of a loop, including the trees. * cTreeFlags, dTreeFlags : Display tree flags * * The following don't require a Compiler* to work: * dRegMask : Display a regMaskTP (call dspRegMask(mask)). * dBlockList : Display a BasicBlockList*. 
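 *
 * For example, from the debugger's watch or immediate window one can evaluate expressions such as
 *    dTree(tree)      - dump a single node
 *    dFindBlock(5)    - locate BB05 and cache it in the global 'dbBlock'
 * (the 'd' variants require the current thread to be a JIT thread so JitTls::GetCompiler() is valid).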
*/ void cBlock(Compiler* comp, BasicBlock* block) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Block %u\n", sequenceNumber++); comp->fgTableDispBasicBlock(block); } void cBlocks(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Blocks %u\n", sequenceNumber++); comp->fgDispBasicBlocks(); } void cBlocksV(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *BlocksV %u\n", sequenceNumber++); comp->fgDispBasicBlocks(true); } void cStmt(Compiler* comp, Statement* statement) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Stmt %u\n", sequenceNumber++); comp->gtDispStmt(statement, ">>>"); } void cTree(Compiler* comp, GenTree* tree) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Tree %u\n", sequenceNumber++); comp->gtDispTree(tree, nullptr, ">>>"); } void cTreeLIR(Compiler* comp, GenTree* tree) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *TreeLIR %u\n", sequenceNumber++); comp->gtDispLIRNode(tree); } void cTrees(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Trees %u\n", sequenceNumber++); comp->fgDumpTrees(comp->fgFirstBB, nullptr); } void cEH(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *EH %u\n", sequenceNumber++); comp->fgDispHandlerTab(); } void cVar(Compiler* comp, unsigned lclNum) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Var %u\n", sequenceNumber++); comp->lvaDumpEntry(lclNum, Compiler::FINAL_FRAME_LAYOUT); } void cVarDsc(Compiler* comp, LclVarDsc* varDsc) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *VarDsc %u\n", sequenceNumber++); unsigned lclNum = comp->lvaGetLclNum(varDsc); comp->lvaDumpEntry(lclNum, Compiler::FINAL_FRAME_LAYOUT); } void cVars(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Vars %u\n", sequenceNumber++); comp->lvaTableDump(); } void cVarsFinal(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called 
printf("===================================================================== *Vars %u\n", sequenceNumber++); comp->lvaTableDump(Compiler::FINAL_FRAME_LAYOUT); } void cBlockCheapPreds(Compiler* comp, BasicBlock* block) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *BlockCheapPreds %u\n", sequenceNumber++); block->dspCheapPreds(); } void cBlockPreds(Compiler* comp, BasicBlock* block) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *BlockPreds %u\n", sequenceNumber++); block->dspPreds(); } void cBlockSuccs(Compiler* comp, BasicBlock* block) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *BlockSuccs %u\n", sequenceNumber++); block->dspSuccs(comp); } void cReach(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Reach %u\n", sequenceNumber++); comp->fgDispReach(); } void cDoms(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Doms %u\n", sequenceNumber++); comp->fgDispDoms(); } void cLiveness(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Liveness %u\n", sequenceNumber++); comp->fgDispBBLiveness(); } void cCVarSet(Compiler* comp, VARSET_VALARG_TP vars) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *CVarSet %u\n", sequenceNumber++); dumpConvertedVarSet(comp, vars); printf("\n"); // dumpConvertedVarSet() doesn't emit a trailing newline } void cLoop(Compiler* comp, unsigned loopNum) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Loop %u\n", sequenceNumber++); comp->optPrintLoopInfo(loopNum, /* verbose */ true); printf("\n"); } void cLoopPtr(Compiler* comp, const Compiler::LoopDsc* loop) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *LoopPtr %u\n", sequenceNumber++); comp->optPrintLoopInfo(loop, /* verbose */ true); printf("\n"); } void cLoops(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Loops %u\n", sequenceNumber++); comp->optPrintLoopTable(); } void dBlock(BasicBlock* block) { cBlock(JitTls::GetCompiler(), block); } void dBlocks() { cBlocks(JitTls::GetCompiler()); } void dBlocksV() { cBlocksV(JitTls::GetCompiler()); } void dStmt(Statement* statement) { cStmt(JitTls::GetCompiler(), statement); } void dTree(GenTree* tree) { 
cTree(JitTls::GetCompiler(), tree); } void dTreeLIR(GenTree* tree) { cTreeLIR(JitTls::GetCompiler(), tree); } void dTreeRange(GenTree* first, GenTree* last) { Compiler* comp = JitTls::GetCompiler(); GenTree* cur = first; while (true) { cTreeLIR(comp, cur); if (cur == last) break; cur = cur->gtNext; } } void dTrees() { cTrees(JitTls::GetCompiler()); } void dEH() { cEH(JitTls::GetCompiler()); } void dVar(unsigned lclNum) { cVar(JitTls::GetCompiler(), lclNum); } void dVarDsc(LclVarDsc* varDsc) { cVarDsc(JitTls::GetCompiler(), varDsc); } void dVars() { cVars(JitTls::GetCompiler()); } void dVarsFinal() { cVarsFinal(JitTls::GetCompiler()); } void dBlockPreds(BasicBlock* block) { cBlockPreds(JitTls::GetCompiler(), block); } void dBlockCheapPreds(BasicBlock* block) { cBlockCheapPreds(JitTls::GetCompiler(), block); } void dBlockSuccs(BasicBlock* block) { cBlockSuccs(JitTls::GetCompiler(), block); } void dReach() { cReach(JitTls::GetCompiler()); } void dDoms() { cDoms(JitTls::GetCompiler()); } void dLiveness() { cLiveness(JitTls::GetCompiler()); } void dCVarSet(VARSET_VALARG_TP vars) { cCVarSet(JitTls::GetCompiler(), vars); } void dLoop(unsigned loopNum) { cLoop(JitTls::GetCompiler(), loopNum); } void dLoopPtr(const Compiler::LoopDsc* loop) { cLoopPtr(JitTls::GetCompiler(), loop); } void dLoops() { cLoops(JitTls::GetCompiler()); } void dRegMask(regMaskTP mask) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== dRegMask %u\n", sequenceNumber++); dspRegMask(mask); printf("\n"); // dspRegMask() doesn't emit a trailing newline } void dBlockList(BasicBlockList* list) { printf("WorkList: "); while (list != nullptr) { printf(FMT_BB " ", list->block->bbNum); list = list->next; } printf("\n"); } // Global variables available in debug mode. That are set by debug APIs for finding // Trees, Stmts, and/or Blocks using id or bbNum. // That can be used in watch window or as a way to get address of fields for data break points. GenTree* dbTree; Statement* dbStmt; BasicBlock* dbTreeBlock; BasicBlock* dbBlock; // Debug APIs for finding Trees, Stmts, and/or Blocks. // As a side effect, they set the debug variables above. GenTree* dFindTree(GenTree* tree, unsigned id) { if (tree == nullptr) { return nullptr; } if (tree->gtTreeID == id) { dbTree = tree; return tree; } GenTree* child = nullptr; tree->VisitOperands([&child, id](GenTree* operand) -> GenTree::VisitResult { child = dFindTree(child, id); return (child != nullptr) ? 
GenTree::VisitResult::Abort : GenTree::VisitResult::Continue; }); return child; } GenTree* dFindTree(unsigned id) { Compiler* comp = JitTls::GetCompiler(); GenTree* tree; dbTreeBlock = nullptr; dbTree = nullptr; for (BasicBlock* const block : comp->Blocks()) { for (Statement* const stmt : block->Statements()) { tree = dFindTree(stmt->GetRootNode(), id); if (tree != nullptr) { dbTreeBlock = block; return tree; } } } return nullptr; } Statement* dFindStmt(unsigned id) { Compiler* comp = JitTls::GetCompiler(); dbStmt = nullptr; unsigned stmtId = 0; for (BasicBlock* const block : comp->Blocks()) { for (Statement* const stmt : block->Statements()) { stmtId++; if (stmtId == id) { dbStmt = stmt; return stmt; } } } return nullptr; } BasicBlock* dFindBlock(unsigned bbNum) { Compiler* comp = JitTls::GetCompiler(); BasicBlock* block = nullptr; dbBlock = nullptr; for (block = comp->fgFirstBB; block != nullptr; block = block->bbNext) { if (block->bbNum == bbNum) { dbBlock = block; break; } } return block; } Compiler::LoopDsc* dFindLoop(unsigned loopNum) { Compiler* comp = JitTls::GetCompiler(); if (loopNum >= comp->optLoopCount) { printf("loopNum %u out of range\n"); return nullptr; } return &comp->optLoopTable[loopNum]; } void cTreeFlags(Compiler* comp, GenTree* tree) { int chars = 0; if (tree->gtFlags != 0) { chars += printf("flags="); // Node flags CLANG_FORMAT_COMMENT_ANCHOR; #if defined(DEBUG) if (tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE) { chars += printf("[NODE_LARGE]"); } if (tree->gtDebugFlags & GTF_DEBUG_NODE_SMALL) { chars += printf("[NODE_SMALL]"); } if (tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) { chars += printf("[MORPHED]"); } #endif // defined(DEBUG) if (tree->gtFlags & GTF_COLON_COND) { chars += printf("[COLON_COND]"); } // Operator flags genTreeOps op = tree->OperGet(); switch (op) { case GT_LCL_VAR: case GT_LCL_VAR_ADDR: case GT_LCL_FLD: case GT_LCL_FLD_ADDR: case GT_STORE_LCL_FLD: case GT_STORE_LCL_VAR: if (tree->gtFlags & GTF_VAR_DEF) { chars += printf("[VAR_DEF]"); } if (tree->gtFlags & GTF_VAR_USEASG) { chars += printf("[VAR_USEASG]"); } if (tree->gtFlags & GTF_VAR_CAST) { chars += printf("[VAR_CAST]"); } if (tree->gtFlags & GTF_VAR_ITERATOR) { chars += printf("[VAR_ITERATOR]"); } if (tree->gtFlags & GTF_VAR_CLONED) { chars += printf("[VAR_CLONED]"); } if (tree->gtFlags & GTF_VAR_DEATH) { chars += printf("[VAR_DEATH]"); } if (tree->gtFlags & GTF_VAR_ARR_INDEX) { chars += printf("[VAR_ARR_INDEX]"); } #if defined(DEBUG) if (tree->gtDebugFlags & GTF_DEBUG_VAR_CSE_REF) { chars += printf("[VAR_CSE_REF]"); } #endif break; case GT_NO_OP: break; case GT_FIELD: if (tree->gtFlags & GTF_FLD_VOLATILE) { chars += printf("[FLD_VOLATILE]"); } break; case GT_INDEX: if (tree->gtFlags & GTF_INX_STRING_LAYOUT) { chars += printf("[INX_STRING_LAYOUT]"); } FALLTHROUGH; case GT_INDEX_ADDR: if (tree->gtFlags & GTF_INX_RNGCHK) { chars += printf("[INX_RNGCHK]"); } break; case GT_IND: case GT_STOREIND: if (tree->gtFlags & GTF_IND_VOLATILE) { chars += printf("[IND_VOLATILE]"); } if (tree->gtFlags & GTF_IND_TGTANYWHERE) { chars += printf("[IND_TGTANYWHERE]"); } if (tree->gtFlags & GTF_IND_TGT_NOT_HEAP) { chars += printf("[IND_TGT_NOT_HEAP]"); } if (tree->gtFlags & GTF_IND_TLS_REF) { chars += printf("[IND_TLS_REF]"); } if (tree->gtFlags & GTF_IND_ASG_LHS) { chars += printf("[IND_ASG_LHS]"); } if (tree->gtFlags & GTF_IND_UNALIGNED) { chars += printf("[IND_UNALIGNED]"); } if (tree->gtFlags & GTF_IND_INVARIANT) { chars += printf("[IND_INVARIANT]"); } if (tree->gtFlags & GTF_IND_NONNULL) { chars += 
printf("[IND_NONNULL]"); } break; case GT_CLS_VAR: if (tree->gtFlags & GTF_CLS_VAR_ASG_LHS) { chars += printf("[CLS_VAR_ASG_LHS]"); } break; case GT_MUL: #if !defined(TARGET_64BIT) case GT_MUL_LONG: #endif if (tree->gtFlags & GTF_MUL_64RSLT) { chars += printf("[64RSLT]"); } if (tree->gtFlags & GTF_ADDRMODE_NO_CSE) { chars += printf("[ADDRMODE_NO_CSE]"); } break; case GT_ADD: if (tree->gtFlags & GTF_ADDRMODE_NO_CSE) { chars += printf("[ADDRMODE_NO_CSE]"); } break; case GT_LSH: if (tree->gtFlags & GTF_ADDRMODE_NO_CSE) { chars += printf("[ADDRMODE_NO_CSE]"); } break; case GT_MOD: case GT_UMOD: break; case GT_EQ: case GT_NE: case GT_LT: case GT_LE: case GT_GT: case GT_GE: if (tree->gtFlags & GTF_RELOP_NAN_UN) { chars += printf("[RELOP_NAN_UN]"); } if (tree->gtFlags & GTF_RELOP_JMP_USED) { chars += printf("[RELOP_JMP_USED]"); } break; case GT_QMARK: if (tree->gtFlags & GTF_QMARK_CAST_INSTOF) { chars += printf("[QMARK_CAST_INSTOF]"); } break; case GT_BOX: if (tree->gtFlags & GTF_BOX_VALUE) { chars += printf("[BOX_VALUE]"); } break; case GT_CNS_INT: { GenTreeFlags handleKind = (tree->gtFlags & GTF_ICON_HDL_MASK); switch (handleKind) { case GTF_ICON_SCOPE_HDL: chars += printf("[ICON_SCOPE_HDL]"); break; case GTF_ICON_CLASS_HDL: chars += printf("[ICON_CLASS_HDL]"); break; case GTF_ICON_METHOD_HDL: chars += printf("[ICON_METHOD_HDL]"); break; case GTF_ICON_FIELD_HDL: chars += printf("[ICON_FIELD_HDL]"); break; case GTF_ICON_STATIC_HDL: chars += printf("[ICON_STATIC_HDL]"); break; case GTF_ICON_STR_HDL: chars += printf("[ICON_STR_HDL]"); break; case GTF_ICON_CONST_PTR: chars += printf("[ICON_CONST_PTR]"); break; case GTF_ICON_GLOBAL_PTR: chars += printf("[ICON_GLOBAL_PTR]"); break; case GTF_ICON_VARG_HDL: chars += printf("[ICON_VARG_HDL]"); break; case GTF_ICON_PINVKI_HDL: chars += printf("[ICON_PINVKI_HDL]"); break; case GTF_ICON_TOKEN_HDL: chars += printf("[ICON_TOKEN_HDL]"); break; case GTF_ICON_TLS_HDL: chars += printf("[ICON_TLD_HDL]"); break; case GTF_ICON_FTN_ADDR: chars += printf("[ICON_FTN_ADDR]"); break; case GTF_ICON_CIDMID_HDL: chars += printf("[ICON_CIDMID_HDL]"); break; case GTF_ICON_BBC_PTR: chars += printf("[ICON_BBC_PTR]"); break; case GTF_ICON_STATIC_BOX_PTR: chars += printf("[GTF_ICON_STATIC_BOX_PTR]"); break; case GTF_ICON_FIELD_OFF: chars += printf("[ICON_FIELD_OFF]"); break; default: assert(!"a forgotten handle flag"); break; } } break; case GT_OBJ: case GT_STORE_OBJ: if (tree->AsObj()->GetLayout()->HasGCPtr()) { chars += printf("[BLK_HASGCPTR]"); } FALLTHROUGH; case GT_BLK: case GT_STORE_BLK: case GT_STORE_DYN_BLK: if (tree->gtFlags & GTF_BLK_VOLATILE) { chars += printf("[BLK_VOLATILE]"); } if (tree->AsBlk()->IsUnaligned()) { chars += printf("[BLK_UNALIGNED]"); } break; case GT_CALL: if (tree->gtFlags & GTF_CALL_UNMANAGED) { chars += printf("[CALL_UNMANAGED]"); } if (tree->gtFlags & GTF_CALL_INLINE_CANDIDATE) { chars += printf("[CALL_INLINE_CANDIDATE]"); } if (!tree->AsCall()->IsVirtual()) { chars += printf("[CALL_NONVIRT]"); } if (tree->AsCall()->IsVirtualVtable()) { chars += printf("[CALL_VIRT_VTABLE]"); } if (tree->AsCall()->IsVirtualStub()) { chars += printf("[CALL_VIRT_STUB]"); } if (tree->gtFlags & GTF_CALL_NULLCHECK) { chars += printf("[CALL_NULLCHECK]"); } if (tree->gtFlags & GTF_CALL_POP_ARGS) { chars += printf("[CALL_POP_ARGS]"); } if (tree->gtFlags & GTF_CALL_HOISTABLE) { chars += printf("[CALL_HOISTABLE]"); } // More flags associated with calls. 
{ GenTreeCall* call = tree->AsCall(); if (call->gtCallMoreFlags & GTF_CALL_M_EXPLICIT_TAILCALL) { chars += printf("[CALL_M_EXPLICIT_TAILCALL]"); } if (call->gtCallMoreFlags & GTF_CALL_M_TAILCALL) { chars += printf("[CALL_M_TAILCALL]"); } if (call->gtCallMoreFlags & GTF_CALL_M_VARARGS) { chars += printf("[CALL_M_VARARGS]"); } if (call->gtCallMoreFlags & GTF_CALL_M_RETBUFFARG) { chars += printf("[CALL_M_RETBUFFARG]"); } if (call->gtCallMoreFlags & GTF_CALL_M_DELEGATE_INV) { chars += printf("[CALL_M_DELEGATE_INV]"); } if (call->gtCallMoreFlags & GTF_CALL_M_NOGCCHECK) { chars += printf("[CALL_M_NOGCCHECK]"); } if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) { chars += printf("[CALL_M_SPECIAL_INTRINSIC]"); } if (call->IsUnmanaged()) { if (call->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) { chars += printf("[CALL_M_UNMGD_THISCALL]"); } } else if (call->IsVirtualStub()) { if (call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT) { chars += printf("[CALL_M_VIRTSTUB_REL_INDIRECT]"); } } else if (!call->IsVirtual()) { if (call->gtCallMoreFlags & GTF_CALL_M_NONVIRT_SAME_THIS) { chars += printf("[CALL_M_NONVIRT_SAME_THIS]"); } } if (call->gtCallMoreFlags & GTF_CALL_M_FRAME_VAR_DEATH) { chars += printf("[CALL_M_FRAME_VAR_DEATH]"); } if (call->gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_JIT_HELPER) { chars += printf("[CALL_M_TAILCALL_VIA_JIT_HELPER]"); } #if FEATURE_TAILCALL_OPT if (call->gtCallMoreFlags & GTF_CALL_M_IMPLICIT_TAILCALL) { chars += printf("[CALL_M_IMPLICIT_TAILCALL]"); } #endif if (call->gtCallMoreFlags & GTF_CALL_M_PINVOKE) { chars += printf("[CALL_M_PINVOKE]"); } if (call->IsFatPointerCandidate()) { chars += printf("[CALL_FAT_POINTER_CANDIDATE]"); } if (call->IsGuarded()) { chars += printf("[CALL_GUARDED]"); } if (call->IsExpRuntimeLookup()) { chars += printf("[CALL_EXP_RUNTIME_LOOKUP]"); } } break; default: { GenTreeFlags flags = (tree->gtFlags & (~(GTF_COMMON_MASK | GTF_OVERFLOW))); if (flags != 0) { chars += printf("[%08X]", flags); } } break; } // Common flags. 
if (tree->gtFlags & GTF_ASG) { chars += printf("[ASG]"); } if (tree->gtFlags & GTF_CALL) { chars += printf("[CALL]"); } switch (op) { case GT_MUL: case GT_CAST: case GT_ADD: case GT_SUB: if (tree->gtFlags & GTF_OVERFLOW) { chars += printf("[OVERFLOW]"); } break; default: break; } if (tree->gtFlags & GTF_EXCEPT) { chars += printf("[EXCEPT]"); } if (tree->gtFlags & GTF_GLOB_REF) { chars += printf("[GLOB_REF]"); } if (tree->gtFlags & GTF_ORDER_SIDEEFF) { chars += printf("[ORDER_SIDEEFF]"); } if (tree->gtFlags & GTF_REVERSE_OPS) { if (op != GT_LCL_VAR) { chars += printf("[REVERSE_OPS]"); } } if (tree->gtFlags & GTF_SPILLED) { chars += printf("[SPILLED_OPER]"); } #if FEATURE_SET_FLAGS if (tree->gtFlags & GTF_SET_FLAGS) { if ((op != GT_IND) && (op != GT_STOREIND)) { chars += printf("[ZSF_SET_FLAGS]"); } } #endif if (tree->gtFlags & GTF_IND_NONFAULTING) { if (tree->OperIsIndirOrArrLength()) { chars += printf("[IND_NONFAULTING]"); } } if (tree->gtFlags & GTF_MAKE_CSE) { chars += printf("[MAKE_CSE]"); } if (tree->gtFlags & GTF_DONT_CSE) { chars += printf("[DONT_CSE]"); } if (tree->gtFlags & GTF_BOOLEAN) { chars += printf("[BOOLEAN]"); } if (tree->gtFlags & GTF_UNSIGNED) { chars += printf("[SMALL_UNSIGNED]"); } if (tree->gtFlags & GTF_LATE_ARG) { chars += printf("[SMALL_LATE_ARG]"); } if (tree->gtFlags & GTF_SPILL) { chars += printf("[SPILL]"); } if (tree->gtFlags & GTF_REUSE_REG_VAL) { if (op == GT_CNS_INT) { chars += printf("[REUSE_REG_VAL]"); } } } } void dTreeFlags(GenTree* tree) { cTreeFlags(JitTls::GetCompiler(), tree); } #endif // DEBUG #if VARSET_COUNTOPS // static BitSetSupport::BitSetOpCounter Compiler::m_varsetOpCounter("VarSetOpCounts.log"); #endif #if ALLVARSET_COUNTOPS // static BitSetSupport::BitSetOpCounter Compiler::m_allvarsetOpCounter("AllVarSetOpCounts.log"); #endif // static HelperCallProperties Compiler::s_helperCallProperties; /*****************************************************************************/ /*****************************************************************************/ //------------------------------------------------------------------------ // killGCRefs: // Given some tree node return does it need all GC refs to be spilled from // callee save registers. // // Arguments: // tree - the tree for which we ask about gc refs. // // Return Value: // true - tree kills GC refs on callee save registers // false - tree doesn't affect GC refs on callee save registers bool Compiler::killGCRefs(GenTree* tree) { if (tree->IsCall()) { GenTreeCall* call = tree->AsCall(); if (call->IsUnmanaged()) { return true; } if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_JIT_PINVOKE_BEGIN)) { assert(opts.ShouldUsePInvokeHelpers()); return true; } } else if (tree->OperIs(GT_START_PREEMPTGC)) { return true; } return false; } //------------------------------------------------------------------------ // lvaIsOSRLocal: check if this local var is one that requires special // treatment for OSR compilations. 
// // Arguments: // varNum - variable of interest // // Return Value: // true - this is an OSR compile and this local requires special treatment // false - not an OSR compile, or not an interesting local for OSR bool Compiler::lvaIsOSRLocal(unsigned varNum) { if (!opts.IsOSR()) { return false; } if (varNum < info.compLocalsCount) { return true; } LclVarDsc* varDsc = lvaGetDesc(varNum); if (varDsc->lvIsStructField) { return (varDsc->lvParentLcl < info.compLocalsCount); } return false; } //------------------------------------------------------------------------------ // gtTypeForNullCheck: helper to get the most optimal and correct type for nullcheck // // Arguments: // tree - the node for nullcheck; // var_types Compiler::gtTypeForNullCheck(GenTree* tree) { if (varTypeIsArithmetic(tree)) { #if defined(TARGET_XARCH) // Just an optimization for XARCH - smaller mov if (varTypeIsLong(tree)) { return TYP_INT; } #endif return tree->TypeGet(); } // for the rest: probe a single byte to avoid potential AVEs return TYP_BYTE; } //------------------------------------------------------------------------------ // gtChangeOperToNullCheck: helper to change tree oper to a NULLCHECK. // // Arguments: // tree - the node to change; // basicBlock - basic block of the node. // // Notes: // the function should not be called after lowering for platforms that do not support // emitting NULLCHECK nodes, like arm32. Use `Lowering::TransformUnusedIndirection` // that handles it and calls this function when appropriate. // void Compiler::gtChangeOperToNullCheck(GenTree* tree, BasicBlock* block) { assert(tree->OperIs(GT_FIELD, GT_IND, GT_OBJ, GT_BLK)); tree->ChangeOper(GT_NULLCHECK); tree->ChangeType(gtTypeForNullCheck(tree)); block->bbFlags |= BBF_HAS_NULLCHECK; optMethodFlags |= OMF_HAS_NULLCHECK; } #if defined(DEBUG) //------------------------------------------------------------------------------ // devirtualizationDetailToString: describe the detailed devirtualization reason // // Arguments: // detail - detail to describe // // Returns: // descriptive string // const char* Compiler::devirtualizationDetailToString(CORINFO_DEVIRTUALIZATION_DETAIL detail) { switch (detail) { case CORINFO_DEVIRTUALIZATION_UNKNOWN: return "unknown"; case CORINFO_DEVIRTUALIZATION_SUCCESS: return "success"; case CORINFO_DEVIRTUALIZATION_FAILED_CANON: return "object class was canonical"; case CORINFO_DEVIRTUALIZATION_FAILED_COM: return "object class was com"; case CORINFO_DEVIRTUALIZATION_FAILED_CAST: return "object class could not be cast to interface class"; case CORINFO_DEVIRTUALIZATION_FAILED_LOOKUP: return "interface method could not be found"; case CORINFO_DEVIRTUALIZATION_FAILED_DIM: return "interface method was default interface method"; case CORINFO_DEVIRTUALIZATION_FAILED_SUBCLASS: return "object not subclass of base class"; case CORINFO_DEVIRTUALIZATION_FAILED_SLOT: return "virtual method installed via explicit override"; case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE: return "devirtualization crossed version bubble"; case CORINFO_DEVIRTUALIZATION_MULTIPLE_IMPL: return "object class has multiple implementations of interface"; case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_CLASS_DECL: return "decl method is defined on class and decl method not in version bubble, and decl method not in " "type closest to version bubble"; case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_INTERFACE_DECL: return "decl method is defined on interface and not in version bubble, and implementation type not " "entirely defined in bubble"; case 
CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_IMPL: return "object class not defined within version bubble"; case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_IMPL_NOT_REFERENCEABLE: return "object class cannot be referenced from R2R code due to missing tokens"; case CORINFO_DEVIRTUALIZATION_FAILED_DUPLICATE_INTERFACE: return "crossgen2 virtual method algorithm and runtime algorithm differ in the presence of duplicate " "interface implementations"; case CORINFO_DEVIRTUALIZATION_FAILED_DECL_NOT_REPRESENTABLE: return "Decl method cannot be represented in R2R image"; default: return "undefined"; } } #endif // defined(DEBUG) #if TRACK_ENREG_STATS Compiler::EnregisterStats Compiler::s_enregisterStats; void Compiler::EnregisterStats::RecordLocal(const LclVarDsc* varDsc) { m_totalNumberOfVars++; if (varDsc->TypeGet() == TYP_STRUCT) { m_totalNumberOfStructVars++; } if (!varDsc->lvDoNotEnregister) { m_totalNumberOfEnregVars++; if (varDsc->TypeGet() == TYP_STRUCT) { m_totalNumberOfStructEnregVars++; } } else { switch (varDsc->GetDoNotEnregReason()) { case DoNotEnregisterReason::AddrExposed: m_addrExposed++; break; case DoNotEnregisterReason::DontEnregStructs: m_dontEnregStructs++; break; case DoNotEnregisterReason::NotRegSizeStruct: m_notRegSizeStruct++; break; case DoNotEnregisterReason::LocalField: m_localField++; break; case DoNotEnregisterReason::VMNeedsStackAddr: m_VMNeedsStackAddr++; break; case DoNotEnregisterReason::LiveInOutOfHandler: m_liveInOutHndlr++; break; case DoNotEnregisterReason::BlockOp: m_blockOp++; break; case DoNotEnregisterReason::IsStructArg: m_structArg++; break; case DoNotEnregisterReason::DepField: m_depField++; break; case DoNotEnregisterReason::NoRegVars: m_noRegVars++; break; case DoNotEnregisterReason::MinOptsGC: m_minOptsGC++; break; #if !defined(TARGET_64BIT) case DoNotEnregisterReason::LongParamField: m_longParamField++; break; #endif #ifdef JIT32_GCENCODER case DoNotEnregisterReason::PinningRef: m_PinningRef++; break; #endif case DoNotEnregisterReason::LclAddrNode: m_lclAddrNode++; break; case DoNotEnregisterReason::CastTakesAddr: m_castTakesAddr++; break; case DoNotEnregisterReason::StoreBlkSrc: m_storeBlkSrc++; break; case DoNotEnregisterReason::OneAsgRetyping: m_oneAsgRetyping++; break; case DoNotEnregisterReason::SwizzleArg: m_swizzleArg++; break; case DoNotEnregisterReason::BlockOpRet: m_blockOpRet++; break; case DoNotEnregisterReason::ReturnSpCheck: m_returnSpCheck++; break; case DoNotEnregisterReason::SimdUserForcesDep: m_simdUserForcesDep++; break; default: unreached(); break; } if (varDsc->GetDoNotEnregReason() == DoNotEnregisterReason::AddrExposed) { // We can't `assert(IsAddressExposed())` because `fgAdjustForAddressExposedOrWrittenThis` // does not clear `m_doNotEnregReason` on `this`. 
switch (varDsc->GetAddrExposedReason()) { case AddressExposedReason::PARENT_EXPOSED: m_parentExposed++; break; case AddressExposedReason::TOO_CONSERVATIVE: m_tooConservative++; break; case AddressExposedReason::ESCAPE_ADDRESS: m_escapeAddress++; break; case AddressExposedReason::WIDE_INDIR: m_wideIndir++; break; case AddressExposedReason::OSR_EXPOSED: m_osrExposed++; break; case AddressExposedReason::STRESS_LCL_FLD: m_stressLclFld++; break; case AddressExposedReason::COPY_FLD_BY_FLD: m_copyFldByFld++; break; case AddressExposedReason::DISPATCH_RET_BUF: m_dispatchRetBuf++; break; default: unreached(); break; } } } } void Compiler::EnregisterStats::Dump(FILE* fout) const { const unsigned totalNumberOfNotStructVars = s_enregisterStats.m_totalNumberOfVars - s_enregisterStats.m_totalNumberOfStructVars; const unsigned totalNumberOfNotStructEnregVars = s_enregisterStats.m_totalNumberOfEnregVars - s_enregisterStats.m_totalNumberOfStructEnregVars; const unsigned notEnreg = s_enregisterStats.m_totalNumberOfVars - s_enregisterStats.m_totalNumberOfEnregVars; fprintf(fout, "\nLocals enregistration statistics:\n"); if (m_totalNumberOfVars == 0) { fprintf(fout, "No locals to report.\n"); return; } fprintf(fout, "total number of locals: %d, number of enregistered: %d, notEnreg: %d, ratio: %.2f\n", m_totalNumberOfVars, m_totalNumberOfEnregVars, m_totalNumberOfVars - m_totalNumberOfEnregVars, (float)m_totalNumberOfEnregVars / m_totalNumberOfVars); if (m_totalNumberOfStructVars != 0) { fprintf(fout, "total number of struct locals: %d, number of enregistered: %d, notEnreg: %d, ratio: %.2f\n", m_totalNumberOfStructVars, m_totalNumberOfStructEnregVars, m_totalNumberOfStructVars - m_totalNumberOfStructEnregVars, (float)m_totalNumberOfStructEnregVars / m_totalNumberOfStructVars); } const unsigned numberOfPrimitiveLocals = totalNumberOfNotStructVars - totalNumberOfNotStructEnregVars; if (numberOfPrimitiveLocals != 0) { fprintf(fout, "total number of primitive locals: %d, number of enregistered: %d, notEnreg: %d, ratio: %.2f\n", totalNumberOfNotStructVars, totalNumberOfNotStructEnregVars, numberOfPrimitiveLocals, (float)totalNumberOfNotStructEnregVars / totalNumberOfNotStructVars); } if (notEnreg == 0) { fprintf(fout, "All locals are enregistered.\n"); return; } #define PRINT_STATS(stat, total) \ if (stat != 0) \ { \ fprintf(fout, #stat " %d, ratio: %.2f\n", stat, (float)stat / total); \ } PRINT_STATS(m_addrExposed, notEnreg); PRINT_STATS(m_dontEnregStructs, notEnreg); PRINT_STATS(m_notRegSizeStruct, notEnreg); PRINT_STATS(m_localField, notEnreg); PRINT_STATS(m_VMNeedsStackAddr, notEnreg); PRINT_STATS(m_liveInOutHndlr, notEnreg); PRINT_STATS(m_blockOp, notEnreg); PRINT_STATS(m_structArg, notEnreg); PRINT_STATS(m_depField, notEnreg); PRINT_STATS(m_noRegVars, notEnreg); PRINT_STATS(m_minOptsGC, notEnreg); #if !defined(TARGET_64BIT) PRINT_STATS(m_longParamField, notEnreg); #endif // !TARGET_64BIT #ifdef JIT32_GCENCODER PRINT_STATS(m_PinningRef, notEnreg); #endif // JIT32_GCENCODER PRINT_STATS(m_lclAddrNode, notEnreg); PRINT_STATS(m_castTakesAddr, notEnreg); PRINT_STATS(m_storeBlkSrc, notEnreg); PRINT_STATS(m_oneAsgRetyping, notEnreg); PRINT_STATS(m_swizzleArg, notEnreg); PRINT_STATS(m_blockOpRet, notEnreg); PRINT_STATS(m_returnSpCheck, notEnreg); PRINT_STATS(m_simdUserForcesDep, notEnreg); fprintf(fout, "\nAddr exposed details:\n"); if (m_addrExposed == 0) { fprintf(fout, "\nNo address exposed locals to report.\n"); return; } PRINT_STATS(m_parentExposed, m_addrExposed); PRINT_STATS(m_tooConservative, m_addrExposed); 
PRINT_STATS(m_escapeAddress, m_addrExposed); PRINT_STATS(m_wideIndir, m_addrExposed); PRINT_STATS(m_osrExposed, m_addrExposed); PRINT_STATS(m_stressLclFld, m_addrExposed); PRINT_STATS(m_copyFldByFld, m_addrExposed); PRINT_STATS(m_dispatchRetBuf, m_addrExposed); } #endif // TRACK_ENREG_STATS
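// Illustrative sketch (not part of compiler.cpp): the PRINT_STATS macro used by
// EnregisterStats::Dump above follows one reporting pattern -- print a counter only when it
// is non-zero, together with its ratio against a running total. The standalone program below
// reproduces that pattern; the names PRINT_STATS_SKETCH, sampleStat and total are hypothetical
// and exist only for this example.
#include <cstdio>

#define PRINT_STATS_SKETCH(stat, total)                                              \
    if ((stat) != 0)                                                                 \
    {                                                                                \
        /* #stat stringizes the counter name, mirroring the real macro's output */   \
        printf(#stat " %d, ratio: %.2f\n", (stat), (float)(stat) / (total));         \
    }

int main()
{
    int sampleStat = 3;  // hypothetical counter
    int total      = 12; // hypothetical total it is measured against
    PRINT_STATS_SKETCH(sampleStat, total); // prints: sampleStat 3, ratio: 0.25
    return 0;
}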
1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and therefore doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
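For illustration only, a minimal standalone sketch of the substitution described above: ABI-related checks stop consulting the raw `featureSIMD` flag (which follows `COMPlus_FeatureSIMD` and can be `false`) and instead ask `supportSIMDTypes()`, which must remain true on Arm64. Everything below (the `JitSketch` struct, its fields, and the before/after helpers) is a hypothetical mock, not code taken from the PR diff.

#include <cstdio>

// Hypothetical mock of the two queries named in the description; not the real Compiler class.
struct JitSketch
{
    bool featureSIMD; // mirrors COMPlus_FeatureSIMD and can be turned off by the user
    bool isArm64;     // stand-in for building for TARGET_ARM64

    // Sketch of the stated intent: SIMD types must always be treated as supported on Arm64.
    bool supportSIMDTypes() const
    {
        return isArm64 || featureSIMD;
    }

    // Before: ABI handling gated on the raw flag, which breaks when the flag is off on Arm64.
    bool simdAwareAbiBefore(bool isSimdStruct) const
    {
        return featureSIMD && isSimdStruct;
    }

    // After: ABI handling gated on supportSIMDTypes(), which stays correct on Arm64.
    bool simdAwareAbiAfter(bool isSimdStruct) const
    {
        return supportSIMDTypes() && isSimdStruct;
    }
};

int main()
{
    JitSketch jit{/* featureSIMD */ false, /* isArm64 */ true}; // COMPlus_FeatureSIMD=0 on Arm64
    printf("before: %d, after: %d\n", jit.simdAwareAbiBefore(true), jit.simdAwareAbiAfter(true));
    // Prints "before: 0, after: 1": only the 'after' form keeps SIMD-aware ABI handling.
    return 0;
}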
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and therefore doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/jit/compiler.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Compiler XX XX XX XX Represents the method data we are currently JIT-compiling. XX XX An instance of this class is created for every method we JIT. XX XX This contains all the info needed for the method. So allocating a XX XX a new instance per method makes it thread-safe. XX XX It should be used to do all the memory management for the compiler run. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ #ifndef _COMPILER_H_ #define _COMPILER_H_ /*****************************************************************************/ #include "jit.h" #include "opcode.h" #include "varset.h" #include "jitstd.h" #include "jithashtable.h" #include "gentree.h" #include "debuginfo.h" #include "lir.h" #include "block.h" #include "inline.h" #include "jiteh.h" #include "instr.h" #include "regalloc.h" #include "sm.h" #include "cycletimer.h" #include "blockset.h" #include "arraystack.h" #include "hashbv.h" #include "jitexpandarray.h" #include "tinyarray.h" #include "valuenum.h" #include "jittelemetry.h" #include "namedintrinsiclist.h" #ifdef LATE_DISASM #include "disasm.h" #endif #include "codegeninterface.h" #include "regset.h" #include "jitgcinfo.h" #if DUMP_GC_TABLES && defined(JIT32_GCENCODER) #include "gcdump.h" #endif #include "emit.h" #include "hwintrinsic.h" #include "simd.h" #include "simdashwintrinsic.h" // This is only used locally in the JIT to indicate that // a verification block should be inserted #define SEH_VERIFICATION_EXCEPTION 0xe0564552 // VER /***************************************************************************** * Forward declarations */ struct InfoHdr; // defined in GCInfo.h struct escapeMapping_t; // defined in fgdiagnostic.cpp class emitter; // defined in emit.h struct ShadowParamVarInfo; // defined in GSChecks.cpp struct InitVarDscInfo; // defined in register_arg_convention.h class FgStack; // defined in fgbasic.cpp class Instrumentor; // defined in fgprofile.cpp class SpanningTreeVisitor; // defined in fgprofile.cpp class CSE_DataFlow; // defined in OptCSE.cpp class OptBoolsDsc; // defined in optimizer.cpp #ifdef DEBUG struct IndentStack; #endif class Lowering; // defined in lower.h // The following are defined in this file, Compiler.h class Compiler; /***************************************************************************** * Unwind info */ #include "unwind.h" /*****************************************************************************/ // // Declare global operator new overloads that use the compiler's arena allocator // // I wanted to make the second argument optional, with default = CMK_Unknown, but that // caused these to be ambiguous with the global placement new operators. void* __cdecl operator new(size_t n, Compiler* context, CompMemKind cmk); void* __cdecl operator new[](size_t n, Compiler* context, CompMemKind cmk); void* __cdecl operator new(size_t n, void* p, const jitstd::placement_t& syntax_difference); // Requires the definitions of "operator new" so including "LoopCloning.h" after the definitions. 
#include "loopcloning.h" /*****************************************************************************/ /* This is included here and not earlier as it needs the definition of "CSE" * which is defined in the section above */ /*****************************************************************************/ unsigned genLog2(unsigned value); unsigned genLog2(unsigned __int64 value); unsigned ReinterpretHexAsDecimal(unsigned in); /*****************************************************************************/ const unsigned FLG_CCTOR = (CORINFO_FLG_CONSTRUCTOR | CORINFO_FLG_STATIC); #ifdef DEBUG const int BAD_STK_OFFS = 0xBAADF00D; // for LclVarDsc::lvStkOffs #endif //------------------------------------------------------------------------ // HFA info shared by LclVarDsc and fgArgTabEntry //------------------------------------------------------------------------ inline bool IsHfa(CorInfoHFAElemType kind) { return kind != CORINFO_HFA_ELEM_NONE; } inline var_types HfaTypeFromElemKind(CorInfoHFAElemType kind) { switch (kind) { case CORINFO_HFA_ELEM_FLOAT: return TYP_FLOAT; case CORINFO_HFA_ELEM_DOUBLE: return TYP_DOUBLE; #ifdef FEATURE_SIMD case CORINFO_HFA_ELEM_VECTOR64: return TYP_SIMD8; case CORINFO_HFA_ELEM_VECTOR128: return TYP_SIMD16; #endif case CORINFO_HFA_ELEM_NONE: return TYP_UNDEF; default: assert(!"Invalid HfaElemKind"); return TYP_UNDEF; } } inline CorInfoHFAElemType HfaElemKindFromType(var_types type) { switch (type) { case TYP_FLOAT: return CORINFO_HFA_ELEM_FLOAT; case TYP_DOUBLE: return CORINFO_HFA_ELEM_DOUBLE; #ifdef FEATURE_SIMD case TYP_SIMD8: return CORINFO_HFA_ELEM_VECTOR64; case TYP_SIMD16: return CORINFO_HFA_ELEM_VECTOR128; #endif case TYP_UNDEF: return CORINFO_HFA_ELEM_NONE; default: assert(!"Invalid HFA Type"); return CORINFO_HFA_ELEM_NONE; } } // The following holds the Local var info (scope information) typedef const char* VarName; // Actual ASCII string struct VarScopeDsc { unsigned vsdVarNum; // (remapped) LclVarDsc number unsigned vsdLVnum; // 'which' in eeGetLVinfo(). // Also, it is the index of this entry in the info.compVarScopes array, // which is useful since the array is also accessed via the // compEnterScopeList and compExitScopeList sorted arrays. IL_OFFSET vsdLifeBeg; // instr offset of beg of life IL_OFFSET vsdLifeEnd; // instr offset of end of life #ifdef DEBUG VarName vsdName; // name of the var #endif }; // This class stores information associated with a LclVar SSA definition. class LclSsaVarDsc { // The basic block where the definition occurs. Definitions of uninitialized variables // are considered to occur at the start of the first basic block (fgFirstBB). // // TODO-Cleanup: In the case of uninitialized variables the block is set to nullptr by // SsaBuilder and changed to fgFirstBB during value numbering. It would be useful to // investigate and perhaps eliminate this rather unexpected behavior. BasicBlock* m_block; // The GT_ASG node that generates the definition, or nullptr for definitions // of uninitialized variables. 
GenTreeOp* m_asg; public: LclSsaVarDsc() : m_block(nullptr), m_asg(nullptr) { } LclSsaVarDsc(BasicBlock* block, GenTreeOp* asg) : m_block(block), m_asg(asg) { assert((asg == nullptr) || asg->OperIs(GT_ASG)); } BasicBlock* GetBlock() const { return m_block; } void SetBlock(BasicBlock* block) { m_block = block; } GenTreeOp* GetAssignment() const { return m_asg; } void SetAssignment(GenTreeOp* asg) { assert((asg == nullptr) || asg->OperIs(GT_ASG)); m_asg = asg; } ValueNumPair m_vnPair; }; // This class stores information associated with a memory SSA definition. class SsaMemDef { public: ValueNumPair m_vnPair; }; //------------------------------------------------------------------------ // SsaDefArray: A resizable array of SSA definitions. // // Unlike an ordinary resizable array implementation, this allows only element // addition (by calling AllocSsaNum) and has special handling for RESERVED_SSA_NUM // (basically it's a 1-based array). The array doesn't impose any particular // requirements on the elements it stores and AllocSsaNum forwards its arguments // to the array element constructor, this way the array supports both LclSsaVarDsc // and SsaMemDef elements. // template <typename T> class SsaDefArray { T* m_array; unsigned m_arraySize; unsigned m_count; static_assert_no_msg(SsaConfig::RESERVED_SSA_NUM == 0); static_assert_no_msg(SsaConfig::FIRST_SSA_NUM == 1); // Get the minimum valid SSA number. unsigned GetMinSsaNum() const { return SsaConfig::FIRST_SSA_NUM; } // Increase (double) the size of the array. void GrowArray(CompAllocator alloc) { unsigned oldSize = m_arraySize; unsigned newSize = max(2, oldSize * 2); T* newArray = alloc.allocate<T>(newSize); for (unsigned i = 0; i < oldSize; i++) { newArray[i] = m_array[i]; } m_array = newArray; m_arraySize = newSize; } public: // Construct an empty SsaDefArray. SsaDefArray() : m_array(nullptr), m_arraySize(0), m_count(0) { } // Reset the array (used only if the SSA form is reconstructed). void Reset() { m_count = 0; } // Allocate a new SSA number (starting with SsaConfig::FIRST_SSA_NUM). template <class... Args> unsigned AllocSsaNum(CompAllocator alloc, Args&&... args) { if (m_count == m_arraySize) { GrowArray(alloc); } unsigned ssaNum = GetMinSsaNum() + m_count; m_array[m_count++] = T(std::forward<Args>(args)...); // Ensure that the first SSA number we allocate is SsaConfig::FIRST_SSA_NUM assert((ssaNum == SsaConfig::FIRST_SSA_NUM) || (m_count > 1)); return ssaNum; } // Get the number of SSA definitions in the array. unsigned GetCount() const { return m_count; } // Get a pointer to the SSA definition at the specified index. T* GetSsaDefByIndex(unsigned index) { assert(index < m_count); return &m_array[index]; } // Check if the specified SSA number is valid. bool IsValidSsaNum(unsigned ssaNum) const { return (GetMinSsaNum() <= ssaNum) && (ssaNum < (GetMinSsaNum() + m_count)); } // Get a pointer to the SSA definition associated with the specified SSA number. T* GetSsaDef(unsigned ssaNum) { assert(ssaNum != SsaConfig::RESERVED_SSA_NUM); return GetSsaDefByIndex(ssaNum - GetMinSsaNum()); } // Get an SSA number associated with the specified SSA def (that must be in this array). 
unsigned GetSsaNum(T* ssaDef) { assert((m_array <= ssaDef) && (ssaDef < &m_array[m_count])); return GetMinSsaNum() + static_cast<unsigned>(ssaDef - &m_array[0]); } }; enum RefCountState { RCS_INVALID, // not valid to get/set ref counts RCS_EARLY, // early counts for struct promotion and struct passing RCS_NORMAL, // normal ref counts (from lvaMarkRefs onward) }; #ifdef DEBUG // Reasons why we can't enregister a local. enum class DoNotEnregisterReason { None, AddrExposed, // the address of this local is exposed. DontEnregStructs, // struct enregistration is disabled. NotRegSizeStruct, // the struct size does not much any register size, usually the struct size is too big. LocalField, // the local is accessed with LCL_FLD, note we can do it not only for struct locals. VMNeedsStackAddr, LiveInOutOfHandler, // the local is alive in and out of exception handler and not signle def. BlockOp, // Is read or written via a block operation. IsStructArg, // Is a struct passed as an argument in a way that requires a stack location. DepField, // It is a field of a dependently promoted struct NoRegVars, // opts.compFlags & CLFLG_REGVAR is not set MinOptsGC, // It is a GC Ref and we are compiling MinOpts #if !defined(TARGET_64BIT) LongParamField, // It is a decomposed field of a long parameter. #endif #ifdef JIT32_GCENCODER PinningRef, #endif LclAddrNode, // the local is accessed with LCL_ADDR_VAR/FLD. CastTakesAddr, StoreBlkSrc, // the local is used as STORE_BLK source. OneAsgRetyping, // fgMorphOneAsgBlockOp prevents this local from being enregister. SwizzleArg, // the local is passed using LCL_FLD as another type. BlockOpRet, // the struct is returned and it promoted or there is a cast. ReturnSpCheck, // the local is used to do SP check SimdUserForcesDep // a promoted struct was used by a SIMD/HWI node; it must be dependently promoted }; enum class AddressExposedReason { NONE, PARENT_EXPOSED, // This is a promoted field but the parent is exposed. TOO_CONSERVATIVE, // Were marked as exposed to be conservative, fix these places. ESCAPE_ADDRESS, // The address is escaping, for example, passed as call argument. WIDE_INDIR, // We access via indirection with wider type. OSR_EXPOSED, // It was exposed in the original method, osr has to repeat it. STRESS_LCL_FLD, // Stress mode replaces localVar with localFld and makes them addrExposed. COPY_FLD_BY_FLD, // Field by field copy takes the address of the local, can be fixed. DISPATCH_RET_BUF // Caller return buffer dispatch. }; #endif // DEBUG class LclVarDsc { public: // The constructor. Most things can just be zero'ed. // // Initialize the ArgRegs to REG_STK. // Morph will update if this local is passed in a register. LclVarDsc() : _lvArgReg(REG_STK) , #if FEATURE_MULTIREG_ARGS _lvOtherArgReg(REG_STK) , #endif // FEATURE_MULTIREG_ARGS lvClassHnd(NO_CLASS_HANDLE) , lvRefBlks(BlockSetOps::UninitVal()) , lvPerSsaData() { } // note this only packs because var_types is a typedef of unsigned char var_types lvType : 5; // TYP_INT/LONG/FLOAT/DOUBLE/REF unsigned char lvIsParam : 1; // is this a parameter? unsigned char lvIsRegArg : 1; // is this an argument that was passed by register? unsigned char lvFramePointerBased : 1; // 0 = off of REG_SPBASE (e.g., ESP), 1 = off of REG_FPBASE (e.g., EBP) unsigned char lvOnFrame : 1; // (part of) the variable lives on the frame unsigned char lvRegister : 1; // assigned to live in a register? For RyuJIT backend, this is only set if the // variable is in the same register for the entire function. 
unsigned char lvTracked : 1; // is this a tracked variable? bool lvTrackedNonStruct() { return lvTracked && lvType != TYP_STRUCT; } unsigned char lvPinned : 1; // is this a pinned variable? unsigned char lvMustInit : 1; // must be initialized private: bool m_addrExposed : 1; // The address of this variable is "exposed" -- passed as an argument, stored in a // global location, etc. // We cannot reason reliably about the value of the variable. public: unsigned char lvDoNotEnregister : 1; // Do not enregister this variable. unsigned char lvFieldAccessed : 1; // The var is a struct local, and a field of the variable is accessed. Affects // struct promotion. unsigned char lvLiveInOutOfHndlr : 1; // The variable is live in or out of an exception handler, and therefore must // be on the stack (at least at those boundaries.) unsigned char lvInSsa : 1; // The variable is in SSA form (set by SsaBuilder) unsigned char lvIsCSE : 1; // Indicates if this LclVar is a CSE variable. unsigned char lvHasLdAddrOp : 1; // has ldloca or ldarga opcode on this local. unsigned char lvStackByref : 1; // This is a compiler temporary of TYP_BYREF that is known to point into our local // stack frame. unsigned char lvHasILStoreOp : 1; // there is at least one STLOC or STARG on this local unsigned char lvHasMultipleILStoreOp : 1; // there is more than one STLOC on this local unsigned char lvIsTemp : 1; // Short-lifetime compiler temp #if defined(TARGET_AMD64) || defined(TARGET_ARM64) unsigned char lvIsImplicitByRef : 1; // Set if the argument is an implicit byref. #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) unsigned char lvIsBoolean : 1; // set if variable is boolean unsigned char lvSingleDef : 1; // variable has a single def // before lvaMarkLocalVars: identifies ref type locals that can get type updates // after lvaMarkLocalVars: identifies locals that are suitable for optAddCopies unsigned char lvSingleDefRegCandidate : 1; // variable has a single def and hence is a register candidate // Currently, this is only used to decide if an EH variable can be // a register candiate or not. unsigned char lvDisqualifySingleDefRegCandidate : 1; // tracks variable that are disqualified from register // candidancy unsigned char lvSpillAtSingleDef : 1; // variable has a single def (as determined by LSRA interval scan) // and is spilled making it candidate to spill right after the // first (and only) definition. // Note: We cannot reuse lvSingleDefRegCandidate because it is set // in earlier phase and the information might not be appropriate // in LSRA. unsigned char lvDisqualify : 1; // variable is no longer OK for add copy optimization unsigned char lvVolatileHint : 1; // hint for AssertionProp #ifndef TARGET_64BIT unsigned char lvStructDoubleAlign : 1; // Must we double align this struct? #endif // !TARGET_64BIT #ifdef TARGET_64BIT unsigned char lvQuirkToLong : 1; // Quirk to allocate this LclVar as a 64-bit long #endif #ifdef DEBUG unsigned char lvKeepType : 1; // Don't change the type of this variable unsigned char lvNoLclFldStress : 1; // Can't apply local field stress on this one #endif unsigned char lvIsPtr : 1; // Might this be used in an address computation? (used by buffer overflow security // checks) unsigned char lvIsUnsafeBuffer : 1; // Does this contain an unsafe buffer requiring buffer overflow security checks? unsigned char lvPromoted : 1; // True when this local is a promoted struct, a normed struct, or a "split" long on a // 32-bit target. 
For implicit byref parameters, this gets hijacked between // fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to indicate whether // references to the arg are being rewritten as references to a promoted shadow local. unsigned char lvIsStructField : 1; // Is this local var a field of a promoted struct local? unsigned char lvOverlappingFields : 1; // True when we have a struct with possibly overlapping fields unsigned char lvContainsHoles : 1; // True when we have a promoted struct that contains holes unsigned char lvCustomLayout : 1; // True when this struct has "CustomLayout" unsigned char lvIsMultiRegArg : 1; // true if this is a multireg LclVar struct used in an argument context unsigned char lvIsMultiRegRet : 1; // true if this is a multireg LclVar struct assigned from a multireg call #ifdef FEATURE_HFA_FIELDS_PRESENT CorInfoHFAElemType _lvHfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA). #endif // FEATURE_HFA_FIELDS_PRESENT #ifdef DEBUG // TODO-Cleanup: See the note on lvSize() - this flag is only in use by asserts that are checking for struct // types, and is needed because of cases where TYP_STRUCT is bashed to an integral type. // Consider cleaning this up so this workaround is not required. unsigned char lvUnusedStruct : 1; // All references to this promoted struct are through its field locals. // I.e. there is no longer any reference to the struct directly. // In this case we can simply remove this struct local. unsigned char lvUndoneStructPromotion : 1; // The struct promotion was undone and hence there should be no // reference to the fields of this struct. #endif unsigned char lvLRACandidate : 1; // Tracked for linear scan register allocation purposes #ifdef FEATURE_SIMD // Note that both SIMD vector args and locals are marked as lvSIMDType = true, but the // type of an arg node is TYP_BYREF and a local node is TYP_SIMD*. unsigned char lvSIMDType : 1; // This is a SIMD struct unsigned char lvUsedInSIMDIntrinsic : 1; // This tells lclvar is used for simd intrinsic unsigned char lvSimdBaseJitType : 5; // Note: this only packs because CorInfoType has less than 32 entries CorInfoType GetSimdBaseJitType() const { return (CorInfoType)lvSimdBaseJitType; } void SetSimdBaseJitType(CorInfoType simdBaseJitType) { assert(simdBaseJitType < (1 << 5)); lvSimdBaseJitType = (unsigned char)simdBaseJitType; } var_types GetSimdBaseType() const; #endif // FEATURE_SIMD unsigned char lvRegStruct : 1; // This is a reg-sized non-field-addressed struct. unsigned char lvClassIsExact : 1; // lvClassHandle is the exact type #ifdef DEBUG unsigned char lvClassInfoUpdated : 1; // true if this var has updated class handle or exactness #endif unsigned char lvImplicitlyReferenced : 1; // true if there are non-IR references to this local (prolog, epilog, gc, // eh) unsigned char lvSuppressedZeroInit : 1; // local needs zero init if we transform tail call to loop unsigned char lvHasExplicitInit : 1; // The local is explicitly initialized and doesn't need zero initialization in // the prolog. If the local has gc pointers, there are no gc-safe points // between the prolog and the explicit initialization. union { unsigned lvFieldLclStart; // The index of the local var representing the first field in the promoted struct // local. For implicit byref parameters, this gets hijacked between // fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to point to the // struct local created to model the parameter's struct promotion, if any. 
unsigned lvParentLcl; // The index of the local var representing the parent (i.e. the promoted struct local). // Valid on promoted struct local fields. }; unsigned char lvFieldCnt; // Number of fields in the promoted VarDsc. unsigned char lvFldOffset; unsigned char lvFldOrdinal; #ifdef DEBUG unsigned char lvSingleDefDisqualifyReason = 'H'; #endif #if FEATURE_MULTIREG_ARGS regNumber lvRegNumForSlot(unsigned slotNum) { if (slotNum == 0) { return (regNumber)_lvArgReg; } else if (slotNum == 1) { return GetOtherArgReg(); } else { assert(false && "Invalid slotNum!"); } unreached(); } #endif // FEATURE_MULTIREG_ARGS CorInfoHFAElemType GetLvHfaElemKind() const { #ifdef FEATURE_HFA_FIELDS_PRESENT return _lvHfaElemKind; #else NOWAY_MSG("GetLvHfaElemKind"); return CORINFO_HFA_ELEM_NONE; #endif // FEATURE_HFA_FIELDS_PRESENT } void SetLvHfaElemKind(CorInfoHFAElemType elemKind) { #ifdef FEATURE_HFA_FIELDS_PRESENT _lvHfaElemKind = elemKind; #else NOWAY_MSG("SetLvHfaElemKind"); #endif // FEATURE_HFA_FIELDS_PRESENT } bool lvIsHfa() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetLvHfaElemKind()); } else { return false; } } bool lvIsHfaRegArg() const { if (GlobalJitOptions::compFeatureHfa) { return lvIsRegArg && lvIsHfa(); } else { return false; } } //------------------------------------------------------------------------------ // lvHfaSlots: Get the number of slots used by an HFA local // // Return Value: // On Arm64 - Returns 1-4 indicating the number of register slots used by the HFA // On Arm32 - Returns the total number of single FP register slots used by the HFA, max is 8 // unsigned lvHfaSlots() const { assert(lvIsHfa()); assert(varTypeIsStruct(lvType)); unsigned slots = 0; #ifdef TARGET_ARM slots = lvExactSize / sizeof(float); assert(slots <= 8); #elif defined(TARGET_ARM64) switch (GetLvHfaElemKind()) { case CORINFO_HFA_ELEM_NONE: assert(!"lvHfaSlots called for non-HFA"); break; case CORINFO_HFA_ELEM_FLOAT: assert((lvExactSize % 4) == 0); slots = lvExactSize >> 2; break; case CORINFO_HFA_ELEM_DOUBLE: case CORINFO_HFA_ELEM_VECTOR64: assert((lvExactSize % 8) == 0); slots = lvExactSize >> 3; break; case CORINFO_HFA_ELEM_VECTOR128: assert((lvExactSize % 16) == 0); slots = lvExactSize >> 4; break; default: unreached(); } assert(slots <= 4); #endif // TARGET_ARM64 return slots; } // lvIsMultiRegArgOrRet() // returns true if this is a multireg LclVar struct used in an argument context // or if this is a multireg LclVar struct assigned from a multireg call bool lvIsMultiRegArgOrRet() { return lvIsMultiRegArg || lvIsMultiRegRet; } #if defined(DEBUG) private: DoNotEnregisterReason m_doNotEnregReason; AddressExposedReason m_addrExposedReason; public: void SetDoNotEnregReason(DoNotEnregisterReason reason) { m_doNotEnregReason = reason; } DoNotEnregisterReason GetDoNotEnregReason() const { return m_doNotEnregReason; } AddressExposedReason GetAddrExposedReason() const { return m_addrExposedReason; } #endif // DEBUG public: void SetAddressExposed(bool value DEBUGARG(AddressExposedReason reason)) { m_addrExposed = value; INDEBUG(m_addrExposedReason = reason); } void CleanAddressExposed() { m_addrExposed = false; } bool IsAddressExposed() const { return m_addrExposed; } private: regNumberSmall _lvRegNum; // Used to store the register this variable is in (or, the low register of a // register pair). It is set during codegen any time the // variable is enregistered (lvRegister is only set // to non-zero if the variable gets the same register assignment for its entire // lifetime). 
#if !defined(TARGET_64BIT) regNumberSmall _lvOtherReg; // Used for "upper half" of long var. #endif // !defined(TARGET_64BIT) regNumberSmall _lvArgReg; // The (first) register in which this argument is passed. #if FEATURE_MULTIREG_ARGS regNumberSmall _lvOtherArgReg; // Used for the second part of the struct passed in a register. // Note this is defined but not used by ARM32 #endif // FEATURE_MULTIREG_ARGS regNumberSmall _lvArgInitReg; // the register into which the argument is moved at entry public: // The register number is stored in a small format (8 bits), but the getters return and the setters take // a full-size (unsigned) format, to localize the casts here. ///////////////////// regNumber GetRegNum() const { return (regNumber)_lvRegNum; } void SetRegNum(regNumber reg) { _lvRegNum = (regNumberSmall)reg; assert(_lvRegNum == reg); } ///////////////////// #if defined(TARGET_64BIT) regNumber GetOtherReg() const { assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072 // "unreachable code" warnings return REG_NA; } void SetOtherReg(regNumber reg) { assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072 // "unreachable code" warnings } #else // !TARGET_64BIT regNumber GetOtherReg() const { return (regNumber)_lvOtherReg; } void SetOtherReg(regNumber reg) { _lvOtherReg = (regNumberSmall)reg; assert(_lvOtherReg == reg); } #endif // !TARGET_64BIT ///////////////////// regNumber GetArgReg() const { return (regNumber)_lvArgReg; } void SetArgReg(regNumber reg) { _lvArgReg = (regNumberSmall)reg; assert(_lvArgReg == reg); } #if FEATURE_MULTIREG_ARGS regNumber GetOtherArgReg() const { return (regNumber)_lvOtherArgReg; } void SetOtherArgReg(regNumber reg) { _lvOtherArgReg = (regNumberSmall)reg; assert(_lvOtherArgReg == reg); } #endif // FEATURE_MULTIREG_ARGS #ifdef FEATURE_SIMD // Is this is a SIMD struct? bool lvIsSIMDType() const { return lvSIMDType; } // Is this is a SIMD struct which is used for SIMD intrinsic? bool lvIsUsedInSIMDIntrinsic() const { return lvUsedInSIMDIntrinsic; } #else // If feature_simd not enabled, return false bool lvIsSIMDType() const { return false; } bool lvIsUsedInSIMDIntrinsic() const { return false; } #endif ///////////////////// regNumber GetArgInitReg() const { return (regNumber)_lvArgInitReg; } void SetArgInitReg(regNumber reg) { _lvArgInitReg = (regNumberSmall)reg; assert(_lvArgInitReg == reg); } ///////////////////// bool lvIsRegCandidate() const { return lvLRACandidate != 0; } bool lvIsInReg() const { return lvIsRegCandidate() && (GetRegNum() != REG_STK); } regMaskTP lvRegMask() const { regMaskTP regMask = RBM_NONE; if (varTypeUsesFloatReg(TypeGet())) { if (GetRegNum() != REG_STK) { regMask = genRegMaskFloat(GetRegNum(), TypeGet()); } } else { if (GetRegNum() != REG_STK) { regMask = genRegMask(GetRegNum()); } } return regMask; } unsigned short lvVarIndex; // variable tracking index private: unsigned short m_lvRefCnt; // unweighted (real) reference count. For implicit by reference // parameters, this gets hijacked from fgResetImplicitByRefRefCount // through fgMarkDemotedImplicitByRefArgs, to provide a static // appearance count (computed during address-exposed analysis) // that fgMakeOutgoingStructArgCopy consults during global morph // to determine if eliding its copy is legal. 
weight_t m_lvRefCntWtd; // weighted reference count public: unsigned short lvRefCnt(RefCountState state = RCS_NORMAL) const; void incLvRefCnt(unsigned short delta, RefCountState state = RCS_NORMAL); void setLvRefCnt(unsigned short newValue, RefCountState state = RCS_NORMAL); weight_t lvRefCntWtd(RefCountState state = RCS_NORMAL) const; void incLvRefCntWtd(weight_t delta, RefCountState state = RCS_NORMAL); void setLvRefCntWtd(weight_t newValue, RefCountState state = RCS_NORMAL); private: int lvStkOffs; // stack offset of home in bytes. public: int GetStackOffset() const { return lvStkOffs; } void SetStackOffset(int offset) { lvStkOffs = offset; } unsigned lvExactSize; // (exact) size of the type in bytes // Is this a promoted struct? // This method returns true only for structs (including SIMD structs), not for // locals that are split on a 32-bit target. // It is only necessary to use this: // 1) if only structs are wanted, and // 2) if Lowering has already been done. // Otherwise lvPromoted is valid. bool lvPromotedStruct() { #if !defined(TARGET_64BIT) return (lvPromoted && !varTypeIsLong(lvType)); #else // defined(TARGET_64BIT) return lvPromoted; #endif // defined(TARGET_64BIT) } unsigned lvSize() const; size_t lvArgStackSize() const; unsigned lvSlotNum; // original slot # (if remapped) typeInfo lvVerTypeInfo; // type info needed for verification // class handle for the local or null if not known or not a class, // for a struct handle use `GetStructHnd()`. CORINFO_CLASS_HANDLE lvClassHnd; // Get class handle for a struct local or implicitByRef struct local. CORINFO_CLASS_HANDLE GetStructHnd() const { #ifdef FEATURE_SIMD if (lvSIMDType && (m_layout == nullptr)) { return NO_CLASS_HANDLE; } #endif assert(m_layout != nullptr); #if defined(TARGET_AMD64) || defined(TARGET_ARM64) assert(varTypeIsStruct(TypeGet()) || (lvIsImplicitByRef && (TypeGet() == TYP_BYREF))); #else assert(varTypeIsStruct(TypeGet())); #endif CORINFO_CLASS_HANDLE structHnd = m_layout->GetClassHandle(); assert(structHnd != NO_CLASS_HANDLE); return structHnd; } CORINFO_FIELD_HANDLE lvFieldHnd; // field handle for promoted struct fields private: ClassLayout* m_layout; // layout info for structs public: BlockSet lvRefBlks; // Set of blocks that contain refs Statement* lvDefStmt; // Pointer to the statement with the single definition void lvaDisqualifyVar(); // Call to disqualify a local variable from use in optAddCopies var_types TypeGet() const { return (var_types)lvType; } bool lvStackAligned() const { assert(lvIsStructField); return ((lvFldOffset % TARGET_POINTER_SIZE) == 0); } bool lvNormalizeOnLoad() const { return varTypeIsSmall(TypeGet()) && // lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore. (lvIsParam || m_addrExposed || lvIsStructField); } bool lvNormalizeOnStore() const { return varTypeIsSmall(TypeGet()) && // lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore. !(lvIsParam || m_addrExposed || lvIsStructField); } void incRefCnts(weight_t weight, Compiler* pComp, RefCountState state = RCS_NORMAL, bool propagate = true); var_types GetHfaType() const { if (GlobalJitOptions::compFeatureHfa) { assert(lvIsHfa()); return HfaTypeFromElemKind(GetLvHfaElemKind()); } else { return TYP_UNDEF; } } void SetHfaType(var_types type) { if (GlobalJitOptions::compFeatureHfa) { CorInfoHFAElemType elemKind = HfaElemKindFromType(type); SetLvHfaElemKind(elemKind); // Ensure we've allocated enough bits. 
assert(GetLvHfaElemKind() == elemKind); } } // Returns true if this variable contains GC pointers (including being a GC pointer itself). bool HasGCPtr() const { return varTypeIsGC(lvType) || ((lvType == TYP_STRUCT) && m_layout->HasGCPtr()); } // Returns the layout of a struct variable. ClassLayout* GetLayout() const { assert(varTypeIsStruct(lvType)); return m_layout; } // Sets the layout of a struct variable. void SetLayout(ClassLayout* layout) { assert(varTypeIsStruct(lvType)); assert((m_layout == nullptr) || ClassLayout::AreCompatible(m_layout, layout)); m_layout = layout; } SsaDefArray<LclSsaVarDsc> lvPerSsaData; // Returns the address of the per-Ssa data for the given ssaNum (which is required // not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is // not an SSA variable). LclSsaVarDsc* GetPerSsaData(unsigned ssaNum) { return lvPerSsaData.GetSsaDef(ssaNum); } // Returns the SSA number for "ssaDef". Requires "ssaDef" to be a valid definition // of this variable. unsigned GetSsaNumForSsaDef(LclSsaVarDsc* ssaDef) { return lvPerSsaData.GetSsaNum(ssaDef); } var_types GetRegisterType(const GenTreeLclVarCommon* tree) const; var_types GetRegisterType() const; var_types GetActualRegisterType() const; bool IsEnregisterableType() const { return GetRegisterType() != TYP_UNDEF; } bool IsEnregisterableLcl() const { if (lvDoNotEnregister) { return false; } return IsEnregisterableType(); } //----------------------------------------------------------------------------- // IsAlwaysAliveInMemory: Determines if this variable's value is always // up-to-date on stack. This is possible if this is an EH-var or // we decided to spill after single-def. // bool IsAlwaysAliveInMemory() const { return lvLiveInOutOfHndlr || lvSpillAtSingleDef; } bool CanBeReplacedWithItsField(Compiler* comp) const; #ifdef DEBUG public: const char* lvReason; void PrintVarReg() const { printf("%s", getRegName(GetRegNum())); } #endif // DEBUG }; // class LclVarDsc enum class SymbolicIntegerValue : int32_t { LongMin, IntMin, ShortMin, ByteMin, Zero, One, ByteMax, UByteMax, ShortMax, UShortMax, IntMax, UIntMax, LongMax, }; inline constexpr bool operator>(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) > static_cast<int32_t>(right); } inline constexpr bool operator>=(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) >= static_cast<int32_t>(right); } inline constexpr bool operator<(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) < static_cast<int32_t>(right); } inline constexpr bool operator<=(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) <= static_cast<int32_t>(right); } // Represents an integral range useful for reasoning about integral casts. // It uses a symbolic representation for lower and upper bounds so // that it can efficiently handle integers of all sizes on all hosts. // // Note that the ranges represented by this class are **always** in the // "signed" domain. This is so that if we know the range a node produces, it // can be trivially used to determine if a cast above the node does or does not // overflow, which requires that the interpretation of integers be the same both // for the "input" and "output". We choose signed interpretation here because it // produces nice continuous ranges and because IR uses sign-extension for constants. // // Some examples of how ranges are computed for casts: // 1. 
CAST_OVF(ubyte <- uint): does not overflow for [0..UBYTE_MAX], produces the // same range - all casts that do not change the representation, i. e. have the same // "actual" input and output type, have the same "input" and "output" range. // 2. CAST_OVF(ulong <- uint): never oveflows => the "input" range is [INT_MIN..INT_MAX] // (aka all possible 32 bit integers). Produces [0..UINT_MAX] (aka all possible 32 // bit integers zero-extended to 64 bits). // 3. CAST_OVF(int <- uint): overflows for inputs larger than INT_MAX <=> less than 0 // when interpreting as signed => the "input" range is [0..INT_MAX], the same range // being the produced one as the node does not change the width of the integer. // class IntegralRange { private: SymbolicIntegerValue m_lowerBound; SymbolicIntegerValue m_upperBound; public: IntegralRange() = default; IntegralRange(SymbolicIntegerValue lowerBound, SymbolicIntegerValue upperBound) : m_lowerBound(lowerBound), m_upperBound(upperBound) { assert(lowerBound <= upperBound); } bool Contains(int64_t value) const; bool Contains(IntegralRange other) const { return (m_lowerBound <= other.m_lowerBound) && (other.m_upperBound <= m_upperBound); } bool IsPositive() { return m_lowerBound >= SymbolicIntegerValue::Zero; } bool Equals(IntegralRange other) const { return (m_lowerBound == other.m_lowerBound) && (m_upperBound == other.m_upperBound); } static int64_t SymbolicToRealValue(SymbolicIntegerValue value); static SymbolicIntegerValue LowerBoundForType(var_types type); static SymbolicIntegerValue UpperBoundForType(var_types type); static IntegralRange ForType(var_types type) { return {LowerBoundForType(type), UpperBoundForType(type)}; } static IntegralRange ForNode(GenTree* node, Compiler* compiler); static IntegralRange ForCastInput(GenTreeCast* cast); static IntegralRange ForCastOutput(GenTreeCast* cast); #ifdef DEBUG static void Print(IntegralRange range); #endif // DEBUG }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX TempsInfo XX XX XX XX The temporary lclVars allocated by the compiler for code generation XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /***************************************************************************** * * The following keeps track of temporaries allocated in the stack frame * during code-generation (after register allocation). These spill-temps are * only used if we run out of registers while evaluating a tree. * * These are different from the more common temps allocated by lvaGrabTemp(). 
*/ class TempDsc { public: TempDsc* tdNext; private: int tdOffs; #ifdef DEBUG static const int BAD_TEMP_OFFSET = 0xDDDDDDDD; // used as a sentinel "bad value" for tdOffs in DEBUG #endif // DEBUG int tdNum; BYTE tdSize; var_types tdType; public: TempDsc(int _tdNum, unsigned _tdSize, var_types _tdType) : tdNum(_tdNum), tdSize((BYTE)_tdSize), tdType(_tdType) { #ifdef DEBUG // temps must have a negative number (so they have a different number from all local variables) assert(tdNum < 0); tdOffs = BAD_TEMP_OFFSET; #endif // DEBUG if (tdNum != _tdNum) { IMPL_LIMITATION("too many spill temps"); } } #ifdef DEBUG bool tdLegalOffset() const { return tdOffs != BAD_TEMP_OFFSET; } #endif // DEBUG int tdTempOffs() const { assert(tdLegalOffset()); return tdOffs; } void tdSetTempOffs(int offs) { tdOffs = offs; assert(tdLegalOffset()); } void tdAdjustTempOffs(int offs) { tdOffs += offs; assert(tdLegalOffset()); } int tdTempNum() const { assert(tdNum < 0); return tdNum; } unsigned tdTempSize() const { return tdSize; } var_types tdTempType() const { return tdType; } }; // interface to hide linearscan implementation from rest of compiler class LinearScanInterface { public: virtual void doLinearScan() = 0; virtual void recordVarLocationsAtStartOfBB(BasicBlock* bb) = 0; virtual bool willEnregisterLocalVars() const = 0; #if TRACK_LSRA_STATS virtual void dumpLsraStatsCsv(FILE* file) = 0; virtual void dumpLsraStatsSummary(FILE* file) = 0; #endif // TRACK_LSRA_STATS }; LinearScanInterface* getLinearScanAllocator(Compiler* comp); // Information about arrays: their element type and size, and the offset of the first element. // We label GT_IND's that are array indices with GTF_IND_ARR_INDEX, and, for such nodes, // associate an array info via the map retrieved by GetArrayInfoMap(). This information is used, // for example, in value numbering of array index expressions. struct ArrayInfo { var_types m_elemType; CORINFO_CLASS_HANDLE m_elemStructType; unsigned m_elemSize; unsigned m_elemOffset; ArrayInfo() : m_elemType(TYP_UNDEF), m_elemStructType(nullptr), m_elemSize(0), m_elemOffset(0) { } ArrayInfo(var_types elemType, unsigned elemSize, unsigned elemOffset, CORINFO_CLASS_HANDLE elemStructType) : m_elemType(elemType), m_elemStructType(elemStructType), m_elemSize(elemSize), m_elemOffset(elemOffset) { } }; // This enumeration names the phases into which we divide compilation. The phases should completely // partition a compilation. enum Phases { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) enum_nm, #include "compphases.h" PHASE_NUMBER_OF }; extern const char* PhaseNames[]; extern const char* PhaseEnums[]; extern const LPCWSTR PhaseShortNames[]; // Specify which checks should be run after each phase // enum class PhaseChecks { CHECK_NONE, CHECK_ALL }; // Specify compiler data that a phase might modify enum class PhaseStatus : unsigned { MODIFIED_NOTHING, MODIFIED_EVERYTHING }; // The following enum provides a simple 1:1 mapping to CLR API's enum API_ICorJitInfo_Names { #define DEF_CLR_API(name) API_##name, #include "ICorJitInfo_API_names.h" API_COUNT }; //--------------------------------------------------------------- // Compilation time. // // A "CompTimeInfo" is a structure for tracking the compilation time of one or more methods. // We divide a compilation into a sequence of contiguous phases, and track the total (per-thread) cycles // of the compilation, as well as the cycles for each phase. We also track the number of bytecodes. 
// If there is a failure in reading a timer at any point, the "CompTimeInfo" becomes invalid, as indicated // by "m_timerFailure" being true. // If FEATURE_JIT_METHOD_PERF is not set, we define a minimal form of this, enough to let other code compile. struct CompTimeInfo { #ifdef FEATURE_JIT_METHOD_PERF // The string names of the phases. static const char* PhaseNames[]; static bool PhaseHasChildren[]; static int PhaseParent[]; static bool PhaseReportsIRSize[]; unsigned m_byteCodeBytes; unsigned __int64 m_totalCycles; unsigned __int64 m_invokesByPhase[PHASE_NUMBER_OF]; unsigned __int64 m_cyclesByPhase[PHASE_NUMBER_OF]; #if MEASURE_CLRAPI_CALLS unsigned __int64 m_CLRinvokesByPhase[PHASE_NUMBER_OF]; unsigned __int64 m_CLRcyclesByPhase[PHASE_NUMBER_OF]; #endif unsigned m_nodeCountAfterPhase[PHASE_NUMBER_OF]; // For better documentation, we call EndPhase on // non-leaf phases. We should also call EndPhase on the // last leaf subphase; obviously, the elapsed cycles between the EndPhase // for the last leaf subphase and the EndPhase for an ancestor should be very small. // We add all such "redundant end phase" intervals to this variable below; we print // it out in a report, so we can verify that it is, indeed, very small. If it ever // isn't, this means that we're doing something significant between the end of the last // declared subphase and the end of its parent. unsigned __int64 m_parentPhaseEndSlop; bool m_timerFailure; #if MEASURE_CLRAPI_CALLS // The following measures the time spent inside each individual CLR API call. unsigned m_allClrAPIcalls; unsigned m_perClrAPIcalls[API_ICorJitInfo_Names::API_COUNT]; unsigned __int64 m_allClrAPIcycles; unsigned __int64 m_perClrAPIcycles[API_ICorJitInfo_Names::API_COUNT]; unsigned __int32 m_maxClrAPIcycles[API_ICorJitInfo_Names::API_COUNT]; #endif // MEASURE_CLRAPI_CALLS CompTimeInfo(unsigned byteCodeBytes); #endif }; #ifdef FEATURE_JIT_METHOD_PERF #if MEASURE_CLRAPI_CALLS struct WrapICorJitInfo; #endif // This class summarizes the JIT time information over the course of a run: the number of methods compiled, // and the total and maximum timings. (These are instances of the "CompTimeInfo" type described above). // The operation of adding a single method's timing to the summary may be performed concurrently by several // threads, so it is protected by a lock. // This class is intended to be used as a singleton type, with only a single instance. class CompTimeSummaryInfo { // This lock protects the fields of all CompTimeSummaryInfo(s) (of which we expect there to be one). static CritSecObject s_compTimeSummaryLock; int m_numMethods; int m_totMethods; CompTimeInfo m_total; CompTimeInfo m_maximum; int m_numFilteredMethods; CompTimeInfo m_filtered; // This can use what ever data you want to determine if the value to be added // belongs in the filtered section (it's always included in the unfiltered section) bool IncludedInFilteredData(CompTimeInfo& info); public: // This is the unique CompTimeSummaryInfo object for this instance of the runtime. static CompTimeSummaryInfo s_compTimeSummary; CompTimeSummaryInfo() : m_numMethods(0), m_totMethods(0), m_total(0), m_maximum(0), m_numFilteredMethods(0), m_filtered(0) { } // Assumes that "info" is a completed CompTimeInfo for a compilation; adds it to the summary. // This is thread safe. void AddInfo(CompTimeInfo& info, bool includePhases); // Print the summary information to "f". // This is not thread-safe; assumed to be called by only one thread. 
void Print(FILE* f); }; // A JitTimer encapsulates a CompTimeInfo for a single compilation. It also tracks the start of compilation, // and when the current phase started. This is intended to be part of a Compilation object. // class JitTimer { unsigned __int64 m_start; // Start of the compilation. unsigned __int64 m_curPhaseStart; // Start of the current phase. #if MEASURE_CLRAPI_CALLS unsigned __int64 m_CLRcallStart; // Start of the current CLR API call (if any). unsigned __int64 m_CLRcallInvokes; // CLR API invokes under current outer so far unsigned __int64 m_CLRcallCycles; // CLR API cycles under current outer so far. int m_CLRcallAPInum; // The enum/index of the current CLR API call (or -1). static double s_cyclesPerSec; // Cached for speedier measurements #endif #ifdef DEBUG Phases m_lastPhase; // The last phase that was completed (or (Phases)-1 to start). #endif CompTimeInfo m_info; // The CompTimeInfo for this compilation. static CritSecObject s_csvLock; // Lock to protect the time log file. static FILE* s_csvFile; // The time log file handle. void PrintCsvMethodStats(Compiler* comp); private: void* operator new(size_t); void* operator new[](size_t); void operator delete(void*); void operator delete[](void*); public: // Initialized the timer instance JitTimer(unsigned byteCodeSize); static JitTimer* Create(Compiler* comp, unsigned byteCodeSize) { return ::new (comp, CMK_Unknown) JitTimer(byteCodeSize); } static void PrintCsvHeader(); // Ends the current phase (argument is for a redundant check). void EndPhase(Compiler* compiler, Phases phase); #if MEASURE_CLRAPI_CALLS // Start and end a timed CLR API call. void CLRApiCallEnter(unsigned apix); void CLRApiCallLeave(unsigned apix); #endif // MEASURE_CLRAPI_CALLS // Completes the timing of the current method, which is assumed to have "byteCodeBytes" bytes of bytecode, // and adds it to "sum". void Terminate(Compiler* comp, CompTimeSummaryInfo& sum, bool includePhases); // Attempts to query the cycle counter of the current thread. If successful, returns "true" and sets // *cycles to the cycle counter value. Otherwise, returns false and sets the "m_timerFailure" flag of // "m_info" to true. bool GetThreadCycles(unsigned __int64* cycles) { bool res = CycleTimer::GetThreadCyclesS(cycles); if (!res) { m_info.m_timerFailure = true; } return res; } static void Shutdown(); }; #endif // FEATURE_JIT_METHOD_PERF //------------------- Function/Funclet info ------------------------------- enum FuncKind : BYTE { FUNC_ROOT, // The main/root function (always id==0) FUNC_HANDLER, // a funclet associated with an EH handler (finally, fault, catch, filter handler) FUNC_FILTER, // a funclet associated with an EH filter FUNC_COUNT }; class emitLocation; struct FuncInfoDsc { FuncKind funKind; BYTE funFlags; // Currently unused, just here for padding unsigned short funEHIndex; // index, into the ebd table, of innermost EH clause corresponding to this // funclet. It is only valid if funKind field indicates this is a // EH-related funclet: FUNC_HANDLER or FUNC_FILTER #if defined(TARGET_AMD64) // TODO-AMD64-Throughput: make the AMD64 info more like the ARM info to avoid having this large static array. emitLocation* startLoc; emitLocation* endLoc; emitLocation* coldStartLoc; // locations for the cold section, if there is one. emitLocation* coldEndLoc; UNWIND_INFO unwindHeader; // Maximum of 255 UNWIND_CODE 'nodes' and then the unwind header. If there are an odd // number of codes, the VM or Zapper will 4-byte align the whole thing. 
BYTE unwindCodes[offsetof(UNWIND_INFO, UnwindCode) + (0xFF * sizeof(UNWIND_CODE))]; unsigned unwindCodeSlot; #elif defined(TARGET_X86) emitLocation* startLoc; emitLocation* endLoc; emitLocation* coldStartLoc; // locations for the cold section, if there is one. emitLocation* coldEndLoc; #elif defined(TARGET_ARMARCH) UnwindInfo uwi; // Unwind information for this function/funclet's hot section UnwindInfo* uwiCold; // Unwind information for this function/funclet's cold section // Note: we only have a pointer here instead of the actual object, // to save memory in the JIT case (compared to the NGEN case), // where we don't have any cold section. // Note 2: we currently don't support hot/cold splitting in functions // with EH, so uwiCold will be NULL for all funclets. emitLocation* startLoc; emitLocation* endLoc; emitLocation* coldStartLoc; // locations for the cold section, if there is one. emitLocation* coldEndLoc; #endif // TARGET_ARMARCH #if defined(FEATURE_CFI_SUPPORT) jitstd::vector<CFI_CODE>* cfiCodes; #endif // FEATURE_CFI_SUPPORT // Eventually we may want to move rsModifiedRegsMask, lvaOutgoingArgSize, and anything else // that isn't shared between the main function body and funclets. }; struct fgArgTabEntry { GenTreeCall::Use* use; // Points to the argument's GenTreeCall::Use in gtCallArgs or gtCallThisArg. GenTreeCall::Use* lateUse; // Points to the argument's GenTreeCall::Use in gtCallLateArgs, if any. // Get the node that coresponds to this argument entry. // This is the "real" node and not a placeholder or setup node. GenTree* GetNode() const { return lateUse == nullptr ? use->GetNode() : lateUse->GetNode(); } unsigned argNum; // The original argument number, also specifies the required argument evaluation order from the IL private: regNumberSmall regNums[MAX_ARG_REG_COUNT]; // The registers to use when passing this argument, set to REG_STK for // arguments passed on the stack public: unsigned numRegs; // Count of number of registers that this argument uses. // Note that on ARM, if we have a double hfa, this reflects the number // of DOUBLE registers. #if defined(UNIX_AMD64_ABI) // Unix amd64 will split floating point types and integer types in structs // between floating point and general purpose registers. Keep track of that // information so we do not need to recompute it later. unsigned structIntRegs; unsigned structFloatRegs; #endif // UNIX_AMD64_ABI #if defined(DEBUG_ARG_SLOTS) // These fields were used to calculate stack size in stack slots for arguments // but now they are replaced by precise `m_byteOffset/m_byteSize` because of // arm64 apple abi requirements. // A slot is a pointer sized region in the OutArg area. unsigned slotNum; // When an argument is passed in the OutArg area this is the slot number in the OutArg area unsigned numSlots; // Count of number of slots that this argument uses #endif // DEBUG_ARG_SLOTS // Return number of stack slots that this argument is taking. // TODO-Cleanup: this function does not align with arm64 apple model, // delete it. In most cases we just want to know if we it is using stack or not // but in some cases we are checking if it is a multireg arg, like: // `numRegs + GetStackSlotsNumber() > 1` that is harder to replace. // unsigned GetStackSlotsNumber() const { return roundUp(GetStackByteSize(), TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE; } private: unsigned _lateArgInx; // index into gtCallLateArgs list; UINT_MAX if this is not a late arg. 
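    // For illustration only (a hypothetical usage sketch, not part of the original header):
    // callers are expected to go through the accessors declared below rather than reading
    // _lateArgInx directly. Here "call" is assumed to be the GenTreeCall* that owns this entry.
    //
    //   if (argEntry->isLateArg())
    //   {
    //       GenTree* lateNode = Compiler::gtArgNodeByLateArgInx(call, argEntry->GetLateArgInx());
    //   }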
public: unsigned tmpNum; // the LclVar number if we had to force evaluation of this arg var_types argType; // The type used to pass this argument. This is generally the original argument type, but when a // struct is passed as a scalar type, this is that type. // Note that if a struct is passed by reference, this will still be the struct type. bool needTmp : 1; // True when we force this argument's evaluation into a temp LclVar bool needPlace : 1; // True when we must replace this argument with a placeholder node bool isTmp : 1; // True when we setup a temp LclVar for this argument due to size issues with the struct bool processed : 1; // True when we have decided the evaluation order for this argument in the gtCallLateArgs bool isBackFilled : 1; // True when the argument fills a register slot skipped due to alignment requirements of // previous arguments. NonStandardArgKind nonStandardArgKind : 4; // The non-standard arg kind. Non-standard args are args that are forced // to be in certain registers or on the stack, regardless of where they // appear in the arg list. bool isStruct : 1; // True if this is a struct arg bool _isVararg : 1; // True if the argument is in a vararg context. bool passedByRef : 1; // True iff the argument is passed by reference. #if FEATURE_ARG_SPLIT bool _isSplit : 1; // True when this argument is split between the registers and OutArg area #endif // FEATURE_ARG_SPLIT #ifdef FEATURE_HFA_FIELDS_PRESENT CorInfoHFAElemType _hfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA). #endif CorInfoHFAElemType GetHfaElemKind() const { #ifdef FEATURE_HFA_FIELDS_PRESENT return _hfaElemKind; #else NOWAY_MSG("GetHfaElemKind"); return CORINFO_HFA_ELEM_NONE; #endif } void SetHfaElemKind(CorInfoHFAElemType elemKind) { #ifdef FEATURE_HFA_FIELDS_PRESENT _hfaElemKind = elemKind; #else NOWAY_MSG("SetHfaElemKind"); #endif } bool isNonStandard() const { return nonStandardArgKind != NonStandardArgKind::None; } // Returns true if the IR node for this non-standarg arg is added by fgInitArgInfo. // In this case, it must be removed by GenTreeCall::ResetArgInfo. 
bool isNonStandardArgAddedLate() const { switch (static_cast<NonStandardArgKind>(nonStandardArgKind)) { case NonStandardArgKind::None: case NonStandardArgKind::PInvokeFrame: case NonStandardArgKind::ShiftLow: case NonStandardArgKind::ShiftHigh: case NonStandardArgKind::FixedRetBuffer: case NonStandardArgKind::ValidateIndirectCallTarget: return false; case NonStandardArgKind::WrapperDelegateCell: case NonStandardArgKind::VirtualStubCell: case NonStandardArgKind::PInvokeCookie: case NonStandardArgKind::PInvokeTarget: case NonStandardArgKind::R2RIndirectionCell: return true; default: unreached(); } } bool isLateArg() const { bool isLate = (_lateArgInx != UINT_MAX); return isLate; } unsigned GetLateArgInx() const { assert(isLateArg()); return _lateArgInx; } void SetLateArgInx(unsigned inx) { _lateArgInx = inx; } regNumber GetRegNum() const { return (regNumber)regNums[0]; } regNumber GetOtherRegNum() const { return (regNumber)regNums[1]; } #if defined(UNIX_AMD64_ABI) SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; #endif void setRegNum(unsigned int i, regNumber regNum) { assert(i < MAX_ARG_REG_COUNT); regNums[i] = (regNumberSmall)regNum; } regNumber GetRegNum(unsigned int i) { assert(i < MAX_ARG_REG_COUNT); return (regNumber)regNums[i]; } bool IsSplit() const { #if FEATURE_ARG_SPLIT return compFeatureArgSplit() && _isSplit; #else // FEATURE_ARG_SPLIT return false; #endif } void SetSplit(bool value) { #if FEATURE_ARG_SPLIT _isSplit = value; #endif } bool IsVararg() const { return compFeatureVarArg() && _isVararg; } void SetIsVararg(bool value) { if (compFeatureVarArg()) { _isVararg = value; } } bool IsHfaArg() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetHfaElemKind()); } else { return false; } } bool IsHfaRegArg() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetHfaElemKind()) && isPassedInRegisters(); } else { return false; } } unsigned intRegCount() const { #if defined(UNIX_AMD64_ABI) if (this->isStruct) { return this->structIntRegs; } #endif // defined(UNIX_AMD64_ABI) if (!this->isPassedInFloatRegisters()) { return this->numRegs; } return 0; } unsigned floatRegCount() const { #if defined(UNIX_AMD64_ABI) if (this->isStruct) { return this->structFloatRegs; } #endif // defined(UNIX_AMD64_ABI) if (this->isPassedInFloatRegisters()) { return this->numRegs; } return 0; } // Get the number of bytes that this argument is occupying on the stack, // including padding up to the target pointer size for platforms // where a stack argument can't take less. unsigned GetStackByteSize() const { if (!IsSplit() && numRegs > 0) { return 0; } assert(!IsHfaArg() || !IsSplit()); assert(GetByteSize() > TARGET_POINTER_SIZE * numRegs); const unsigned stackByteSize = GetByteSize() - TARGET_POINTER_SIZE * numRegs; return stackByteSize; } var_types GetHfaType() const { if (GlobalJitOptions::compFeatureHfa) { return HfaTypeFromElemKind(GetHfaElemKind()); } else { return TYP_UNDEF; } } void SetHfaType(var_types type, unsigned hfaSlots) { if (GlobalJitOptions::compFeatureHfa) { if (type != TYP_UNDEF) { // We must already have set the passing mode. assert(numRegs != 0 || GetStackByteSize() != 0); // We originally set numRegs according to the size of the struct, but if the size of the // hfaType is not the same as the pointer size, we need to correct it. // Note that hfaSlots is the number of registers we will use. For ARM, that is twice // the number of "double registers". 
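                // For example (illustrative): an HFA of two doubles on arm32 arrives here with
                // hfaSlots == 4 (counted in float-sized registers), and the TARGET_ARM code below
                // halves that to numHfaRegs == 2 DOUBLE registers; on arm64 hfaSlots is used as
                // the register count unchanged.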
unsigned numHfaRegs = hfaSlots; #ifdef TARGET_ARM if (type == TYP_DOUBLE) { // Must be an even number of registers. assert((numRegs & 1) == 0); numHfaRegs = hfaSlots / 2; } #endif // TARGET_ARM if (!IsHfaArg()) { // We haven't previously set this; do so now. CorInfoHFAElemType elemKind = HfaElemKindFromType(type); SetHfaElemKind(elemKind); // Ensure we've allocated enough bits. assert(GetHfaElemKind() == elemKind); if (isPassedInRegisters()) { numRegs = numHfaRegs; } } else { // We've already set this; ensure that it's consistent. if (isPassedInRegisters()) { assert(numRegs == numHfaRegs); } assert(type == HfaTypeFromElemKind(GetHfaElemKind())); } } } } #ifdef TARGET_ARM void SetIsBackFilled(bool backFilled) { isBackFilled = backFilled; } bool IsBackFilled() const { return isBackFilled; } #else // !TARGET_ARM void SetIsBackFilled(bool backFilled) { } bool IsBackFilled() const { return false; } #endif // !TARGET_ARM bool isPassedInRegisters() const { return !IsSplit() && (numRegs != 0); } bool isPassedInFloatRegisters() const { #ifdef TARGET_X86 return false; #else return isValidFloatArgReg(GetRegNum()); #endif } // Can we replace the struct type of this node with a primitive type for argument passing? bool TryPassAsPrimitive() const { return !IsSplit() && ((numRegs == 1) || (m_byteSize <= TARGET_POINTER_SIZE)); } #if defined(DEBUG_ARG_SLOTS) // Returns the number of "slots" used, where for this purpose a // register counts as a slot. unsigned getSlotCount() const { if (isBackFilled) { assert(isPassedInRegisters()); assert(numRegs == 1); } else if (GetRegNum() == REG_STK) { assert(!isPassedInRegisters()); assert(numRegs == 0); } else { assert(numRegs > 0); } return numSlots + numRegs; } #endif #if defined(DEBUG_ARG_SLOTS) // Returns the size as a multiple of pointer-size. // For targets without HFAs, this is the same as getSlotCount(). unsigned getSize() const { unsigned size = getSlotCount(); if (GlobalJitOptions::compFeatureHfa) { if (IsHfaRegArg()) { #ifdef TARGET_ARM // We counted the number of regs, but if they are DOUBLE hfa regs we have to double the size. if (GetHfaType() == TYP_DOUBLE) { assert(!IsSplit()); size <<= 1; } #elif defined(TARGET_ARM64) // We counted the number of regs, but if they are FLOAT hfa regs we have to halve the size, // or if they are SIMD16 vector hfa regs we have to double the size. if (GetHfaType() == TYP_FLOAT) { // Round up in case of odd HFA count. size = (size + 1) >> 1; } #ifdef FEATURE_SIMD else if (GetHfaType() == TYP_SIMD16) { size <<= 1; } #endif // FEATURE_SIMD #endif // TARGET_ARM64 } } return size; } #endif // DEBUG_ARG_SLOTS private: unsigned m_byteOffset; // byte size that this argument takes including the padding after. // For example, 1-byte arg on x64 with 8-byte alignment // will have `m_byteSize == 8`, the same arg on apple arm64 will have `m_byteSize == 1`. unsigned m_byteSize; unsigned m_byteAlignment; // usually 4 or 8 bytes (slots/registers). public: void SetByteOffset(unsigned byteOffset) { DEBUG_ARG_SLOTS_ASSERT(byteOffset / TARGET_POINTER_SIZE == slotNum); m_byteOffset = byteOffset; } unsigned GetByteOffset() const { DEBUG_ARG_SLOTS_ASSERT(m_byteOffset / TARGET_POINTER_SIZE == slotNum); return m_byteOffset; } void SetByteSize(unsigned byteSize, bool isStruct, bool isFloatHfa) { unsigned roundedByteSize; if (compMacOsArm64Abi()) { // Only struct types need extension or rounding to pointer size, but HFA<float> does not. 
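            // For example (illustrative): under this ABI a 12-byte struct is rounded up to
            // 16 bytes, while a scalar or an HFA of floats keeps its exact byte size; targets
            // other than macOS arm64 take the outer else below and round every argument up to
            // TARGET_POINTER_SIZE.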
if (isStruct && !isFloatHfa) { roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE); } else { roundedByteSize = byteSize; } } else { roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE); } #if !defined(TARGET_ARM) // Arm32 could have a struct with 8 byte alignment // which rounded size % 8 is not 0. assert(m_byteAlignment != 0); assert(roundedByteSize % m_byteAlignment == 0); #endif // TARGET_ARM #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi() && !isStruct) { assert(roundedByteSize == getSlotCount() * TARGET_POINTER_SIZE); } #endif m_byteSize = roundedByteSize; } unsigned GetByteSize() const { return m_byteSize; } void SetByteAlignment(unsigned byteAlignment) { m_byteAlignment = byteAlignment; } unsigned GetByteAlignment() const { return m_byteAlignment; } // Set the register numbers for a multireg argument. // There's nothing to do on x64/Ux because the structDesc has already been used to set the // register numbers. void SetMultiRegNums() { #if FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI) if (numRegs == 1) { return; } regNumber argReg = GetRegNum(0); #ifdef TARGET_ARM unsigned int regSize = (GetHfaType() == TYP_DOUBLE) ? 2 : 1; #else unsigned int regSize = 1; #endif if (numRegs > MAX_ARG_REG_COUNT) NO_WAY("Multireg argument exceeds the maximum length"); for (unsigned int regIndex = 1; regIndex < numRegs; regIndex++) { argReg = (regNumber)(argReg + regSize); setRegNum(regIndex, argReg); } #endif // FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI) } #ifdef DEBUG // Check that the value of 'isStruct' is consistent. // A struct arg must be one of the following: // - A node of struct type, // - A GT_FIELD_LIST, or // - A node of a scalar type, passed in a single register or slot // (or two slots in the case of a struct pass on the stack as TYP_DOUBLE). // void checkIsStruct() const { GenTree* node = GetNode(); if (isStruct) { if (!varTypeIsStruct(node) && !node->OperIs(GT_FIELD_LIST)) { // This is the case where we are passing a struct as a primitive type. // On most targets, this is always a single register or slot. // However, on ARM this could be two slots if it is TYP_DOUBLE. bool isPassedAsPrimitiveType = ((numRegs == 1) || ((numRegs == 0) && (GetByteSize() <= TARGET_POINTER_SIZE))); #ifdef TARGET_ARM if (!isPassedAsPrimitiveType) { if (node->TypeGet() == TYP_DOUBLE && numRegs == 0 && (numSlots == 2)) { isPassedAsPrimitiveType = true; } } #endif // TARGET_ARM assert(isPassedAsPrimitiveType); } } else { assert(!varTypeIsStruct(node)); } } void Dump() const; #endif }; //------------------------------------------------------------------------- // // The class fgArgInfo is used to handle the arguments // when morphing a GT_CALL node. // class fgArgInfo { Compiler* compiler; // Back pointer to the compiler instance so that we can allocate memory GenTreeCall* callTree; // Back pointer to the GT_CALL node for this fgArgInfo unsigned argCount; // Updatable arg count value #if defined(DEBUG_ARG_SLOTS) unsigned nextSlotNum; // Updatable slot count value #endif unsigned nextStackByteOffset; unsigned stkLevel; // Stack depth when we make this call (for x86) #if defined(UNIX_X86_ABI) bool alignmentDone; // Updateable flag, set to 'true' after we've done any required alignment. unsigned stkSizeBytes; // Size of stack used by this call, in bytes. Calculated during fgMorphArgs(). unsigned padStkAlign; // Stack alignment in bytes required before arguments are pushed for this call. 
// Computed dynamically during codegen, based on stkSizeBytes and the current // stack level (genStackLevel) when the first stack adjustment is made for // this call. #endif #if FEATURE_FIXED_OUT_ARGS unsigned outArgSize; // Size of the out arg area for the call, will be at least MIN_ARG_AREA_FOR_CALL #endif unsigned argTableSize; // size of argTable array (equal to the argCount when done with fgMorphArgs) bool hasRegArgs; // true if we have one or more register arguments bool hasStackArgs; // true if we have one or more stack arguments bool argsComplete; // marker for state bool argsSorted; // marker for state bool needsTemps; // one or more arguments must be copied to a temp by EvalArgsToTemps fgArgTabEntry** argTable; // variable sized array of per argument descrption: (i.e. argTable[argTableSize]) private: void AddArg(fgArgTabEntry* curArgTabEntry); public: fgArgInfo(Compiler* comp, GenTreeCall* call, unsigned argCount); fgArgInfo(GenTreeCall* newCall, GenTreeCall* oldCall); fgArgTabEntry* AddRegArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, regNumber regNum, unsigned numRegs, unsigned byteSize, unsigned byteAlignment, bool isStruct, bool isFloatHfa, bool isVararg = false); #ifdef UNIX_AMD64_ABI fgArgTabEntry* AddRegArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, regNumber regNum, unsigned numRegs, unsigned byteSize, unsigned byteAlignment, const bool isStruct, const bool isFloatHfa, const bool isVararg, const regNumber otherRegNum, const unsigned structIntRegs, const unsigned structFloatRegs, const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr = nullptr); #endif // UNIX_AMD64_ABI fgArgTabEntry* AddStkArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, unsigned numSlots, unsigned byteSize, unsigned byteAlignment, bool isStruct, bool isFloatHfa, bool isVararg = false); void RemorphReset(); void UpdateRegArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing); void UpdateStkArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing); void SplitArg(unsigned argNum, unsigned numRegs, unsigned numSlots); void EvalToTmp(fgArgTabEntry* curArgTabEntry, unsigned tmpNum, GenTree* newNode); void ArgsComplete(); void SortArgs(); void EvalArgsToTemps(); unsigned ArgCount() const { return argCount; } fgArgTabEntry** ArgTable() const { return argTable; } #if defined(DEBUG_ARG_SLOTS) unsigned GetNextSlotNum() const { return nextSlotNum; } #endif unsigned GetNextSlotByteOffset() const { return nextStackByteOffset; } bool HasRegArgs() const { return hasRegArgs; } bool NeedsTemps() const { return needsTemps; } bool HasStackArgs() const { return hasStackArgs; } bool AreArgsComplete() const { return argsComplete; } #if FEATURE_FIXED_OUT_ARGS unsigned GetOutArgSize() const { return outArgSize; } void SetOutArgSize(unsigned newVal) { outArgSize = newVal; } #endif // FEATURE_FIXED_OUT_ARGS #if defined(UNIX_X86_ABI) void ComputeStackAlignment(unsigned curStackLevelInBytes) { padStkAlign = AlignmentPad(curStackLevelInBytes, STACK_ALIGN); } unsigned GetStkAlign() const { return padStkAlign; } void SetStkSizeBytes(unsigned newStkSizeBytes) { stkSizeBytes = newStkSizeBytes; } unsigned GetStkSizeBytes() const { return stkSizeBytes; } bool IsStkAlignmentDone() const { return alignmentDone; } void SetStkAlignmentDone() { alignmentDone = true; } #endif // defined(UNIX_X86_ABI) // Get the fgArgTabEntry for the arg at position argNum. 
fgArgTabEntry* GetArgEntry(unsigned argNum, bool reMorphing = true) const { fgArgTabEntry* curArgTabEntry = nullptr; if (!reMorphing) { // The arg table has not yet been sorted. curArgTabEntry = argTable[argNum]; assert(curArgTabEntry->argNum == argNum); return curArgTabEntry; } for (unsigned i = 0; i < argCount; i++) { curArgTabEntry = argTable[i]; if (curArgTabEntry->argNum == argNum) { return curArgTabEntry; } } noway_assert(!"GetArgEntry: argNum not found"); return nullptr; } void SetNeedsTemps() { needsTemps = true; } // Get the node for the arg at position argIndex. // Caller must ensure that this index is a valid arg index. GenTree* GetArgNode(unsigned argIndex) const { return GetArgEntry(argIndex)->GetNode(); } void Dump(Compiler* compiler) const; }; #ifdef DEBUG // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // We have the ability to mark source expressions with "Test Labels." // These drive assertions within the JIT, or internal JIT testing. For example, we could label expressions // that should be CSE defs, and other expressions that should uses of those defs, with a shared label. enum TestLabel // This must be kept identical to System.Runtime.CompilerServices.JitTestLabel.TestLabel. { TL_SsaName, TL_VN, // Defines a "VN equivalence class". (For full VN, including exceptions thrown). TL_VNNorm, // Like above, but uses the non-exceptional value of the expression. TL_CSE_Def, // This must be identified in the JIT as a CSE def TL_CSE_Use, // This must be identified in the JIT as a CSE use TL_LoopHoist, // Expression must (or must not) be hoisted out of the loop. }; struct TestLabelAndNum { TestLabel m_tl; ssize_t m_num; TestLabelAndNum() : m_tl(TestLabel(0)), m_num(0) { } }; typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, TestLabelAndNum> NodeToTestDataMap; // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #endif // DEBUG //------------------------------------------------------------------------- // LoopFlags: flags for the loop table. // enum LoopFlags : unsigned short { LPFLG_EMPTY = 0, // LPFLG_UNUSED = 0x0001, // LPFLG_UNUSED = 0x0002, LPFLG_ITER = 0x0004, // loop of form: for (i = icon or lclVar; test_condition(); i++) // LPFLG_UNUSED = 0x0008, LPFLG_CONTAINS_CALL = 0x0010, // If executing the loop body *may* execute a call LPFLG_VAR_INIT = 0x0020, // iterator is initialized with a local var (var # found in lpVarInit) LPFLG_CONST_INIT = 0x0040, // iterator is initialized with a constant (found in lpConstInit) LPFLG_SIMD_LIMIT = 0x0080, // iterator is compared with vector element count (found in lpConstLimit) LPFLG_VAR_LIMIT = 0x0100, // iterator is compared with a local var (var # found in lpVarLimit) LPFLG_CONST_LIMIT = 0x0200, // iterator is compared with a constant (found in lpConstLimit) LPFLG_ARRLEN_LIMIT = 0x0400, // iterator is compared with a.len or a[i].len (found in lpArrLenLimit) LPFLG_HAS_PREHEAD = 0x0800, // lpHead is known to be a preHead for this loop LPFLG_REMOVED = 0x1000, // has been removed from the loop table (unrolled or optimized away) LPFLG_DONT_UNROLL = 0x2000, // do not unroll this loop LPFLG_ASGVARS_YES = 0x4000, // "lpAsgVars" has been computed LPFLG_ASGVARS_INC = 0x8000, // "lpAsgVars" is incomplete -- vars beyond those representable in an AllVarSet // type are assigned to. 
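    // For illustration only (hypothetical usage, not from the original header): these values
    // are combined via the LoopFlags bitwise operators defined right after this enum, e.g.
    //
    //   LoopFlags flags = LPFLG_ITER | LPFLG_CONST_INIT; // "for (i = 0; cond; i++)" style loop
    //   flags &= ~LPFLG_DONT_UNROLL;                     // leave the loop eligible for unrolling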
}; inline constexpr LoopFlags operator~(LoopFlags a) { return (LoopFlags)(~(unsigned short)a); } inline constexpr LoopFlags operator|(LoopFlags a, LoopFlags b) { return (LoopFlags)((unsigned short)a | (unsigned short)b); } inline constexpr LoopFlags operator&(LoopFlags a, LoopFlags b) { return (LoopFlags)((unsigned short)a & (unsigned short)b); } inline LoopFlags& operator|=(LoopFlags& a, LoopFlags b) { return a = (LoopFlags)((unsigned short)a | (unsigned short)b); } inline LoopFlags& operator&=(LoopFlags& a, LoopFlags b) { return a = (LoopFlags)((unsigned short)a & (unsigned short)b); } // The following holds information about instr offsets in terms of generated code. enum class IPmappingDscKind { Prolog, // The mapping represents the start of a prolog. Epilog, // The mapping represents the start of an epilog. NoMapping, // This does not map to any IL offset. Normal, // The mapping maps to an IL offset. }; struct IPmappingDsc { emitLocation ipmdNativeLoc; // the emitter location of the native code corresponding to the IL offset IPmappingDscKind ipmdKind; // The kind of mapping ILLocation ipmdLoc; // The location for normal mappings bool ipmdIsLabel; // Can this code be a branch label? }; struct PreciseIPMapping { emitLocation nativeLoc; DebugInfo debugInfo; }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX The big guy. The sections are currently organized as : XX XX XX XX o GenTree and BasicBlock XX XX o LclVarsInfo XX XX o Importer XX XX o FlowGraph XX XX o Optimizer XX XX o RegAlloc XX XX o EEInterface XX XX o TempsInfo XX XX o RegSet XX XX o GCInfo XX XX o Instruction XX XX o ScopeInfo XX XX o PrologScopeInfo XX XX o CodeGenerator XX XX o UnwindInfo XX XX o Compiler XX XX o typeInfo XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ struct HWIntrinsicInfo; class Compiler { friend class emitter; friend class UnwindInfo; friend class UnwindFragmentInfo; friend class UnwindEpilogInfo; friend class JitTimer; friend class LinearScan; friend class fgArgInfo; friend class Rationalizer; friend class Phase; friend class Lowering; friend class CSE_DataFlow; friend class CSE_Heuristic; friend class CodeGenInterface; friend class CodeGen; friend class LclVarDsc; friend class TempDsc; friend class LIR; friend class ObjectAllocator; friend class LocalAddressVisitor; friend struct GenTree; friend class MorphInitBlockHelper; friend class MorphCopyBlockHelper; #ifdef FEATURE_HW_INTRINSICS friend struct HWIntrinsicInfo; #endif // FEATURE_HW_INTRINSICS #ifndef TARGET_64BIT friend class DecomposeLongs; #endif // !TARGET_64BIT /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Misc structs definitions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: hashBvGlobalData hbvGlobalData; // Used by the hashBv bitvector package. #ifdef DEBUG bool verbose; bool verboseTrees; bool shouldUseVerboseTrees(); bool asciiTrees; // If true, dump trees using only ASCII characters bool shouldDumpASCIITrees(); bool verboseSsa; // If true, produce especially verbose dump output in SSA construction. 
bool shouldUseVerboseSsa(); bool treesBeforeAfterMorph; // If true, print trees before/after morphing (paired by an intra-compilation id: int morphNum; // This counts the the trees that have been morphed, allowing us to label each uniquely. bool doExtraSuperPmiQueries; void makeExtraStructQueries(CORINFO_CLASS_HANDLE structHandle, int level); // Make queries recursively 'level' deep. const char* VarNameToStr(VarName name) { return name; } DWORD expensiveDebugCheckLevel; #endif #if FEATURE_MULTIREG_RET GenTree* impAssignMultiRegTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv)); #endif // FEATURE_MULTIREG_RET #ifdef TARGET_X86 bool isTrivialPointerSizedStruct(CORINFO_CLASS_HANDLE clsHnd) const; #endif // TARGET_X86 //------------------------------------------------------------------------- // Functions to handle homogeneous floating-point aggregates (HFAs) in ARM/ARM64. // HFAs are one to four element structs where each element is the same // type, either all float or all double. We handle HVAs (one to four elements of // vector types) uniformly with HFAs. HFAs are treated specially // in the ARM/ARM64 Procedure Call Standards, specifically, they are passed in // floating-point registers instead of the general purpose registers. // bool IsHfa(CORINFO_CLASS_HANDLE hClass); bool IsHfa(GenTree* tree); var_types GetHfaType(GenTree* tree); unsigned GetHfaCount(GenTree* tree); var_types GetHfaType(CORINFO_CLASS_HANDLE hClass); unsigned GetHfaCount(CORINFO_CLASS_HANDLE hClass); bool IsMultiRegReturnedType(CORINFO_CLASS_HANDLE hClass, CorInfoCallConvExtension callConv); //------------------------------------------------------------------------- // The following is used for validating format of EH table // struct EHNodeDsc; typedef struct EHNodeDsc* pEHNodeDsc; EHNodeDsc* ehnTree; // root of the tree comprising the EHnodes. EHNodeDsc* ehnNext; // root of the tree comprising the EHnodes. struct EHNodeDsc { enum EHBlockType { TryNode, FilterNode, HandlerNode, FinallyNode, FaultNode }; EHBlockType ehnBlockType; // kind of EH block IL_OFFSET ehnStartOffset; // IL offset of start of the EH block IL_OFFSET ehnEndOffset; // IL offset past end of the EH block. (TODO: looks like verInsertEhNode() sets this to // the last IL offset, not "one past the last one", i.e., the range Start to End is // inclusive). 
pEHNodeDsc ehnNext; // next (non-nested) block in sequential order pEHNodeDsc ehnChild; // leftmost nested block union { pEHNodeDsc ehnTryNode; // for filters and handlers, the corresponding try node pEHNodeDsc ehnHandlerNode; // for a try node, the corresponding handler node }; pEHNodeDsc ehnFilterNode; // if this is a try node and has a filter, otherwise 0 pEHNodeDsc ehnEquivalent; // if blockType=tryNode, start offset and end offset is same, void ehnSetTryNodeType() { ehnBlockType = TryNode; } void ehnSetFilterNodeType() { ehnBlockType = FilterNode; } void ehnSetHandlerNodeType() { ehnBlockType = HandlerNode; } void ehnSetFinallyNodeType() { ehnBlockType = FinallyNode; } void ehnSetFaultNodeType() { ehnBlockType = FaultNode; } bool ehnIsTryBlock() { return ehnBlockType == TryNode; } bool ehnIsFilterBlock() { return ehnBlockType == FilterNode; } bool ehnIsHandlerBlock() { return ehnBlockType == HandlerNode; } bool ehnIsFinallyBlock() { return ehnBlockType == FinallyNode; } bool ehnIsFaultBlock() { return ehnBlockType == FaultNode; } // returns true if there is any overlap between the two nodes static bool ehnIsOverlap(pEHNodeDsc node1, pEHNodeDsc node2) { if (node1->ehnStartOffset < node2->ehnStartOffset) { return (node1->ehnEndOffset >= node2->ehnStartOffset); } else { return (node1->ehnStartOffset <= node2->ehnEndOffset); } } // fails with BADCODE if inner is not completely nested inside outer static bool ehnIsNested(pEHNodeDsc inner, pEHNodeDsc outer) { return ((inner->ehnStartOffset >= outer->ehnStartOffset) && (inner->ehnEndOffset <= outer->ehnEndOffset)); } }; //------------------------------------------------------------------------- // Exception handling functions // #if !defined(FEATURE_EH_FUNCLETS) bool ehNeedsShadowSPslots() { return (info.compXcptnsCount || opts.compDbgEnC); } // 0 for methods with no EH // 1 for methods with non-nested EH, or where only the try blocks are nested // 2 for a method with a catch within a catch // etc. unsigned ehMaxHndNestingCount; #endif // !FEATURE_EH_FUNCLETS static bool jitIsBetween(unsigned value, unsigned start, unsigned end); static bool jitIsBetweenInclusive(unsigned value, unsigned start, unsigned end); bool bbInCatchHandlerILRange(BasicBlock* blk); bool bbInFilterILRange(BasicBlock* blk); bool bbInTryRegions(unsigned regionIndex, BasicBlock* blk); bool bbInExnFlowRegions(unsigned regionIndex, BasicBlock* blk); bool bbInHandlerRegions(unsigned regionIndex, BasicBlock* blk); bool bbInCatchHandlerRegions(BasicBlock* tryBlk, BasicBlock* hndBlk); unsigned short bbFindInnermostCommonTryRegion(BasicBlock* bbOne, BasicBlock* bbTwo); unsigned short bbFindInnermostTryRegionContainingHandlerRegion(unsigned handlerIndex); unsigned short bbFindInnermostHandlerRegionContainingTryRegion(unsigned tryIndex); // Returns true if "block" is the start of a try region. bool bbIsTryBeg(BasicBlock* block); // Returns true if "block" is the start of a handler or filter region. bool bbIsHandlerBeg(BasicBlock* block); // Returns true iff "block" is where control flows if an exception is raised in the // try region, and sets "*regionIndex" to the index of the try for the handler. // Differs from "IsHandlerBeg" in the case of filters, where this is true for the first // block of the filter, but not for the filter's handler. bool bbIsExFlowBlock(BasicBlock* block, unsigned* regionIndex); bool ehHasCallableHandlers(); // Return the EH descriptor for the given region index. EHblkDsc* ehGetDsc(unsigned regionIndex); // Return the EH index given a region descriptor. 
unsigned ehGetIndex(EHblkDsc* ehDsc); // Return the EH descriptor index of the enclosing try, for the given region index. unsigned ehGetEnclosingTryIndex(unsigned regionIndex); // Return the EH descriptor index of the enclosing handler, for the given region index. unsigned ehGetEnclosingHndIndex(unsigned regionIndex); // Return the EH descriptor for the most nested 'try' region this BasicBlock is a member of (or nullptr if this // block is not in a 'try' region). EHblkDsc* ehGetBlockTryDsc(BasicBlock* block); // Return the EH descriptor for the most nested filter or handler region this BasicBlock is a member of (or nullptr // if this block is not in a filter or handler region). EHblkDsc* ehGetBlockHndDsc(BasicBlock* block); // Return the EH descriptor for the most nested region that may handle exceptions raised in this BasicBlock (or // nullptr if this block's exceptions propagate to caller). EHblkDsc* ehGetBlockExnFlowDsc(BasicBlock* block); EHblkDsc* ehIsBlockTryLast(BasicBlock* block); EHblkDsc* ehIsBlockHndLast(BasicBlock* block); bool ehIsBlockEHLast(BasicBlock* block); bool ehBlockHasExnFlowDsc(BasicBlock* block); // Return the region index of the most nested EH region this block is in. unsigned ehGetMostNestedRegionIndex(BasicBlock* block, bool* inTryRegion); // Find the true enclosing try index, ignoring 'mutual protect' try. Uses IL ranges to check. unsigned ehTrueEnclosingTryIndexIL(unsigned regionIndex); // Return the index of the most nested enclosing region for a particular EH region. Returns NO_ENCLOSING_INDEX // if there is no enclosing region. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion' // is set to 'true' if the enclosing region is a 'try', or 'false' if the enclosing region is a handler. // (It can never be a filter.) unsigned ehGetEnclosingRegionIndex(unsigned regionIndex, bool* inTryRegion); // A block has been deleted. Update the EH table appropriately. void ehUpdateForDeletedBlock(BasicBlock* block); // Determine whether a block can be deleted while preserving the EH normalization rules. bool ehCanDeleteEmptyBlock(BasicBlock* block); // Update the 'last' pointers in the EH table to reflect new or deleted blocks in an EH region. void ehUpdateLastBlocks(BasicBlock* oldLast, BasicBlock* newLast); // For a finally handler, find the region index that the BBJ_CALLFINALLY lives in that calls the handler, // or NO_ENCLOSING_INDEX if the BBJ_CALLFINALLY lives in the main function body. Normally, the index // is the same index as the handler (and the BBJ_CALLFINALLY lives in the 'try' region), but for AMD64 the // BBJ_CALLFINALLY lives in the enclosing try or handler region, whichever is more nested, or the main function // body. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion' is set to 'true' if the // BBJ_CALLFINALLY lives in the returned index's 'try' region, or 'false' if lives in the handler region. (It never // lives in a filter.) unsigned ehGetCallFinallyRegionIndex(unsigned finallyIndex, bool* inTryRegion); // Find the range of basic blocks in which all BBJ_CALLFINALLY will be found that target the 'finallyIndex' region's // handler. Set begBlk to the first block, and endBlk to the block after the last block of the range // (nullptr if the last block is the last block in the program). // Precondition: 'finallyIndex' is the EH region of a try/finally clause. 
void ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** begBlk, BasicBlock** endBlk); #ifdef DEBUG // Given a BBJ_CALLFINALLY block and the EH region index of the finally it is calling, return // 'true' if the BBJ_CALLFINALLY is in the correct EH region. bool ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex); #endif // DEBUG #if defined(FEATURE_EH_FUNCLETS) // Do we need a PSPSym in the main function? For codegen purposes, we only need one // if there is a filter that protects a region with a nested EH clause (such as a // try/catch nested in the 'try' body of a try/filter/filter-handler). See // genFuncletProlog() for more details. However, the VM seems to use it for more // purposes, maybe including debugging. Until we are sure otherwise, always create // a PSPSym for functions with any EH. bool ehNeedsPSPSym() const { #ifdef TARGET_X86 return false; #else // TARGET_X86 return compHndBBtabCount > 0; #endif // TARGET_X86 } bool ehAnyFunclets(); // Are there any funclets in this function? unsigned ehFuncletCount(); // Return the count of funclets in the function unsigned bbThrowIndex(BasicBlock* blk); // Get the index to use as the cache key for sharing throw blocks #else // !FEATURE_EH_FUNCLETS bool ehAnyFunclets() { return false; } unsigned ehFuncletCount() { return 0; } unsigned bbThrowIndex(BasicBlock* blk) { return blk->bbTryIndex; } // Get the index to use as the cache key for sharing throw blocks #endif // !FEATURE_EH_FUNCLETS // Returns a flowList representing the "EH predecessors" of "blk". These are the normal predecessors of // "blk", plus one special case: if "blk" is the first block of a handler, considers the predecessor(s) of the first // first block of the corresponding try region to be "EH predecessors". (If there is a single such predecessor, // for example, we want to consider that the immediate dominator of the catch clause start block, so it's // convenient to also consider it a predecessor.) flowList* BlockPredsWithEH(BasicBlock* blk); // This table is useful for memoization of the method above. typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, flowList*> BlockToFlowListMap; BlockToFlowListMap* m_blockToEHPreds; BlockToFlowListMap* GetBlockToEHPreds() { if (m_blockToEHPreds == nullptr) { m_blockToEHPreds = new (getAllocator()) BlockToFlowListMap(getAllocator()); } return m_blockToEHPreds; } void* ehEmitCookie(BasicBlock* block); UNATIVE_OFFSET ehCodeOffset(BasicBlock* block); EHblkDsc* ehInitHndRange(BasicBlock* src, IL_OFFSET* hndBeg, IL_OFFSET* hndEnd, bool* inFilter); EHblkDsc* ehInitTryRange(BasicBlock* src, IL_OFFSET* tryBeg, IL_OFFSET* tryEnd); EHblkDsc* ehInitHndBlockRange(BasicBlock* blk, BasicBlock** hndBeg, BasicBlock** hndLast, bool* inFilter); EHblkDsc* ehInitTryBlockRange(BasicBlock* blk, BasicBlock** tryBeg, BasicBlock** tryLast); void fgSetTryBeg(EHblkDsc* handlerTab, BasicBlock* newTryBeg); void fgSetTryEnd(EHblkDsc* handlerTab, BasicBlock* newTryLast); void fgSetHndEnd(EHblkDsc* handlerTab, BasicBlock* newHndLast); void fgSkipRmvdBlocks(EHblkDsc* handlerTab); void fgAllocEHTable(); void fgRemoveEHTableEntry(unsigned XTnum); #if defined(FEATURE_EH_FUNCLETS) EHblkDsc* fgAddEHTableEntry(unsigned XTnum); #endif // FEATURE_EH_FUNCLETS #if !FEATURE_EH void fgRemoveEH(); #endif // !FEATURE_EH void fgSortEHTable(); // Causes the EH table to obey some well-formedness conditions, by inserting // empty BB's when necessary: // * No block is both the first block of a handler and the first block of a try. 
// * No block is the first block of multiple 'try' regions. // * No block is the last block of multiple EH regions. void fgNormalizeEH(); bool fgNormalizeEHCase1(); bool fgNormalizeEHCase2(); bool fgNormalizeEHCase3(); void fgCheckForLoopsInHandlers(); #ifdef DEBUG void dispIncomingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause); void dispOutgoingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause); void fgVerifyHandlerTab(); void fgDispHandlerTab(); #endif // DEBUG bool fgNeedToSortEHTable; void verInitEHTree(unsigned numEHClauses); void verInsertEhNode(CORINFO_EH_CLAUSE* clause, EHblkDsc* handlerTab); void verInsertEhNodeInTree(EHNodeDsc** ppRoot, EHNodeDsc* node); void verInsertEhNodeParent(EHNodeDsc** ppRoot, EHNodeDsc* node); void verCheckNestingLevel(EHNodeDsc* initRoot); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX GenTree and BasicBlock XX XX XX XX Functions to allocate and display the GenTrees and BasicBlocks XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // Functions to create nodes Statement* gtNewStmt(GenTree* expr = nullptr); Statement* gtNewStmt(GenTree* expr, const DebugInfo& di); // For unary opers. GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, bool doSimplifications = TRUE); // For binary opers. GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2); GenTreeColon* gtNewColonNode(var_types type, GenTree* elseNode, GenTree* thenNode); GenTreeQmark* gtNewQmarkNode(var_types type, GenTree* cond, GenTreeColon* colon); GenTree* gtNewLargeOperNode(genTreeOps oper, var_types type = TYP_I_IMPL, GenTree* op1 = nullptr, GenTree* op2 = nullptr); GenTreeIntCon* gtNewIconNode(ssize_t value, var_types type = TYP_INT); GenTreeIntCon* gtNewIconNode(unsigned fieldOffset, FieldSeqNode* fieldSeq); GenTreeIntCon* gtNewNull(); GenTreeIntCon* gtNewTrue(); GenTreeIntCon* gtNewFalse(); GenTree* gtNewPhysRegNode(regNumber reg, var_types type); GenTree* gtNewJmpTableNode(); GenTree* gtNewIndOfIconHandleNode(var_types indType, size_t value, GenTreeFlags iconFlags, bool isInvariant); GenTree* gtNewIconHandleNode(size_t value, GenTreeFlags flags, FieldSeqNode* fields = nullptr); GenTreeFlags gtTokenToIconFlags(unsigned token); GenTree* gtNewIconEmbHndNode(void* value, void* pValue, GenTreeFlags flags, void* compileTimeHandle); GenTree* gtNewIconEmbScpHndNode(CORINFO_MODULE_HANDLE scpHnd); GenTree* gtNewIconEmbClsHndNode(CORINFO_CLASS_HANDLE clsHnd); GenTree* gtNewIconEmbMethHndNode(CORINFO_METHOD_HANDLE methHnd); GenTree* gtNewIconEmbFldHndNode(CORINFO_FIELD_HANDLE fldHnd); GenTree* gtNewStringLiteralNode(InfoAccessType iat, void* pValue); GenTreeIntCon* gtNewStringLiteralLength(GenTreeStrCon* node); GenTree* gtNewLconNode(__int64 value); GenTree* gtNewDconNode(double value, var_types type = TYP_DOUBLE); GenTree* gtNewSconNode(int CPX, CORINFO_MODULE_HANDLE scpHandle); GenTree* gtNewZeroConNode(var_types type); GenTree* gtNewOneConNode(var_types type); GenTreeLclVar* gtNewStoreLclVar(unsigned dstLclNum, GenTree* src); #ifdef FEATURE_SIMD GenTree* gtNewSIMDVectorZero(var_types simdType, CorInfoType simdBaseJitType, unsigned simdSize); #endif GenTree* gtNewBlkOpNode(GenTree* dst, GenTree* srcOrFillVal, bool isVolatile, bool isCopyBlock); GenTree* gtNewPutArgReg(var_types type, GenTree* arg, 
regNumber argReg); GenTree* gtNewBitCastNode(var_types type, GenTree* arg); protected: void gtBlockOpInit(GenTree* result, GenTree* dst, GenTree* srcOrFillVal, bool isVolatile); public: GenTreeObj* gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr); void gtSetObjGcInfo(GenTreeObj* objNode); GenTree* gtNewStructVal(CORINFO_CLASS_HANDLE structHnd, GenTree* addr); GenTree* gtNewBlockVal(GenTree* addr, unsigned size); GenTree* gtNewCpObjNode(GenTree* dst, GenTree* src, CORINFO_CLASS_HANDLE structHnd, bool isVolatile); GenTreeCall::Use* gtNewCallArgs(GenTree* node); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3, GenTree* node4); GenTreeCall::Use* gtPrependNewCallArg(GenTree* node, GenTreeCall::Use* args); GenTreeCall::Use* gtInsertNewCallArgAfter(GenTree* node, GenTreeCall::Use* after); GenTreeCall* gtNewCallNode(gtCallTypes callType, CORINFO_METHOD_HANDLE handle, var_types type, GenTreeCall::Use* args, const DebugInfo& di = DebugInfo()); GenTreeCall* gtNewIndCallNode(GenTree* addr, var_types type, GenTreeCall::Use* args, const DebugInfo& di = DebugInfo()); GenTreeCall* gtNewHelperCallNode(unsigned helper, var_types type, GenTreeCall::Use* args = nullptr); GenTreeCall* gtNewRuntimeLookupHelperCallNode(CORINFO_RUNTIME_LOOKUP* pRuntimeLookup, GenTree* ctxTree, void* compileTimeHandle); GenTreeLclVar* gtNewLclvNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET)); GenTreeLclVar* gtNewLclLNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET)); GenTreeLclVar* gtNewLclVarAddrNode(unsigned lclNum, var_types type = TYP_I_IMPL); GenTreeLclFld* gtNewLclFldAddrNode(unsigned lclNum, unsigned lclOffs, FieldSeqNode* fieldSeq, var_types type = TYP_I_IMPL); #ifdef FEATURE_SIMD GenTreeSIMD* gtNewSIMDNode( var_types type, GenTree* op1, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize); GenTreeSIMD* gtNewSIMDNode(var_types type, GenTree* op1, GenTree* op2, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize); void SetOpLclRelatedToSIMDIntrinsic(GenTree* op); #endif #ifdef FEATURE_HW_INTRINSICS GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, GenTree* op4, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree** operands, size_t operandCount, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* 
gtNewSimdHWIntrinsicNode(var_types type, IntrinsicNodeBuilder&& nodeBuilder, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode( var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, op2, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTree* gtNewSimdAbsNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdBinOpNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCeilNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpAllNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpAnyNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCndSelNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCreateBroadcastNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdDotProdNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdFloorNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdGetElementNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdMaxNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdMinNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdNarrowNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdSqrtNode( 
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdSumNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdUnOpNode(genTreeOps op, var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWidenLowerNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWidenUpperNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWithElementNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdZeroNode(var_types type, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode( var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID); CORINFO_CLASS_HANDLE gtGetStructHandleForHWSIMD(var_types simdType, CorInfoType simdBaseJitType); CorInfoType getBaseJitTypeFromArgIfNeeded(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType); #endif // FEATURE_HW_INTRINSICS GenTree* gtNewMustThrowException(unsigned helper, var_types type, CORINFO_CLASS_HANDLE clsHnd); GenTreeLclFld* gtNewLclFldNode(unsigned lnum, var_types type, unsigned offset); GenTree* gtNewInlineCandidateReturnExpr(GenTree* inlineCandidate, var_types type, BasicBlockFlags bbFlags); GenTreeField* gtNewFieldRef(var_types type, CORINFO_FIELD_HANDLE fldHnd, GenTree* obj = nullptr, DWORD offset = 0); GenTree* gtNewIndexRef(var_types typ, GenTree* arrayOp, GenTree* indexOp); GenTreeArrLen* gtNewArrLen(var_types typ, GenTree* arrayOp, int lenOffset, BasicBlock* block); GenTreeIndir* gtNewIndir(var_types typ, GenTree* addr); GenTree* gtNewNullCheck(GenTree* addr, BasicBlock* basicBlock); var_types gtTypeForNullCheck(GenTree* tree); void gtChangeOperToNullCheck(GenTree* tree, BasicBlock* block); static fgArgTabEntry* gtArgEntryByArgNum(GenTreeCall* call, unsigned argNum); static fgArgTabEntry* gtArgEntryByNode(GenTreeCall* call, GenTree* node); fgArgTabEntry* gtArgEntryByLateArgIndex(GenTreeCall* call, unsigned lateArgInx); static GenTree* gtArgNodeByLateArgInx(GenTreeCall* call, unsigned lateArgInx); GenTreeOp* gtNewAssignNode(GenTree* dst, GenTree* src); GenTree* gtNewTempAssign(unsigned tmp, GenTree* val, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* gtNewRefCOMfield(GenTree* objPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp, CORINFO_CLASS_HANDLE structType, GenTree* assg); GenTree* gtNewNothingNode(); GenTree* gtNewArgPlaceHolderNode(var_types type, CORINFO_CLASS_HANDLE clsHnd); GenTree* gtUnusedValNode(GenTree* expr); GenTree* gtNewKeepAliveNode(GenTree* op); GenTreeCast* gtNewCastNode(var_types typ, GenTree* op1, bool fromUnsigned, var_types castType); GenTreeCast* gtNewCastNodeL(var_types typ, 
GenTree* op1, bool fromUnsigned, var_types castType); GenTreeAllocObj* gtNewAllocObjNode( unsigned int helper, bool helperHasSideEffects, CORINFO_CLASS_HANDLE clsHnd, var_types type, GenTree* op1); GenTreeAllocObj* gtNewAllocObjNode(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool useParent); GenTree* gtNewRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* lookupTree); GenTreeIndir* gtNewMethodTableLookup(GenTree* obj); //------------------------------------------------------------------------ // Other GenTree functions GenTree* gtClone(GenTree* tree, bool complexOK = false); // If `tree` is a lclVar with lclNum `varNum`, return an IntCns with value `varVal`; otherwise, // create a copy of `tree`, adding specified flags, replacing uses of lclVar `deepVarNum` with // IntCnses with value `deepVarVal`. GenTree* gtCloneExpr( GenTree* tree, GenTreeFlags addFlags, unsigned varNum, int varVal, unsigned deepVarNum, int deepVarVal); // Create a copy of `tree`, optionally adding specifed flags, and optionally mapping uses of local // `varNum` to int constants with value `varVal`. GenTree* gtCloneExpr(GenTree* tree, GenTreeFlags addFlags = GTF_EMPTY, unsigned varNum = BAD_VAR_NUM, int varVal = 0) { return gtCloneExpr(tree, addFlags, varNum, varVal, varNum, varVal); } Statement* gtCloneStmt(Statement* stmt) { GenTree* exprClone = gtCloneExpr(stmt->GetRootNode()); return gtNewStmt(exprClone, stmt->GetDebugInfo()); } // Internal helper for cloning a call GenTreeCall* gtCloneExprCallHelper(GenTreeCall* call, GenTreeFlags addFlags = GTF_EMPTY, unsigned deepVarNum = BAD_VAR_NUM, int deepVarVal = 0); // Create copy of an inline or guarded devirtualization candidate tree. GenTreeCall* gtCloneCandidateCall(GenTreeCall* call); void gtUpdateSideEffects(Statement* stmt, GenTree* tree); void gtUpdateTreeAncestorsSideEffects(GenTree* tree); void gtUpdateStmtSideEffects(Statement* stmt); void gtUpdateNodeSideEffects(GenTree* tree); void gtUpdateNodeOperSideEffects(GenTree* tree); void gtUpdateNodeOperSideEffectsPost(GenTree* tree); // Returns "true" iff the complexity (not formally defined, but first interpretation // is #of nodes in subtree) of "tree" is greater than "limit". // (This is somewhat redundant with the "GetCostEx()/GetCostSz()" fields, but can be used // before they have been set.) bool gtComplexityExceeds(GenTree** tree, unsigned limit); GenTree* gtReverseCond(GenTree* tree); static bool gtHasRef(GenTree* tree, ssize_t lclNum); bool gtHasLocalsWithAddrOp(GenTree* tree); unsigned gtSetCallArgsOrder(const GenTreeCall::UseList& args, bool lateArgs, int* callCostEx, int* callCostSz); unsigned gtSetMultiOpOrder(GenTreeMultiOp* multiOp); void gtWalkOp(GenTree** op1, GenTree** op2, GenTree* base, bool constOnly); #ifdef DEBUG unsigned gtHashValue(GenTree* tree); GenTree* gtWalkOpEffectiveVal(GenTree* op); #endif void gtPrepareCost(GenTree* tree); bool gtIsLikelyRegVar(GenTree* tree); // Returns true iff the secondNode can be swapped with firstNode. bool gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode); // Given an address expression, compute its costs and addressing mode opportunities, // and mark addressing mode candidates as GTF_DONT_CSE. // TODO-Throughput - Consider actually instantiating these early, to avoid // having to re-run the algorithm that looks for them (might also improve CQ). 
bool gtMarkAddrMode(GenTree* addr, int* costEx, int* costSz, var_types type); unsigned gtSetEvalOrder(GenTree* tree); void gtSetStmtInfo(Statement* stmt); // Returns "true" iff "node" has any of the side effects in "flags". bool gtNodeHasSideEffects(GenTree* node, GenTreeFlags flags); // Returns "true" iff "tree" or its (transitive) children have any of the side effects in "flags". bool gtTreeHasSideEffects(GenTree* tree, GenTreeFlags flags); // Appends 'expr' in front of 'list' // 'list' will typically start off as 'nullptr' // when 'list' is non-null a GT_COMMA node is used to insert 'expr' GenTree* gtBuildCommaList(GenTree* list, GenTree* expr); void gtExtractSideEffList(GenTree* expr, GenTree** pList, GenTreeFlags GenTreeFlags = GTF_SIDE_EFFECT, bool ignoreRoot = false); GenTree* gtGetThisArg(GenTreeCall* call); // Static fields of struct types (and sometimes the types that those are reduced to) are represented by having the // static field contain an object pointer to the boxed struct. This simplifies the GC implementation...but // complicates the JIT somewhat. This predicate returns "true" iff a node with type "fieldNodeType", representing // the given "fldHnd", is such an object pointer. bool gtIsStaticFieldPtrToBoxedStruct(var_types fieldNodeType, CORINFO_FIELD_HANDLE fldHnd); // Return true if call is a recursive call; return false otherwise. // Note when inlining, this looks for calls back to the root method. bool gtIsRecursiveCall(GenTreeCall* call) { return gtIsRecursiveCall(call->gtCallMethHnd); } bool gtIsRecursiveCall(CORINFO_METHOD_HANDLE callMethodHandle) { return (callMethodHandle == impInlineRoot()->info.compMethodHnd); } //------------------------------------------------------------------------- GenTree* gtFoldExpr(GenTree* tree); GenTree* gtFoldExprConst(GenTree* tree); GenTree* gtFoldExprSpecial(GenTree* tree); GenTree* gtFoldBoxNullable(GenTree* tree); GenTree* gtFoldExprCompare(GenTree* tree); GenTree* gtCreateHandleCompare(genTreeOps oper, GenTree* op1, GenTree* op2, CorInfoInlineTypeCheck typeCheckInliningResult); GenTree* gtFoldExprCall(GenTreeCall* call); GenTree* gtFoldTypeCompare(GenTree* tree); GenTree* gtFoldTypeEqualityCall(bool isEq, GenTree* op1, GenTree* op2); // Options to control behavior of gtTryRemoveBoxUpstreamEffects enum BoxRemovalOptions { BR_REMOVE_AND_NARROW, // remove effects, minimize remaining work, return possibly narrowed source tree BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE, // remove effects and minimize remaining work, return type handle tree BR_REMOVE_BUT_NOT_NARROW, // remove effects, return original source tree BR_DONT_REMOVE, // check if removal is possible, return copy source tree BR_DONT_REMOVE_WANT_TYPE_HANDLE, // check if removal is possible, return type handle tree BR_MAKE_LOCAL_COPY // revise box to copy to temp local and return local's address }; GenTree* gtTryRemoveBoxUpstreamEffects(GenTree* tree, BoxRemovalOptions options = BR_REMOVE_AND_NARROW); GenTree* gtOptimizeEnumHasFlag(GenTree* thisOp, GenTree* flagOp); //------------------------------------------------------------------------- // Get the handle, if any. CORINFO_CLASS_HANDLE gtGetStructHandleIfPresent(GenTree* tree); // Get the handle, and assert if not found. CORINFO_CLASS_HANDLE gtGetStructHandle(GenTree* tree); // Get the handle for a ref type. 
CORINFO_CLASS_HANDLE gtGetClassHandle(GenTree* tree, bool* pIsExact, bool* pIsNonNull); // Get the class handle for an helper call CORINFO_CLASS_HANDLE gtGetHelperCallClassHandle(GenTreeCall* call, bool* pIsExact, bool* pIsNonNull); // Get the element handle for an array of ref type. CORINFO_CLASS_HANDLE gtGetArrayElementClassHandle(GenTree* array); // Get a class handle from a helper call argument CORINFO_CLASS_HANDLE gtGetHelperArgClassHandle(GenTree* array); // Get the class handle for a field CORINFO_CLASS_HANDLE gtGetFieldClassHandle(CORINFO_FIELD_HANDLE fieldHnd, bool* pIsExact, bool* pIsNonNull); // Check if this tree is a gc static base helper call bool gtIsStaticGCBaseHelperCall(GenTree* tree); //------------------------------------------------------------------------- // Functions to display the trees #ifdef DEBUG void gtDispNode(GenTree* tree, IndentStack* indentStack, _In_z_ const char* msg, bool isLIR); void gtDispConst(GenTree* tree); void gtDispLeaf(GenTree* tree, IndentStack* indentStack); void gtDispNodeName(GenTree* tree); #if FEATURE_MULTIREG_RET unsigned gtDispMultiRegCount(GenTree* tree); #endif void gtDispRegVal(GenTree* tree); void gtDispZeroFieldSeq(GenTree* tree); void gtDispVN(GenTree* tree); void gtDispCommonEndLine(GenTree* tree); enum IndentInfo { IINone, IIArc, IIArcTop, IIArcBottom, IIEmbedded, IIError, IndentInfoCount }; void gtDispChild(GenTree* child, IndentStack* indentStack, IndentInfo arcType, _In_opt_ const char* msg = nullptr, bool topOnly = false); void gtDispTree(GenTree* tree, IndentStack* indentStack = nullptr, _In_opt_ const char* msg = nullptr, bool topOnly = false, bool isLIR = false); void gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, const char** ilNameOut, unsigned* ilNumOut); int gtGetLclVarName(unsigned lclNum, char* buf, unsigned buf_remaining); char* gtGetLclVarName(unsigned lclNum); void gtDispLclVar(unsigned lclNum, bool padForBiggestDisp = true); void gtDispLclVarStructType(unsigned lclNum); void gtDispClassLayout(ClassLayout* layout, var_types type); void gtDispILLocation(const ILLocation& loc); void gtDispStmt(Statement* stmt, const char* msg = nullptr); void gtDispBlockStmts(BasicBlock* block); void gtGetArgMsg(GenTreeCall* call, GenTree* arg, unsigned argNum, char* bufp, unsigned bufLength); void gtGetLateArgMsg(GenTreeCall* call, GenTree* arg, int argNum, char* bufp, unsigned bufLength); void gtDispArgList(GenTreeCall* call, GenTree* lastCallOperand, IndentStack* indentStack); void gtDispAnyFieldSeq(FieldSeqNode* fieldSeq); void gtDispFieldSeq(FieldSeqNode* pfsn); void gtDispRange(LIR::ReadOnlyRange const& range); void gtDispTreeRange(LIR::Range& containingRange, GenTree* tree); void gtDispLIRNode(GenTree* node, const char* prefixMsg = nullptr); #endif // For tree walks enum fgWalkResult { WALK_CONTINUE, WALK_SKIP_SUBTREES, WALK_ABORT }; struct fgWalkData; typedef fgWalkResult(fgWalkPreFn)(GenTree** pTree, fgWalkData* data); typedef fgWalkResult(fgWalkPostFn)(GenTree** pTree, fgWalkData* data); static fgWalkPreFn gtMarkColonCond; static fgWalkPreFn gtClearColonCond; struct FindLinkData { GenTree* nodeToFind; GenTree** result; GenTree* parent; }; FindLinkData gtFindLink(Statement* stmt, GenTree* node); bool gtHasCatchArg(GenTree* tree); typedef ArrayStack<GenTree*> GenTreeStack; static bool gtHasCallOnStack(GenTreeStack* parentStack); //========================================================================= // BasicBlock functions #ifdef DEBUG // This is a debug flag we will use to assert when creating block 
during codegen // as this interferes with procedure splitting. If you know what you're doing, set // it to true before creating the block. (DEBUG only) bool fgSafeBasicBlockCreation; #endif BasicBlock* bbNewBasicBlock(BBjumpKinds jumpKind); void placeLoopAlignInstructions(); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX LclVarsInfo XX XX XX XX The variables to be used by the code generator. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // // For both PROMOTION_TYPE_NONE and PROMOTION_TYPE_DEPENDENT the struct will // be placed in the stack frame and its fields must be laid out sequentially. // // For PROMOTION_TYPE_INDEPENDENT each of the struct's fields is replaced by // a local variable that can be enregistered or placed in the stack frame. // The fields do not need to be laid out sequentially // enum lvaPromotionType { PROMOTION_TYPE_NONE, // The struct local is not promoted PROMOTION_TYPE_INDEPENDENT, // The struct local is promoted, // and its field locals are independent of its parent struct local. PROMOTION_TYPE_DEPENDENT // The struct local is promoted, // but its field locals depend on its parent struct local. }; /*****************************************************************************/ enum FrameLayoutState { NO_FRAME_LAYOUT, INITIAL_FRAME_LAYOUT, PRE_REGALLOC_FRAME_LAYOUT, REGALLOC_FRAME_LAYOUT, TENTATIVE_FRAME_LAYOUT, FINAL_FRAME_LAYOUT }; public: RefCountState lvaRefCountState; // Current local ref count state bool lvaLocalVarRefCounted() const { return lvaRefCountState == RCS_NORMAL; } bool lvaTrackedFixed; // true: We cannot add new 'tracked' variables unsigned lvaCount; // total number of locals, which includes function arguments, // special arguments, IL local variables, and JIT temporary variables LclVarDsc* lvaTable; // variable descriptor table unsigned lvaTableCnt; // lvaTable size (>= lvaCount) unsigned lvaTrackedCount; // actual # of locals being tracked unsigned lvaTrackedCountInSizeTUnits; // min # of size_t's sufficient to hold a bit for all the locals being tracked #ifdef DEBUG VARSET_TP lvaTrackedVars; // set of tracked variables #endif #ifndef TARGET_64BIT VARSET_TP lvaLongVars; // set of long (64-bit) variables #endif VARSET_TP lvaFloatVars; // set of floating-point (32-bit and 64-bit) variables unsigned lvaCurEpoch; // VarSets are relative to a specific set of tracked var indices. // If that changes, this changes. VarSets from different epochs // cannot be meaningfully combined. unsigned GetCurLVEpoch() { return lvaCurEpoch; } // reverse map of tracked number to var number unsigned lvaTrackedToVarNumSize; unsigned* lvaTrackedToVarNum; #if DOUBLE_ALIGN #ifdef DEBUG // # of procs compiled with a double-aligned stack static unsigned s_lvaDoubleAlignedProcsCount; #endif #endif // Getters and setters for address-exposed and do-not-enregister local var properties.
bool lvaVarAddrExposed(unsigned varNum) const; void lvaSetVarAddrExposed(unsigned varNum DEBUGARG(AddressExposedReason reason)); void lvaSetVarLiveInOutOfHandler(unsigned varNum); bool lvaVarDoNotEnregister(unsigned varNum); void lvSetMinOptsDoNotEnreg(); bool lvaEnregEHVars; bool lvaEnregMultiRegVars; void lvaSetVarDoNotEnregister(unsigned varNum DEBUGARG(DoNotEnregisterReason reason)); unsigned lvaVarargsHandleArg; #ifdef TARGET_X86 unsigned lvaVarargsBaseOfStkArgs; // Pointer (computed based on incoming varargs handle) to the start of the stack // arguments #endif // TARGET_X86 unsigned lvaInlinedPInvokeFrameVar; // variable representing the InlinedCallFrame unsigned lvaReversePInvokeFrameVar; // variable representing the reverse PInvoke frame #if FEATURE_FIXED_OUT_ARGS unsigned lvaPInvokeFrameRegSaveVar; // variable representing the RegSave for PInvoke inlining. #endif unsigned lvaMonAcquired; // boolean variable introduced in synchronized methods // that tracks whether the lock has been taken unsigned lvaArg0Var; // The lclNum of arg0. Normally this will be info.compThisArg. // However, if there is a "ldarga 0" or "starg 0" in the IL, // we will redirect all "ldarg(a) 0" and "starg 0" to this temp. unsigned lvaInlineeReturnSpillTemp; // The temp to spill the non-VOID return expression // in case there are multiple BBJ_RETURN blocks in the inlinee // or if the inlinee has GC ref locals. #if FEATURE_FIXED_OUT_ARGS unsigned lvaOutgoingArgSpaceVar; // dummy TYP_LCLBLK var for fixed outgoing argument space PhasedVar<unsigned> lvaOutgoingArgSpaceSize; // size of fixed outgoing argument space #endif // FEATURE_FIXED_OUT_ARGS static unsigned GetOutgoingArgByteSize(unsigned sizeWithoutPadding) { return roundUp(sizeWithoutPadding, TARGET_POINTER_SIZE); } // Variable representing the return address. The helper-based tailcall // mechanism passes the address of the return address to a runtime helper // where it is used to detect tail-call chains. unsigned lvaRetAddrVar; #if defined(DEBUG) && defined(TARGET_XARCH) unsigned lvaReturnSpCheck; // Stores SP to confirm it is not corrupted on return. #endif // defined(DEBUG) && defined(TARGET_XARCH) #if defined(DEBUG) && defined(TARGET_X86) unsigned lvaCallSpCheck; // Stores SP to confirm it is not corrupted after every call. #endif // defined(DEBUG) && defined(TARGET_X86) bool lvaGenericsContextInUse; bool lvaKeepAliveAndReportThis(); // Synchronized instance method of a reference type, or // CORINFO_GENERICS_CTXT_FROM_THIS? bool lvaReportParamTypeArg(); // Exceptions and CORINFO_GENERICS_CTXT_FROM_PARAMTYPEARG? //------------------------------------------------------------------------- // All these frame offsets are inter-related and must be kept in sync #if !defined(FEATURE_EH_FUNCLETS) // This is used for the callable handlers unsigned lvaShadowSPslotsVar; // TYP_BLK variable for all the shadow SP slots #endif // FEATURE_EH_FUNCLETS int lvaCachedGenericContextArgOffs; int lvaCachedGenericContextArgOffset(); // For CORINFO_CALLCONV_PARAMTYPE and if generic context is passed as // THIS pointer #ifdef JIT32_GCENCODER unsigned lvaLocAllocSPvar; // variable which stores the value of ESP after the last alloca/localloc #endif // JIT32_GCENCODER unsigned lvaNewObjArrayArgs; // variable with arguments for new MD array helper // TODO-Review: Prior to reg predict we reserve 24 bytes for Spill temps.
// after the reg predict we will use a computed maxTmpSize // which is based upon the number of spill temps predicted by reg predict // All this is necessary because if we under-estimate the size of the spill // temps we could fail when encoding instructions that reference stack offsets for ARM. // // Pre codegen max spill temp size. static const unsigned MAX_SPILL_TEMP_SIZE = 24; //------------------------------------------------------------------------- unsigned lvaGetMaxSpillTempSize(); #ifdef TARGET_ARM bool lvaIsPreSpilled(unsigned lclNum, regMaskTP preSpillMask); #endif // TARGET_ARM void lvaAssignFrameOffsets(FrameLayoutState curState); void lvaFixVirtualFrameOffsets(); void lvaUpdateArgWithInitialReg(LclVarDsc* varDsc); void lvaUpdateArgsWithInitialReg(); void lvaAssignVirtualFrameOffsetsToArgs(); #ifdef UNIX_AMD64_ABI int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs, int* callerArgOffset); #else // !UNIX_AMD64_ABI int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs); #endif // !UNIX_AMD64_ABI void lvaAssignVirtualFrameOffsetsToLocals(); int lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs); #ifdef TARGET_AMD64 // Returns true if compCalleeRegsPushed (including RBP if used as frame pointer) is even. bool lvaIsCalleeSavedIntRegCountEven(); #endif void lvaAlignFrame(); void lvaAssignFrameOffsetsToPromotedStructs(); int lvaAllocateTemps(int stkOffs, bool mustDoubleAlign); #ifdef DEBUG void lvaDumpRegLocation(unsigned lclNum); void lvaDumpFrameLocation(unsigned lclNum); void lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t refCntWtdWidth = 6); void lvaTableDump(FrameLayoutState curState = NO_FRAME_LAYOUT); // NO_FRAME_LAYOUT means use the current frame // layout state defined by lvaDoneFrameLayout #endif // Limit frames size to 1GB. The maximum is 2GB in theory - make it intentionally smaller // to avoid bugs from borderline cases. #define MAX_FrameSize 0x3FFFFFFF void lvaIncrementFrameSize(unsigned size); unsigned lvaFrameSize(FrameLayoutState curState); // Returns the caller-SP-relative offset for the SP/FP relative offset determined by FP based. int lvaToCallerSPRelativeOffset(int offs, bool isFpBased, bool forRootFrame = true) const; // Returns the caller-SP-relative offset for the local variable "varNum." int lvaGetCallerSPRelativeOffset(unsigned varNum); // Returns the SP-relative offset for the local variable "varNum". Illegal to ask this for functions with localloc. int lvaGetSPRelativeOffset(unsigned varNum); int lvaToInitialSPRelativeOffset(unsigned offset, bool isFpBased); int lvaGetInitialSPRelativeOffset(unsigned varNum); // True if this is an OSR compilation and this local is potentially // located on the original method stack frame. 
bool lvaIsOSRLocal(unsigned varNum); //------------------------ For splitting types ---------------------------- void lvaInitTypeRef(); void lvaInitArgs(InitVarDscInfo* varDscInfo); void lvaInitThisPtr(InitVarDscInfo* varDscInfo); void lvaInitRetBuffArg(InitVarDscInfo* varDscInfo, bool useFixedRetBufReg); void lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, unsigned takeArgs); void lvaInitGenericsCtxt(InitVarDscInfo* varDscInfo); void lvaInitVarArgsHandle(InitVarDscInfo* varDscInfo); void lvaInitVarDsc(LclVarDsc* varDsc, unsigned varNum, CorInfoType corInfoType, CORINFO_CLASS_HANDLE typeHnd, CORINFO_ARG_LIST_HANDLE varList, CORINFO_SIG_INFO* varSig); static unsigned lvaTypeRefMask(var_types type); var_types lvaGetActualType(unsigned lclNum); var_types lvaGetRealType(unsigned lclNum); //------------------------------------------------------------------------- void lvaInit(); LclVarDsc* lvaGetDesc(unsigned lclNum) { assert(lclNum < lvaCount); return &lvaTable[lclNum]; } LclVarDsc* lvaGetDesc(unsigned lclNum) const { assert(lclNum < lvaCount); return &lvaTable[lclNum]; } LclVarDsc* lvaGetDesc(const GenTreeLclVarCommon* lclVar) { return lvaGetDesc(lclVar->GetLclNum()); } unsigned lvaTrackedIndexToLclNum(unsigned trackedIndex) { assert(trackedIndex < lvaTrackedCount); unsigned lclNum = lvaTrackedToVarNum[trackedIndex]; assert(lclNum < lvaCount); return lclNum; } LclVarDsc* lvaGetDescByTrackedIndex(unsigned trackedIndex) { return lvaGetDesc(lvaTrackedIndexToLclNum(trackedIndex)); } unsigned lvaGetLclNum(const LclVarDsc* varDsc) { assert((lvaTable <= varDsc) && (varDsc < lvaTable + lvaCount)); // varDsc must point within the table assert(((char*)varDsc - (char*)lvaTable) % sizeof(LclVarDsc) == 0); // varDsc better not point in the middle of a variable unsigned varNum = (unsigned)(varDsc - lvaTable); assert(varDsc == &lvaTable[varNum]); return varNum; } unsigned lvaLclSize(unsigned varNum); unsigned lvaLclExactSize(unsigned varNum); bool lvaHaveManyLocals() const; unsigned lvaGrabTemp(bool shortLifetime DEBUGARG(const char* reason)); unsigned lvaGrabTemps(unsigned cnt DEBUGARG(const char* reason)); unsigned lvaGrabTempWithImplicitUse(bool shortLifetime DEBUGARG(const char* reason)); void lvaSortByRefCount(); void lvaMarkLocalVars(); // Local variable ref-counting void lvaComputeRefCounts(bool isRecompute, bool setSlotNumbers); void lvaMarkLocalVars(BasicBlock* block, bool isRecompute); void lvaAllocOutgoingArgSpaceVar(); // Set up lvaOutgoingArgSpaceVar VARSET_VALRET_TP lvaStmtLclMask(Statement* stmt); #ifdef DEBUG struct lvaStressLclFldArgs { Compiler* m_pCompiler; bool m_bFirstPass; }; static fgWalkPreFn lvaStressLclFldCB; void lvaStressLclFld(); void lvaDispVarSet(VARSET_VALARG_TP set, VARSET_VALARG_TP allVars); void lvaDispVarSet(VARSET_VALARG_TP set); #endif #ifdef TARGET_ARM int lvaFrameAddress(int varNum, bool mustBeFPBased, regNumber* pBaseReg, int addrModeOffset, bool isFloatUsage); #else int lvaFrameAddress(int varNum, bool* pFPbased); #endif bool lvaIsParameter(unsigned varNum); bool lvaIsRegArgument(unsigned varNum); bool lvaIsOriginalThisArg(unsigned varNum); // Is this varNum the original this argument? bool lvaIsOriginalThisReadOnly(); // return true if there is no place in the code // that writes to arg0 // For x64 this is 3, 5, 6, 7, >8 byte structs that are passed by reference. // For ARM64, this is structs larger than 16 bytes that are passed by reference. 
bool lvaIsImplicitByRefLocal(unsigned varNum) { #if defined(TARGET_AMD64) || defined(TARGET_ARM64) LclVarDsc* varDsc = lvaGetDesc(varNum); if (varDsc->lvIsImplicitByRef) { assert(varDsc->lvIsParam); assert(varTypeIsStruct(varDsc) || (varDsc->lvType == TYP_BYREF)); return true; } #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) return false; } // Returns true if this local var is a multireg struct bool lvaIsMultiregStruct(LclVarDsc* varDsc, bool isVararg); // If the local is a TYP_STRUCT, get/set a class handle describing it CORINFO_CLASS_HANDLE lvaGetStruct(unsigned varNum); void lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool unsafeValueClsCheck, bool setTypeInfo = true); void lvaSetStructUsedAsVarArg(unsigned varNum); // If the local is TYP_REF, set or update the associated class information. void lvaSetClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false); void lvaSetClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr); void lvaUpdateClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false); void lvaUpdateClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr); #define MAX_NumOfFieldsInPromotableStruct 4 // Maximum number of fields in promotable struct // Info about struct type fields. struct lvaStructFieldInfo { CORINFO_FIELD_HANDLE fldHnd; unsigned char fldOffset; unsigned char fldOrdinal; var_types fldType; unsigned fldSize; CORINFO_CLASS_HANDLE fldTypeHnd; lvaStructFieldInfo() : fldHnd(nullptr), fldOffset(0), fldOrdinal(0), fldType(TYP_UNDEF), fldSize(0), fldTypeHnd(nullptr) { } }; // Info about a struct type, instances of which may be candidates for promotion. struct lvaStructPromotionInfo { CORINFO_CLASS_HANDLE typeHnd; bool canPromote; bool containsHoles; bool customLayout; bool fieldsSorted; unsigned char fieldCnt; lvaStructFieldInfo fields[MAX_NumOfFieldsInPromotableStruct]; lvaStructPromotionInfo(CORINFO_CLASS_HANDLE typeHnd = nullptr) : typeHnd(typeHnd) , canPromote(false) , containsHoles(false) , customLayout(false) , fieldsSorted(false) , fieldCnt(0) { } }; struct lvaFieldOffsetCmp { bool operator()(const lvaStructFieldInfo& field1, const lvaStructFieldInfo& field2); }; // This class is responsible for checking validity and profitability of struct promotion. // If it is both legal and profitable, then TryPromoteStructVar promotes the struct and initializes // necessary information for fgMorphStructField to use.
class StructPromotionHelper { public: StructPromotionHelper(Compiler* compiler); bool CanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd); bool TryPromoteStructVar(unsigned lclNum); void Clear() { structPromotionInfo.typeHnd = NO_CLASS_HANDLE; } #ifdef DEBUG void CheckRetypedAsScalar(CORINFO_FIELD_HANDLE fieldHnd, var_types requestedType); #endif // DEBUG private: bool CanPromoteStructVar(unsigned lclNum); bool ShouldPromoteStructVar(unsigned lclNum); void PromoteStructVar(unsigned lclNum); void SortStructFields(); bool CanConstructAndPromoteField(lvaStructPromotionInfo* structPromotionInfo); lvaStructFieldInfo GetFieldInfo(CORINFO_FIELD_HANDLE fieldHnd, BYTE ordinal); bool TryPromoteStructField(lvaStructFieldInfo& outerFieldInfo); private: Compiler* compiler; lvaStructPromotionInfo structPromotionInfo; #ifdef DEBUG typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<CORINFO_FIELD_STRUCT_>, var_types> RetypedAsScalarFieldsMap; RetypedAsScalarFieldsMap retypedFieldsMap; #endif // DEBUG }; StructPromotionHelper* structPromotionHelper; unsigned lvaGetFieldLocal(const LclVarDsc* varDsc, unsigned int fldOffset); lvaPromotionType lvaGetPromotionType(const LclVarDsc* varDsc); lvaPromotionType lvaGetPromotionType(unsigned varNum); lvaPromotionType lvaGetParentPromotionType(const LclVarDsc* varDsc); lvaPromotionType lvaGetParentPromotionType(unsigned varNum); bool lvaIsFieldOfDependentlyPromotedStruct(const LclVarDsc* varDsc); bool lvaIsGCTracked(const LclVarDsc* varDsc); #if defined(FEATURE_SIMD) bool lvaMapSimd12ToSimd16(const LclVarDsc* varDsc) { assert(varDsc->lvType == TYP_SIMD12); assert(varDsc->lvExactSize == 12); #if defined(TARGET_64BIT) assert(compMacOsArm64Abi() || varDsc->lvSize() == 16); #endif // defined(TARGET_64BIT) // We make local variable SIMD12 types 16 bytes instead of just 12. // lvSize() will return 16 bytes for SIMD12, even for fields. // However, we can't do that mapping if the var is a dependently promoted struct field. // Such a field must remain its exact size within its parent struct unless it is a single // field *and* it is the only field in a struct of 16 bytes. if (varDsc->lvSize() != 16) { return false; } if (lvaIsFieldOfDependentlyPromotedStruct(varDsc)) { LclVarDsc* parentVarDsc = lvaGetDesc(varDsc->lvParentLcl); return (parentVarDsc->lvFieldCnt == 1) && (parentVarDsc->lvSize() == 16); } return true; } #endif // defined(FEATURE_SIMD) unsigned lvaGSSecurityCookie; // LclVar number bool lvaTempsHaveLargerOffsetThanVars(); // Returns "true" iff local variable "lclNum" is in SSA form. bool lvaInSsa(unsigned lclNum) { assert(lclNum < lvaCount); return lvaTable[lclNum].lvInSsa; } unsigned lvaStubArgumentVar; // variable representing the secret stub argument coming in EAX #if defined(FEATURE_EH_FUNCLETS) unsigned lvaPSPSym; // variable representing the PSPSym #endif InlineInfo* impInlineInfo; // Only present for inlinees InlineStrategy* m_inlineStrategy; InlineContext* compInlineContext; // Always present // The Compiler* that is the root of the inlining tree of which "this" is a member. Compiler* impInlineRoot(); #if defined(DEBUG) || defined(INLINE_DATA) unsigned __int64 getInlineCycleCount() { return m_compCycles; } #endif // defined(DEBUG) || defined(INLINE_DATA) bool fgNoStructPromotion; // Set to TRUE to turn off struct promotion for this method. bool fgNoStructParamPromotion; // Set to TRUE to turn off struct promotion for parameters this method. 
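// Illustrative sketch (not code from the JIT): the promotion queries above are typically consumed by
// branching on lvaPromotionType. The helper below is hypothetical and only shows how the three enum
// values described earlier are meant to be interpreted.
//
//   void HypotheticalHandleStructLocal(Compiler* comp, unsigned lclNum)
//   {
//       LclVarDsc* varDsc = comp->lvaGetDesc(lclNum);
//       switch (comp->lvaGetPromotionType(varDsc))
//       {
//           case Compiler::PROMOTION_TYPE_INDEPENDENT:
//               // Every field of the struct lives in its own promoted field local; the parent
//               // struct local is not accessed as a whole.
//               break;
//           case Compiler::PROMOTION_TYPE_DEPENDENT:
//               // Field locals exist, but they alias the parent's stack home, so stores through
//               // a field must keep the parent's frame copy consistent.
//               break;
//           case Compiler::PROMOTION_TYPE_NONE:
//           default:
//               // The struct stays in its frame slot and is read/written as a whole.
//               break;
//       }
//   }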
//========================================================================= // PROTECTED //========================================================================= protected: //---------------- Local variable ref-counting ---------------------------- void lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt, bool isRecompute); bool IsDominatedByExceptionalEntry(BasicBlock* block); void SetVolatileHint(LclVarDsc* varDsc); // Keeps the mapping from SSA #'s to VN's for the implicit memory variables. SsaDefArray<SsaMemDef> lvMemoryPerSsaData; public: // Returns the address of the per-Ssa data for memory at the given ssaNum (which is required // not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is // not an SSA variable). SsaMemDef* GetMemoryPerSsaData(unsigned ssaNum) { return lvMemoryPerSsaData.GetSsaDef(ssaNum); } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Importer XX XX XX XX Imports the given method and converts it to semantic trees XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ private: // For prefixFlags enum { PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix PREFIX_TAILCALL_IMPLICIT = 0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix PREFIX_TAILCALL_STRESS = 0x00000100, // call doesn't "tail" IL prefix but is treated as explicit because of tail call stress PREFIX_TAILCALL = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT | PREFIX_TAILCALL_STRESS), PREFIX_VOLATILE = 0x00001000, PREFIX_UNALIGNED = 0x00010000, PREFIX_CONSTRAINED = 0x00100000, PREFIX_READONLY = 0x01000000 }; static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix); static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp); static bool impOpcodeIsCallOpcode(OPCODE opcode); public: void impInit(); void impImport(); CORINFO_CLASS_HANDLE impGetRefAnyClass(); CORINFO_CLASS_HANDLE impGetRuntimeArgumentHandle(); CORINFO_CLASS_HANDLE impGetTypeHandleClass(); CORINFO_CLASS_HANDLE impGetStringClass(); CORINFO_CLASS_HANDLE impGetObjectClass(); // Returns underlying type of handles returned by ldtoken instruction var_types GetRuntimeHandleUnderlyingType() { // RuntimeTypeHandle is backed by raw pointer on CoreRT and by object reference on other runtimes return IsTargetAbi(CORINFO_CORERT_ABI) ? TYP_I_IMPL : TYP_REF; } void impDevirtualizeCall(GenTreeCall* call, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_METHOD_HANDLE* method, unsigned* methodFlags, CORINFO_CONTEXT_HANDLE* contextHandle, CORINFO_CONTEXT_HANDLE* exactContextHandle, bool isLateDevirtualization, bool isExplicitTailCall, IL_OFFSET ilOffset = BAD_IL_OFFSET); //========================================================================= // PROTECTED //========================================================================= protected: //-------------------- Stack manipulation --------------------------------- unsigned impStkSize; // Size of the full stack #define SMALL_STACK_SIZE 16 // number of elements in impSmallStack struct SavedStack // used to save/restore stack contents. 
{ unsigned ssDepth; // number of values on stack StackEntry* ssTrees; // saved tree values }; bool impIsPrimitive(CorInfoType type); bool impILConsumesAddr(const BYTE* codeAddr); void impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind); void impPushOnStack(GenTree* tree, typeInfo ti); void impPushNullObjRefOnStack(); StackEntry impPopStack(); StackEntry& impStackTop(unsigned n = 0); unsigned impStackHeight(); void impSaveStackState(SavedStack* savePtr, bool copy); void impRestoreStackState(SavedStack* savePtr); GenTree* impImportLdvirtftn(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); int impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, const BYTE* codeAddr, const BYTE* codeEndp, bool makeInlineObservation = false); void impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken); void impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); bool impCanPInvokeInline(); bool impCanPInvokeInlineCallSite(BasicBlock* block); void impCheckForPInvokeCall( GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block); GenTreeCall* impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di = DebugInfo()); void impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig); void impInsertHelperCall(CORINFO_HELPER_DESC* helperCall); void impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall); void impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall); var_types impImportCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a // type parameter? 
GenTree* newobjThis, int prefixFlags, CORINFO_CALL_INFO* callInfo, IL_OFFSET rawILOffset); CORINFO_CLASS_HANDLE impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE specialIntrinsicHandle); bool impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo, CorInfoCallConvExtension callConv); GenTree* impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd); GenTree* impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension unmgdCallConv); #ifdef DEBUG var_types impImportJitTestLabelMark(int numArgs); #endif // DEBUG GenTree* impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken); GenTree* impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp); GenTree* impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp); static void impBashVarAddrsToI(GenTree* tree1, GenTree* tree2 = nullptr); GenTree* impImplicitIorI4Cast(GenTree* tree, var_types dstTyp); GenTree* impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp); void impImportLeave(BasicBlock* block); void impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr); GenTree* impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom); // Mirrors StringComparison.cs enum StringComparison { Ordinal = 4, OrdinalIgnoreCase = 5 }; enum StringComparisonJoint { Eq, // (d1 == cns1) && (s2 == cns2) Xor, // (d1 ^ cns1) | (s2 ^ cns2) }; GenTree* impStringEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags); GenTree* impSpanEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags); GenTree* impExpandHalfConstEquals(GenTreeLclVar* data, GenTree* lengthFld, bool checkForNull, bool startsWith, WCHAR* cnsData, int len, int dataOffset, StringComparison cmpMode); GenTree* impCreateCompareInd(GenTreeLclVar* obj, var_types type, ssize_t offset, ssize_t value, StringComparison ignoreCase, StringComparisonJoint joint = Eq); GenTree* impExpandHalfConstEqualsSWAR( GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset, StringComparison cmpMode); GenTree* impExpandHalfConstEqualsSIMD( GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset, StringComparison cmpMode); GenTreeStrCon* impGetStrConFromSpan(GenTree* span); GenTree* impIntrinsic(GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, unsigned methodFlags, int memberRef, bool readonlyCall, bool tailCall, CORINFO_RESOLVED_TOKEN* pContstrainedResolvedToken, CORINFO_THIS_TRANSFORM constraintCallThisTransform, NamedIntrinsic* pIntrinsicName, bool* isSpecialIntrinsic = nullptr); GenTree* impMathIntrinsic(CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, var_types callType, NamedIntrinsic intrinsicName, bool tailCall); NamedIntrinsic lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method); GenTree* impUnsupportedNamedIntrinsic(unsigned helper, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand); #ifdef FEATURE_HW_INTRINSICS GenTree* impHWIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand); GenTree* impSimdAsHWIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, GenTree* newobjThis); protected: bool compSupportsHWIntrinsic(CORINFO_InstructionSet isa); GenTree* impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, var_types retType, CorInfoType simdBaseJitType, 
unsigned simdSize, GenTree* newobjThis); GenTree* impSpecialIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType, var_types retType, unsigned simdSize); GenTree* getArgForHWIntrinsic(var_types argType, CORINFO_CLASS_HANDLE argClass, bool expectAddr = false, GenTree* newobjThis = nullptr); GenTree* impNonConstFallback(NamedIntrinsic intrinsic, var_types simdType, CorInfoType simdBaseJitType); GenTree* addRangeCheckIfNeeded( NamedIntrinsic intrinsic, GenTree* immOp, bool mustExpand, int immLowerBound, int immUpperBound); GenTree* addRangeCheckForHWIntrinsic(GenTree* immOp, int immLowerBound, int immUpperBound); #ifdef TARGET_XARCH GenTree* impBaseIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType, var_types retType, unsigned simdSize); GenTree* impSSEIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impSSE2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impAvxOrAvx2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impBMI1OrBMI2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); #endif // TARGET_XARCH #endif // FEATURE_HW_INTRINSICS GenTree* impArrayAccessIntrinsic(CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, NamedIntrinsic intrinsicName); GenTree* impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig); GenTree* impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig); GenTree* impKeepAliveIntrinsic(GenTree* objToKeepAlive); GenTree* impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); GenTree* impTransformThis(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, CORINFO_THIS_TRANSFORM transform); //----------------- Manipulating the trees and stmts ---------------------- Statement* impStmtList; // Statements for the BB being imported. Statement* impLastStmt; // The last statement for the current BB. 
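// Illustrative sketch (not code from the JIT): impStmtList and impLastStmt are the head and tail of
// the statement list being accumulated for the block currently being imported; the helpers declared
// just below (impBeginTreeList, impAppendTree, impEndTreeList) maintain it. A hypothetical import of
// a single "store value to a temp" step would follow roughly this shape (lclNum is assumed to be a
// previously grabbed temp):
//
//   GenTree* value = impPopStack().val;                          // pop the operand off the eval stack
//   GenTree* asg   = gtNewTempAssign(lclNum, value);             // build the assignment tree
//   impAppendTree(asg, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); // append as a new Statement at
//                                                                // impLastStmt, spilling interfering
//                                                                // stack entries first
//
// impEndTreeList(block) later hands the accumulated impStmtList..impLastStmt range to the BasicBlock.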
public: enum { CHECK_SPILL_ALL = -1, CHECK_SPILL_NONE = -2 }; void impBeginTreeList(); void impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt); void impEndTreeList(BasicBlock* block); void impAppendStmtCheck(Statement* stmt, unsigned chkLevel); void impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo = true); void impAppendStmt(Statement* stmt); void impInsertStmtBefore(Statement* stmt, Statement* stmtBefore); Statement* impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo = true); void impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore); void impAssignTempGen(unsigned tmp, GenTree* val, unsigned curLevel = (unsigned)CHECK_SPILL_NONE, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); void impAssignTempGen(unsigned tmpNum, GenTree* val, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); Statement* impExtractLastStmt(); GenTree* impCloneExpr(GenTree* tree, GenTree** clone, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt DEBUGARG(const char* reason)); GenTree* impAssignStruct(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* impAssignStructPtr(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* impGetStructAddr(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool willDeref); var_types impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* simdBaseJitType = nullptr); GenTree* impNormStructVal(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool forceNormalization = false); GenTree* impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool* pRuntimeLookup = nullptr, bool mustRestoreHandle = false, bool importParent = false); GenTree* impParentClassTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool* pRuntimeLookup = nullptr, bool mustRestoreHandle = false) { return impTokenToHandle(pResolvedToken, pRuntimeLookup, mustRestoreHandle, true); } GenTree* impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags flags, void* compileTimeHandle); GenTree* getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind); GenTree* impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle); GenTree* impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup, GenTreeFlags flags, void* compileTimeHandle); GenTreeCall* impReadyToRunHelperToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoHelpFunc helper, var_types type, GenTreeCall::Use* args = nullptr, CORINFO_LOOKUP_KIND* pGenericLookupKind = nullptr); bool impIsCastHelperEligibleForClassProbe(GenTree* tree); bool impIsCastHelperMayHaveProfileData(GenTree* tree); GenTree* impCastClassOrIsInstToTree( GenTree* op1, GenTree* op2, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass, IL_OFFSET ilOffset); GenTree* impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass); bool VarTypeIsMultiByteAndCanEnreg(var_types type, CORINFO_CLASS_HANDLE typeClass, unsigned* typeSize, bool forReturn, bool isVarArg, 
CorInfoCallConvExtension callConv); bool IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName); bool IsTargetIntrinsic(NamedIntrinsic intrinsicName); bool IsMathIntrinsic(NamedIntrinsic intrinsicName); bool IsMathIntrinsic(GenTree* tree); private: //----------------- Importing the method ---------------------------------- CORINFO_CONTEXT_HANDLE impTokenLookupContextHandle; // The context used for looking up tokens. #ifdef DEBUG unsigned impCurOpcOffs; const char* impCurOpcName; bool impNestedStackSpill; // For displaying instrs with generated native code (-n:B) Statement* impLastILoffsStmt; // oldest stmt added for which we did not call SetLastILOffset(). void impNoteLastILoffs(); #endif // Debug info of current statement being imported. It gets set to contain // no IL location (!impCurStmtDI.GetLocation().IsValid) after it has been // set in the appended trees. Then it gets updated at IL instructions for // which we have to report mapping info. // It will always contain the current inline context. DebugInfo impCurStmtDI; DebugInfo impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall); void impCurStmtOffsSet(IL_OFFSET offs); void impNoteBranchOffs(); unsigned impInitBlockLineInfo(); bool impIsThis(GenTree* obj); bool impIsLDFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr); bool impIsDUP_LDVIRTFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr); bool impIsAnySTLOC(OPCODE opcode) { return ((opcode == CEE_STLOC) || (opcode == CEE_STLOC_S) || ((opcode >= CEE_STLOC_0) && (opcode <= CEE_STLOC_3))); } GenTreeCall::Use* impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig, GenTreeCall::Use* prefixArgs = nullptr); bool impCheckImplicitArgumentCoercion(var_types sigType, var_types nodeType) const; GenTreeCall::Use* impPopReverseCallArgs(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount = 0); //---------------- Spilling the importer stack ---------------------------- // The maximum number of bytes of IL processed without clean stack state. // It allows to limit the maximum tree size and depth. static const unsigned MAX_TREE_SIZE = 200; bool impCanSpillNow(OPCODE prevOpcode); struct PendingDsc { PendingDsc* pdNext; BasicBlock* pdBB; SavedStack pdSavedStack; ThisInitState pdThisPtrInit; }; PendingDsc* impPendingList; // list of BBs currently waiting to be imported. PendingDsc* impPendingFree; // Freed up dscs that can be reused // We keep a byte-per-block map (dynamically extended) in the top-level Compiler object of a compilation. JitExpandArray<BYTE> impPendingBlockMembers; // Return the byte for "b" (allocating/extending impPendingBlockMembers if necessary.) // Operates on the map in the top-level ancestor. BYTE impGetPendingBlockMember(BasicBlock* blk) { return impInlineRoot()->impPendingBlockMembers.Get(blk->bbInd()); } // Set the byte for "b" to "val" (allocating/extending impPendingBlockMembers if necessary.) // Operates on the map in the top-level ancestor. 
void impSetPendingBlockMember(BasicBlock* blk, BYTE val) { impInlineRoot()->impPendingBlockMembers.Set(blk->bbInd(), val); } bool impCanReimport; bool impSpillStackEntry(unsigned level, unsigned varNum #ifdef DEBUG , bool bAssertOnRecursion, const char* reason #endif ); void impSpillStackEnsure(bool spillLeaves = false); void impEvalSideEffects(); void impSpillSpecialSideEff(); void impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason)); void impSpillValueClasses(); void impSpillEvalStack(); static fgWalkPreFn impFindValueClasses; void impSpillLclRefs(ssize_t lclNum); BasicBlock* impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter); bool impBlockIsInALoop(BasicBlock* block); void impImportBlockCode(BasicBlock* block); void impReimportMarkBlock(BasicBlock* block); void impReimportMarkSuccessors(BasicBlock* block); void impVerifyEHBlock(BasicBlock* block, bool isTryStart); void impImportBlockPending(BasicBlock* block); // Similar to impImportBlockPending, but assumes that block has already been imported once and is being // reimported for some reason. It specifically does *not* look at verCurrentState to set the EntryState // for the block, but instead, just re-uses the block's existing EntryState. void impReimportBlockPending(BasicBlock* block); var_types impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2); void impImportBlock(BasicBlock* block); // Assumes that "block" is a basic block that completes with a non-empty stack. We will assign the values // on the stack to local variables (the "spill temp" variables). The successor blocks will assume that // its incoming stack contents are in those locals. This requires "block" and its successors to agree on // the variables that will be used -- and for all the predecessors of those successors, and the // successors of those predecessors, etc. Call such a set of blocks closed under alternating // successor/predecessor edges a "spill clique." A block is a "predecessor" or "successor" member of the // clique (or, conceivably, both). Each block has a specified sequence of incoming and outgoing spill // temps. If "block" already has its outgoing spill temps assigned (they are always a contiguous series // of local variable numbers, so we represent them with the base local variable number), returns that. // Otherwise, picks a set of spill temps, and propagates this choice to all blocks in the spill clique of // which "block" is a member (asserting, in debug mode, that no block in this clique had its spill temps // chosen already. More precisely, that the incoming or outgoing spill temps are not chosen, depending // on which kind of member of the clique the block is). unsigned impGetSpillTmpBase(BasicBlock* block); // Assumes that "block" is a basic block that completes with a non-empty stack. We have previously // assigned the values on the stack to local variables (the "spill temp" variables). The successor blocks // will assume that its incoming stack contents are in those locals. This requires "block" and its // successors to agree on the variables and their types that will be used. The CLI spec allows implicit // conversions between 'int' and 'native int' or 'float' and 'double' stack types. So one predecessor can // push an int and another can push a native int. 
For 64-bit we have chosen to implement this by typing // the "spill temp" as native int, and then importing (or re-importing as needed) so that all the // predecessors in the "spill clique" push a native int (sign-extending if needed), and all the // successors receive a native int. Similarly float and double are unified to double. // This routine is called after a type-mismatch is detected, and it will walk the spill clique to mark // blocks for re-importation as appropriate (both successors, so they get the right incoming type, and // predecessors, so they insert an upcast if needed). void impReimportSpillClique(BasicBlock* block); // When we compute a "spill clique" (see above) these byte-maps are allocated to have a byte per basic // block, and represent the predecessor and successor members of the clique currently being computed. // *** Access to these will need to be locked in a parallel compiler. JitExpandArray<BYTE> impSpillCliquePredMembers; JitExpandArray<BYTE> impSpillCliqueSuccMembers; enum SpillCliqueDir { SpillCliquePred, SpillCliqueSucc }; // Abstract class for receiving a callback while walking a spill clique class SpillCliqueWalker { public: virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) = 0; }; // This class is used for setting the bbStkTempsIn and bbStkTempsOut on the blocks within a spill clique class SetSpillTempsBase : public SpillCliqueWalker { unsigned m_baseTmp; public: SetSpillTempsBase(unsigned baseTmp) : m_baseTmp(baseTmp) { } virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk); }; // This class is used for implementing the impReimportSpillClique part on each block within the spill clique class ReimportSpillClique : public SpillCliqueWalker { Compiler* m_pComp; public: ReimportSpillClique(Compiler* pComp) : m_pComp(pComp) { } virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk); }; // This is the heart of the algorithm for walking spill cliques. It invokes callback->Visit for each // predecessor or successor within the spill clique void impWalkSpillCliqueFromPred(BasicBlock* pred, SpillCliqueWalker* callback); // For a BasicBlock that has already been imported, the EntryState has an array of GenTrees for the // incoming locals. This walks that list and resets the types of the GenTrees to match the types of // the VarDscs. They get out of sync when we have int/native int issues (see impReimportSpillClique). void impRetypeEntryStateTemps(BasicBlock* blk); BYTE impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk); void impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val); void impPushVar(GenTree* op, typeInfo tiRetVal); GenTreeLclVar* impCreateLocalNode(unsigned lclNum DEBUGARG(IL_OFFSET offset)); void impLoadVar(unsigned lclNum, IL_OFFSET offset, const typeInfo& tiRetVal); void impLoadVar(unsigned lclNum, IL_OFFSET offset) { impLoadVar(lclNum, offset, lvaGetDesc(lclNum)->lvVerTypeInfo); } void impLoadArg(unsigned ilArgNum, IL_OFFSET offset); void impLoadLoc(unsigned ilLclNum, IL_OFFSET offset); bool impReturnInstruction(int prefixFlags, OPCODE& opcode); #ifdef TARGET_ARM void impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* op, CORINFO_CLASS_HANDLE hClass); #endif // A free list of linked list nodes used to represent to-do stacks of basic blocks.
struct BlockListNode { BasicBlock* m_blk; BlockListNode* m_next; BlockListNode(BasicBlock* blk, BlockListNode* next = nullptr) : m_blk(blk), m_next(next) { } void* operator new(size_t sz, Compiler* comp); }; BlockListNode* impBlockListNodeFreeList; void FreeBlockListNode(BlockListNode* node); bool impIsValueType(typeInfo* pTypeInfo); var_types mangleVarArgsType(var_types type); regNumber getCallArgIntRegister(regNumber floatReg); regNumber getCallArgFloatRegister(regNumber intReg); #if defined(DEBUG) static unsigned jitTotalMethodCompiled; #endif #ifdef DEBUG static LONG jitNestingLevel; #endif // DEBUG static bool impIsAddressInLocal(const GenTree* tree, GenTree** lclVarTreeOut = nullptr); void impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult); // STATIC inlining decision based on the IL code. void impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle, CORINFO_METHOD_INFO* methInfo, bool forceInline, InlineResult* inlineResult); void impCheckCanInline(GenTreeCall* call, CORINFO_METHOD_HANDLE fncHandle, unsigned methAttr, CORINFO_CONTEXT_HANDLE exactContextHnd, InlineCandidateInfo** ppInlineCandidateInfo, InlineResult* inlineResult); void impInlineRecordArgInfo(InlineInfo* pInlineInfo, GenTree* curArgVal, unsigned argNum, InlineResult* inlineResult); void impInlineInitVars(InlineInfo* pInlineInfo); unsigned impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason)); GenTree* impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclTypeInfo); bool impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo); bool impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTree, GenTreeCall::Use* additionalCallArgs, GenTree* dereferencedAddress, InlArgInfo* inlArgInfo); void impMarkInlineCandidate(GenTree* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo); void impMarkInlineCandidateHelper(GenTreeCall* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo); bool impTailCallRetTypeCompatible(bool allowWidening, var_types callerRetType, CORINFO_CLASS_HANDLE callerRetTypeClass, CorInfoCallConvExtension callerCallConv, var_types calleeRetType, CORINFO_CLASS_HANDLE calleeRetTypeClass, CorInfoCallConvExtension calleeCallConv); bool impIsTailCallILPattern( bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive); bool impIsImplicitTailCallCandidate( OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive); bool impIsClassExact(CORINFO_CLASS_HANDLE classHnd); bool impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array); CORINFO_RESOLVED_TOKEN* impAllocateToken(const CORINFO_RESOLVED_TOKEN& token); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX FlowGraph XX XX XX XX Info about the basic-blocks, their contents and the flow analysis XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: BasicBlock* fgFirstBB; // Beginning of the basic block list BasicBlock* fgLastBB; // End of the basic block list BasicBlock* fgFirstColdBlock; // First block to be placed in the cold section BasicBlock* fgEntryBB; // For OSR, the original method's entry point BasicBlock* fgOSREntryBB; 
// For OSR, the logical entry point (~ patchpoint) #if defined(FEATURE_EH_FUNCLETS) BasicBlock* fgFirstFuncletBB; // First block of outlined funclets (to allow block insertion before the funclets) #endif BasicBlock* fgFirstBBScratch; // Block inserted for initialization stuff. Is nullptr if no such block has been // created. BasicBlockList* fgReturnBlocks; // list of BBJ_RETURN blocks unsigned fgEdgeCount; // # of control flow edges between the BBs unsigned fgBBcount; // # of BBs in the method #ifdef DEBUG unsigned fgBBcountAtCodegen; // # of BBs in the method at the start of codegen #endif unsigned fgBBNumMax; // The max bbNum that has been assigned to basic blocks unsigned fgDomBBcount; // # of BBs for which we have dominator and reachability information BasicBlock** fgBBInvPostOrder; // The flow graph stored in an array sorted in topological order, needed to compute // dominance. Indexed by block number. Size: fgBBNumMax + 1. // After the dominance tree is computed, we cache a DFS preorder number and DFS postorder number to compute // dominance queries in O(1). fgDomTreePreOrder and fgDomTreePostOrder are arrays giving the block's preorder and // postorder number, respectively. The arrays are indexed by basic block number. (Note that blocks are numbered // starting from one. Thus, we always waste element zero. This makes debugging easier and makes the code less likely // to suffer from bugs stemming from forgetting to add or subtract one from the block number to form an array // index). The arrays are of size fgBBNumMax + 1. unsigned* fgDomTreePreOrder; unsigned* fgDomTreePostOrder; // Dominator tree used by SSA construction and copy propagation (the two are expected to use the same tree // in order to avoid the need for SSA reconstruction and an "out of SSA" phase). DomTreeNode* fgSsaDomTree; bool fgBBVarSetsInited; // Allocate array like T* a = new T[fgBBNumMax + 1]; // Using helper so we don't keep forgetting +1. template <typename T> T* fgAllocateTypeForEachBlk(CompMemKind cmk = CMK_Unknown) { return getAllocator(cmk).allocate<T>(fgBBNumMax + 1); } // BlockSets are relative to a specific set of BasicBlock numbers. If that changes // (if the blocks are renumbered), this changes. BlockSets from different epochs // cannot be meaningfully combined. Note that new blocks can be created with higher // block numbers without changing the basic block epoch. These blocks *cannot* // participate in a block set until the blocks are all renumbered, causing the epoch // to change. This is useful if continuing to use previous block sets is valuable. // If the epoch is zero, then it is uninitialized, and block sets can't be used. unsigned fgCurBBEpoch; unsigned GetCurBasicBlockEpoch() { return fgCurBBEpoch; } // The number of basic blocks in the current epoch. When the blocks are renumbered, // this is fgBBcount. As blocks are added, fgBBcount increases, fgCurBBEpochSize remains // the same, until a new BasicBlock epoch is created, such as when the blocks are all renumbered. unsigned fgCurBBEpochSize; // The number of "size_t" elements required to hold a bitset large enough for fgCurBBEpochSize // bits. This is precomputed to avoid doing math every time BasicBlockBitSetTraits::GetArrSize() is called. unsigned fgBBSetCountInSizeTUnits; void NewBasicBlockEpoch() { INDEBUG(unsigned oldEpochArrSize = fgBBSetCountInSizeTUnits); // We have a new epoch. Compute and cache the size needed for new BlockSets. 
fgCurBBEpoch++; fgCurBBEpochSize = fgBBNumMax + 1; fgBBSetCountInSizeTUnits = roundUp(fgCurBBEpochSize, (unsigned)(sizeof(size_t) * 8)) / unsigned(sizeof(size_t) * 8); #ifdef DEBUG // All BlockSet objects are now invalid! fgReachabilitySetsValid = false; // the bbReach sets are now invalid! fgEnterBlksSetValid = false; // the fgEnterBlks set is now invalid! if (verbose) { unsigned epochArrSize = BasicBlockBitSetTraits::GetArrSize(this, sizeof(size_t)); printf("\nNew BlockSet epoch %d, # of blocks (including unused BB00): %u, bitset array size: %u (%s)", fgCurBBEpoch, fgCurBBEpochSize, epochArrSize, (epochArrSize <= 1) ? "short" : "long"); if ((fgCurBBEpoch != 1) && ((oldEpochArrSize <= 1) != (epochArrSize <= 1))) { // If we're not just establishing the first epoch, and the epoch array size has changed such that we're // going to change our bitset representation from short (just a size_t bitset) to long (a pointer to an // array of size_t bitsets), then print that out. printf("; NOTE: BlockSet size was previously %s!", (oldEpochArrSize <= 1) ? "short" : "long"); } printf("\n"); } #endif // DEBUG } void EnsureBasicBlockEpoch() { if (fgCurBBEpochSize != fgBBNumMax + 1) { NewBasicBlockEpoch(); } } BasicBlock* fgNewBasicBlock(BBjumpKinds jumpKind); void fgEnsureFirstBBisScratch(); bool fgFirstBBisScratch(); bool fgBBisScratch(BasicBlock* block); void fgExtendEHRegionBefore(BasicBlock* block); void fgExtendEHRegionAfter(BasicBlock* block); BasicBlock* fgNewBBbefore(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion); BasicBlock* fgNewBBafter(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind, unsigned tryIndex, unsigned hndIndex, BasicBlock* nearBlk, bool putInFilter = false, bool runRarely = false, bool insertAtEnd = false); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind, BasicBlock* srcBlk, bool runRarely = false, bool insertAtEnd = false); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind); BasicBlock* fgNewBBinRegionWorker(BBjumpKinds jumpKind, BasicBlock* afterBlk, unsigned xcptnIndex, bool putInTryRegion); void fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk); void fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk); void fgUnlinkBlock(BasicBlock* block); #ifdef FEATURE_JIT_METHOD_PERF unsigned fgMeasureIR(); #endif // FEATURE_JIT_METHOD_PERF bool fgModified; // True if the flow graph has been modified recently bool fgComputePredsDone; // Have we computed the bbPreds list bool fgCheapPredsValid; // Is the bbCheapPreds list valid? bool fgDomsComputed; // Have we computed the dominator sets? bool fgReturnBlocksComputed; // Have we computed the return blocks list? bool fgOptimizedFinally; // Did we optimize any try-finallys? bool fgHasSwitch; // any BBJ_SWITCH jumps? BlockSet fgEnterBlks; // Set of blocks which have a special transfer of control; the "entry" blocks plus EH handler // begin blocks. #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) BlockSet fgAlwaysBlks; // Set of blocks which are BBJ_ALWAYS part of BBJ_CALLFINALLY/BBJ_ALWAYS pair that should // never be removed due to a requirement to use the BBJ_ALWAYS for generating code and // not have "retless" blocks. #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #ifdef DEBUG bool fgReachabilitySetsValid; // Are the bbReach sets valid? bool fgEnterBlksSetValid; // Is the fgEnterBlks set valid? 
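    // Illustrative sketch (comment only, not part of the JIT proper): a typical shape for code that
    // uses the BlockSet epoch helpers declared above. "CollectSwitchBlocksExample" is a hypothetical
    // name invented for this comment; EnsureBasicBlockEpoch, BlockSet, BlockSetOps, Blocks() and
    // bbJumpKind come from the JIT headers, but treat the exact call shapes as an approximation
    // rather than a specification.
    //
    //    void Compiler::CollectSwitchBlocksExample()
    //    {
    //        // Make sure the current epoch covers all block numbers before allocating a BlockSet.
    //        EnsureBasicBlockEpoch();
    //
    //        BlockSet switchBlocks(BlockSetOps::MakeEmpty(this));
    //        for (BasicBlock* const block : Blocks())
    //        {
    //            if (block->bbJumpKind == BBJ_SWITCH)
    //            {
    //                BlockSetOps::AddElemD(this, switchBlocks, block->bbNum);
    //            }
    //        }
    //
    //        // If blocks are later renumbered (starting a new epoch), this set must not be reused;
    //        // see the epoch comments above.
    //    }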
#endif // DEBUG bool fgRemoveRestOfBlock; // true if we know that we will throw bool fgStmtRemoved; // true if we remove statements -> need new DFA // There are two modes for ordering of the trees. // - In FGOrderTree, the dominant ordering is the tree order, and the nodes contained in // each tree and sub-tree are contiguous, and can be traversed (in gtNext/gtPrev order) // by traversing the tree according to the order of the operands. // - In FGOrderLinear, the dominant ordering is the linear order. enum FlowGraphOrder { FGOrderTree, FGOrderLinear }; FlowGraphOrder fgOrder; // The following are boolean flags that keep track of the state of internal data structures bool fgStmtListThreaded; // true if the node list is now threaded bool fgCanRelocateEHRegions; // true if we are allowed to relocate the EH regions bool fgEdgeWeightsComputed; // true after we have called fgComputeEdgeWeights bool fgHaveValidEdgeWeights; // true if we were successful in computing all of the edge weights bool fgSlopUsedInEdgeWeights; // true if their was some slop used when computing the edge weights bool fgRangeUsedInEdgeWeights; // true if some of the edgeWeight are expressed in Min..Max form bool fgNeedsUpdateFlowGraph; // true if we need to run fgUpdateFlowGraph weight_t fgCalledCount; // count of the number of times this method was called // This is derived from the profile data // or is BB_UNITY_WEIGHT when we don't have profile data #if defined(FEATURE_EH_FUNCLETS) bool fgFuncletsCreated; // true if the funclet creation phase has been run #endif // FEATURE_EH_FUNCLETS bool fgGlobalMorph; // indicates if we are during the global morphing phase // since fgMorphTree can be called from several places bool impBoxTempInUse; // the temp below is valid and available unsigned impBoxTemp; // a temporary that is used for boxing #ifdef DEBUG bool jitFallbackCompile; // Are we doing a fallback compile? That is, have we executed a NO_WAY assert, // and we are trying to compile again in a "safer", minopts mode? #endif #if defined(DEBUG) unsigned impInlinedCodeSize; bool fgPrintInlinedMethods; #endif jitstd::vector<flowList*>* fgPredListSortVector; //------------------------------------------------------------------------- void fgInit(); PhaseStatus fgImport(); PhaseStatus fgTransformIndirectCalls(); PhaseStatus fgTransformPatchpoints(); PhaseStatus fgInline(); PhaseStatus fgRemoveEmptyTry(); PhaseStatus fgRemoveEmptyFinally(); PhaseStatus fgMergeFinallyChains(); PhaseStatus fgCloneFinally(); void fgCleanupContinuation(BasicBlock* continuation); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) PhaseStatus fgUpdateFinallyTargetFlags(); void fgClearAllFinallyTargetBits(); void fgAddFinallyTargetFlags(); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) PhaseStatus fgTailMergeThrows(); void fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock, BasicBlock* nonCanonicalBlock, BasicBlock* canonicalBlock, flowList* predEdge); void fgTailMergeThrowsJumpToHelper(BasicBlock* predBlock, BasicBlock* nonCanonicalBlock, BasicBlock* canonicalBlock, flowList* predEdge); GenTree* fgCheckCallArgUpdate(GenTree* parent, GenTree* child, var_types origType); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Sometimes we need to defer updating the BBF_FINALLY_TARGET bit. fgNeedToAddFinallyTargetBits signals // when this is necessary. 
bool fgNeedToAddFinallyTargetBits; #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) bool fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block, BasicBlock* handler, BlockToBlockMap& continuationMap); GenTree* fgGetCritSectOfStaticMethod(); #if defined(FEATURE_EH_FUNCLETS) void fgAddSyncMethodEnterExit(); GenTree* fgCreateMonitorTree(unsigned lvaMonitorBool, unsigned lvaThisVar, BasicBlock* block, bool enter); void fgConvertSyncReturnToLeave(BasicBlock* block); #endif // FEATURE_EH_FUNCLETS void fgAddReversePInvokeEnterExit(); bool fgMoreThanOneReturnBlock(); // The number of separate return points in the method. unsigned fgReturnCount; void fgAddInternal(); enum class FoldResult { FOLD_DID_NOTHING, FOLD_CHANGED_CONTROL_FLOW, FOLD_REMOVED_LAST_STMT, FOLD_ALTERED_LAST_STMT, }; FoldResult fgFoldConditional(BasicBlock* block); void fgMorphStmts(BasicBlock* block); void fgMorphBlocks(); void fgMergeBlockReturn(BasicBlock* block); bool fgMorphBlockStmt(BasicBlock* block, Statement* stmt DEBUGARG(const char* msg)); void fgSetOptions(); #ifdef DEBUG static fgWalkPreFn fgAssertNoQmark; void fgPreExpandQmarkChecks(GenTree* expr); void fgPostExpandQmarkChecks(); static void fgCheckQmarkAllowedForm(GenTree* tree); #endif IL_OFFSET fgFindBlockILOffset(BasicBlock* block); void fgFixEntryFlowForOSR(); BasicBlock* fgSplitBlockAtBeginning(BasicBlock* curr); BasicBlock* fgSplitBlockAtEnd(BasicBlock* curr); BasicBlock* fgSplitBlockAfterStatement(BasicBlock* curr, Statement* stmt); BasicBlock* fgSplitBlockAfterNode(BasicBlock* curr, GenTree* node); // for LIR BasicBlock* fgSplitEdge(BasicBlock* curr, BasicBlock* succ); Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block, const DebugInfo& di); Statement* fgNewStmtFromTree(GenTree* tree); Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block); Statement* fgNewStmtFromTree(GenTree* tree, const DebugInfo& di); GenTree* fgGetTopLevelQmark(GenTree* expr, GenTree** ppDst = nullptr); void fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt); void fgExpandQmarkStmt(BasicBlock* block, Statement* stmt); void fgExpandQmarkNodes(); // Do "simple lowering." This functionality is (conceptually) part of "general" // lowering that is distributed between fgMorph and the lowering phase of LSRA. 
void fgSimpleLowering(); GenTree* fgInitThisClass(); GenTreeCall* fgGetStaticsCCtorHelper(CORINFO_CLASS_HANDLE cls, CorInfoHelpFunc helper); GenTreeCall* fgGetSharedCCtor(CORINFO_CLASS_HANDLE cls); bool backendRequiresLocalVarLifetimes() { return !opts.MinOpts() || m_pLinearScan->willEnregisterLocalVars(); } void fgLocalVarLiveness(); void fgLocalVarLivenessInit(); void fgPerNodeLocalVarLiveness(GenTree* node); void fgPerBlockLocalVarLiveness(); VARSET_VALRET_TP fgGetHandlerLiveVars(BasicBlock* block); void fgLiveVarAnalysis(bool updateInternalOnly = false); void fgComputeLifeCall(VARSET_TP& life, GenTreeCall* call); void fgComputeLifeTrackedLocalUse(VARSET_TP& life, LclVarDsc& varDsc, GenTreeLclVarCommon* node); bool fgComputeLifeTrackedLocalDef(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, LclVarDsc& varDsc, GenTreeLclVarCommon* node); bool fgComputeLifeUntrackedLocal(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, LclVarDsc& varDsc, GenTreeLclVarCommon* lclVarNode); bool fgComputeLifeLocal(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, GenTree* lclVarNode); void fgComputeLife(VARSET_TP& life, GenTree* startNode, GenTree* endNode, VARSET_VALARG_TP volatileVars, bool* pStmtInfoDirty DEBUGARG(bool* treeModf)); void fgComputeLifeLIR(VARSET_TP& life, BasicBlock* block, VARSET_VALARG_TP volatileVars); bool fgTryRemoveNonLocal(GenTree* node, LIR::Range* blockRange); void fgRemoveDeadStoreLIR(GenTree* store, BasicBlock* block); bool fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_VALARG_TP life, bool* doAgain, bool* pStmtInfoDirty, bool* pStoreRemoved DEBUGARG(bool* treeModf)); void fgInterBlockLocalVarLiveness(); // Blocks: convenience methods for enabling range-based `for` iteration over the function's blocks, e.g.: // 1. for (BasicBlock* const block : compiler->Blocks()) ... // 2. for (BasicBlock* const block : compiler->Blocks(startBlock)) ... // 3. for (BasicBlock* const block : compiler->Blocks(startBlock, endBlock)) ... // In case (1), the block list can be empty. In case (2), `startBlock` can be nullptr. In case (3), // both `startBlock` and `endBlock` must be non-null. // BasicBlockSimpleList Blocks() const { return BasicBlockSimpleList(fgFirstBB); } BasicBlockSimpleList Blocks(BasicBlock* startBlock) const { return BasicBlockSimpleList(startBlock); } BasicBlockRangeList Blocks(BasicBlock* startBlock, BasicBlock* endBlock) const { return BasicBlockRangeList(startBlock, endBlock); } // The presence of a partial definition presents some difficulties for SSA: this is both a use of some SSA name // of "x", and a def of a new SSA name for "x". The tree only has one local variable for "x", so it has to choose // whether to treat that as the use or def. It chooses the "use", and thus the old SSA name. This map allows us // to record/recover the "def" SSA number, given the lcl var node for "x" in such a tree. typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, unsigned> NodeToUnsignedMap; NodeToUnsignedMap* m_opAsgnVarDefSsaNums; NodeToUnsignedMap* GetOpAsgnVarDefSsaNums() { if (m_opAsgnVarDefSsaNums == nullptr) { m_opAsgnVarDefSsaNums = new (getAllocator()) NodeToUnsignedMap(getAllocator()); } return m_opAsgnVarDefSsaNums; } // This map tracks nodes whose value numbers explicitly or implicitly depend on memory states. // The map provides the entry block of the most closely enclosing loop that // defines the memory region accessed when defining the nodes's VN. 
// // This information should be consulted when considering hoisting node out of a loop, as the VN // for the node will only be valid within the indicated loop. // // It is not fine-grained enough to track memory dependence within loops, so cannot be used // for more general code motion. // // If a node does not have an entry in the map we currently assume the VN is not memory dependent // and so memory does not constrain hoisting. // typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, BasicBlock*> NodeToLoopMemoryBlockMap; NodeToLoopMemoryBlockMap* m_nodeToLoopMemoryBlockMap; NodeToLoopMemoryBlockMap* GetNodeToLoopMemoryBlockMap() { if (m_nodeToLoopMemoryBlockMap == nullptr) { m_nodeToLoopMemoryBlockMap = new (getAllocator()) NodeToLoopMemoryBlockMap(getAllocator()); } return m_nodeToLoopMemoryBlockMap; } void optRecordLoopMemoryDependence(GenTree* tree, BasicBlock* block, ValueNum memoryVN); void optCopyLoopMemoryDependence(GenTree* fromTree, GenTree* toTree); // Requires value numbering phase to have completed. Returns the value number ("gtVN") of the // "tree," EXCEPT in the case of GTF_VAR_USEASG, because the tree node's gtVN member is the // "use" VN. Performs a lookup into the map of (use asg tree -> def VN.) to return the "def's" // VN. inline ValueNum GetUseAsgDefVNOrTreeVN(GenTree* tree); // Requires that "lcl" has the GTF_VAR_DEF flag set. Returns the SSA number of "lcl". // Except: assumes that lcl is a def, and if it is // a partial def (GTF_VAR_USEASG), looks up and returns the SSA number for the "def", // rather than the "use" SSA number recorded in the tree "lcl". inline unsigned GetSsaNumForLocalVarDef(GenTree* lcl); inline bool PreciseRefCountsRequired(); // Performs SSA conversion. void fgSsaBuild(); // Reset any data structures to the state expected by "fgSsaBuild", so it can be run again. void fgResetForSsa(); unsigned fgSsaPassesCompleted; // Number of times fgSsaBuild has been run. // Returns "true" if this is a special variable that is never zero initialized in the prolog. inline bool fgVarIsNeverZeroInitializedInProlog(unsigned varNum); // Returns "true" if the variable needs explicit zero initialization. inline bool fgVarNeedsExplicitZeroInit(unsigned varNum, bool bbInALoop, bool bbIsReturn); // The value numbers for this compilation. ValueNumStore* vnStore; public: ValueNumStore* GetValueNumStore() { return vnStore; } // Do value numbering (assign a value number to each // tree node). void fgValueNumber(); // Computes new GcHeap VN via the assignment H[elemTypeEq][arrVN][inx][fldSeq] = rhsVN. // Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type. // The 'indType' is the indirection type of the lhs of the assignment and will typically // match the element type of the array or fldSeq. When this type doesn't match // or if the fldSeq is 'NotAField' we invalidate the array contents H[elemTypeEq][arrVN] // ValueNum fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq, ValueNum arrVN, ValueNum inxVN, FieldSeqNode* fldSeq, ValueNum rhsVN, var_types indType); // Requires that "tree" is a GT_IND marked as an array index, and that its address argument // has been parsed to yield the other input arguments. If evaluation of the address // can raise exceptions, those should be captured in the exception set "addrXvnp". // Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type. // Marks "tree" with the VN for H[elemTypeEq][arrVN][inx][fldSeq] (for the liberal VN; a new unique // VN for the conservative VN.) 
Also marks the tree's argument as the address of an array element. // The type tree->TypeGet() will typically match the element type of the array or fldSeq. // When this type doesn't match or if the fldSeq is 'NotAField' we return a new unique VN // ValueNum fgValueNumberArrIndexVal(GenTree* tree, CORINFO_CLASS_HANDLE elemTypeEq, ValueNum arrVN, ValueNum inxVN, ValueNumPair addrXvnp, FieldSeqNode* fldSeq); // Requires "funcApp" to be a VNF_PtrToArrElem, and "addrXvnp" to represent the exception set thrown // by evaluating the array index expression "tree". Returns the value number resulting from // dereferencing the array in the current GcHeap state. If "tree" is non-null, it must be the // "GT_IND" that does the dereference, and it is given the returned value number. ValueNum fgValueNumberArrIndexVal(GenTree* tree, VNFuncApp* funcApp, ValueNumPair addrXvnp); // Compute the value number for a byref-exposed load of the given type via the given pointerVN. ValueNum fgValueNumberByrefExposedLoad(var_types type, ValueNum pointerVN); unsigned fgVNPassesCompleted; // Number of times fgValueNumber has been run. // Utility functions for fgValueNumber. // Perform value-numbering for the trees in "blk". void fgValueNumberBlock(BasicBlock* blk); // Requires that "entryBlock" is the entry block of loop "loopNum", and that "loopNum" is the // innermost loop of which "entryBlock" is the entry. Returns the value number that should be // assumed for the memoryKind at the start "entryBlk". ValueNum fgMemoryVNForLoopSideEffects(MemoryKind memoryKind, BasicBlock* entryBlock, unsigned loopNum); // Called when an operation (performed by "tree", described by "msg") may cause the GcHeap to be mutated. // As GcHeap is a subset of ByrefExposed, this will also annotate the ByrefExposed mutation. void fgMutateGcHeap(GenTree* tree DEBUGARG(const char* msg)); // Called when an operation (performed by "tree", described by "msg") may cause an address-exposed local to be // mutated. void fgMutateAddressExposedLocal(GenTree* tree DEBUGARG(const char* msg)); // For a GC heap store at curTree, record the new curMemoryVN's and update curTree's MemorySsaMap. // As GcHeap is a subset of ByrefExposed, this will also record the ByrefExposed store. void recordGcHeapStore(GenTree* curTree, ValueNum gcHeapVN DEBUGARG(const char* msg)); // For a store to an address-exposed local at curTree, record the new curMemoryVN and update curTree's MemorySsaMap. void recordAddressExposedLocalStore(GenTree* curTree, ValueNum memoryVN DEBUGARG(const char* msg)); void fgSetCurrentMemoryVN(MemoryKind memoryKind, ValueNum newMemoryVN); // Tree caused an update in the current memory VN. If "tree" has an associated heap SSA #, record that // value in that SSA #. void fgValueNumberRecordMemorySsa(MemoryKind memoryKind, GenTree* tree); // The input 'tree' is a leaf node that is a constant // Assign the proper value number to the tree void fgValueNumberTreeConst(GenTree* tree); // If the VN store has been initialized, reassign the // proper value number to the constant tree. void fgUpdateConstTreeValueNumber(GenTree* tree); // Assumes that all inputs to "tree" have had value numbers assigned; assigns a VN to tree. // (With some exceptions: the VN of the lhs of an assignment is assigned as part of the // assignment.) void fgValueNumberTree(GenTree* tree); void fgValueNumberAssignment(GenTreeOp* tree); // Does value-numbering for a block assignment. 
void fgValueNumberBlockAssignment(GenTree* tree); bool fgValueNumberBlockAssignmentTypeCheck(LclVarDsc* dstVarDsc, FieldSeqNode* dstFldSeq, GenTree* src); // Does value-numbering for a cast tree. void fgValueNumberCastTree(GenTree* tree); // Does value-numbering for an intrinsic tree. void fgValueNumberIntrinsic(GenTree* tree); #ifdef FEATURE_SIMD // Does value-numbering for a GT_SIMD tree void fgValueNumberSimd(GenTreeSIMD* tree); #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS // Does value-numbering for a GT_HWINTRINSIC tree void fgValueNumberHWIntrinsic(GenTreeHWIntrinsic* tree); #endif // FEATURE_HW_INTRINSICS // Does value-numbering for a call. We interpret some helper calls. void fgValueNumberCall(GenTreeCall* call); // Does value-numbering for a helper representing a cast operation. void fgValueNumberCastHelper(GenTreeCall* call); // Does value-numbering for a helper "call" that has a VN function symbol "vnf". void fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueNumPair vnpExc); // Requires "helpCall" to be a helper call. Assigns it a value number; // we understand the semantics of some of the calls. Returns "true" if // the call may modify the heap (we assume arbitrary memory side effects if so). bool fgValueNumberHelperCall(GenTreeCall* helpCall); // Requires that "helpFunc" is one of the pure Jit Helper methods. // Returns the corresponding VNFunc to use for value numbering VNFunc fgValueNumberJitHelperMethodVNFunc(CorInfoHelpFunc helpFunc); // Adds the exception set for the current tree node which has a memory indirection operation void fgValueNumberAddExceptionSetForIndirection(GenTree* tree, GenTree* baseAddr); // Adds the exception sets for the current tree node which is performing a division or modulus operation void fgValueNumberAddExceptionSetForDivision(GenTree* tree); // Adds the exception set for the current tree node which is performing a overflow checking operation void fgValueNumberAddExceptionSetForOverflow(GenTree* tree); // Adds the exception set for the current tree node which is performing a bounds check operation void fgValueNumberAddExceptionSetForBoundsCheck(GenTree* tree); // Adds the exception set for the current tree node which is performing a ckfinite operation void fgValueNumberAddExceptionSetForCkFinite(GenTree* tree); // Adds the exception sets for the current tree node void fgValueNumberAddExceptionSet(GenTree* tree); #ifdef DEBUG void fgDebugCheckExceptionSets(); void fgDebugCheckValueNumberedTree(GenTree* tree); #endif // These are the current value number for the memory implicit variables while // doing value numbering. These are the value numbers under the "liberal" interpretation // of memory values; the "conservative" interpretation needs no VN, since every access of // memory yields an unknown value. ValueNum fgCurMemoryVN[MemoryKindCount]; // Return a "pseudo"-class handle for an array element type. If "elemType" is TYP_STRUCT, // requires "elemStructType" to be non-null (and to have a low-order zero). Otherwise, low order bit // is 1, and the rest is an encoding of "elemTyp". static CORINFO_CLASS_HANDLE EncodeElemType(var_types elemTyp, CORINFO_CLASS_HANDLE elemStructType) { if (elemStructType != nullptr) { assert(varTypeIsStruct(elemTyp) || elemTyp == TYP_REF || elemTyp == TYP_BYREF || varTypeIsIntegral(elemTyp)); assert((size_t(elemStructType) & 0x1) == 0x0); // Make sure the encoding below is valid. 
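            // Worked example (illustrative comment only; "hClass" stands for some struct class handle)
            // of what EncodeElemType produces and DecodeElemType below recovers:
            //   EncodeElemType(TYP_STRUCT, hClass)  == hClass                                      (low bit 0)
            //   EncodeElemType(TYP_INT,    nullptr) == CORINFO_CLASS_HANDLE((TYP_INT << 1) | 0x1)  (low bit 1)
            // DecodeElemType then returns var_types(value >> 1) when the low bit is set, and TYP_STRUCT otherwise.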
            return elemStructType;
        }
        else
        {
            assert(elemTyp != TYP_STRUCT);
            elemTyp = varTypeToSigned(elemTyp);
            return CORINFO_CLASS_HANDLE(size_t(elemTyp) << 1 | 0x1);
        }
    }

    // If "clsHnd" encodes a primitive element type (i.e. it came from the "EncodeElemType" path that sets the
    // low-order bit), returns the var_types it represents. Otherwise, returns TYP_STRUCT (on the assumption
    // that "clsHnd" is the struct type of the element).
    static var_types DecodeElemType(CORINFO_CLASS_HANDLE clsHnd)
    {
        size_t clsHndVal = size_t(clsHnd);
        if (clsHndVal & 0x1)
        {
            return var_types(clsHndVal >> 1);
        }
        else
        {
            return TYP_STRUCT;
        }
    }

    // Convert a BYTE which represents the VM's CorInfoGCtype to the JIT's var_types
    var_types getJitGCType(BYTE gcType);

    // Returns true if the provided type should be treated as a primitive type
    // for the unmanaged calling conventions.
    bool isNativePrimitiveStructType(CORINFO_CLASS_HANDLE clsHnd);

    enum structPassingKind
    {
        SPK_Unknown,       // Invalid value, never returned
        SPK_PrimitiveType, // The struct is passed/returned using a primitive type.
        SPK_EnclosingType, // Like SPK_PrimitiveType, but used for return types that
                           //  require a primitive type temp that is larger than the struct size.
                           //  Currently used for structs of size 3, 5, 6, or 7 bytes.
        SPK_ByValue,       // The struct is passed/returned by value (using the ABI rules)
                           //  for ARM64 and UNIX_X64 in multiple registers. (when all of the
                           //   parameters registers are used, then the stack will be used)
                           //  for X86 passed on the stack, for ARM32 passed in registers
                           //   or the stack or split between registers and the stack.
        SPK_ByValueAsHfa,  // The struct is passed/returned as an HFA in multiple registers.
        SPK_ByReference
    }; // The struct is passed/returned by reference to a copy/buffer.

    // Get the "primitive" type that is used when we are given a struct of size 'structSize'.
    // For pointer sized structs the 'clsHnd' is used to determine if the struct contains a GC ref.
    // A "primitive" type is one of the scalar types: byte, short, int, long, ref, float, double.
    // If we can't or shouldn't use a "primitive" type then TYP_UNKNOWN is returned.
    //
    // isVarArg is passed for use on Windows Arm64 to change the decision returned regarding
    // hfa types.
    //
    var_types getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS_HANDLE clsHnd, bool isVarArg);

    // Get the type that is used to pass values of the given struct type.
    // isVarArg is passed for use on Windows Arm64 to change the decision returned regarding
    // hfa types.
    //
    var_types getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
                                  structPassingKind* wbPassStruct,
                                  bool isVarArg,
                                  unsigned structSize);

    // Get the type that is used to return values of the given struct type.
    // If the size is unknown, pass 0 and it will be determined from 'clsHnd'.
    var_types getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
                                     CorInfoCallConvExtension callConv,
                                     structPassingKind* wbPassStruct = nullptr,
                                     unsigned structSize = 0);

#ifdef DEBUG
    // Print a representation of "vnp" or "vn" on standard output.
    // If "level" is non-zero, we also print out a partial expansion of the value.
    void vnpPrint(ValueNumPair vnp, unsigned level);
    void vnPrint(ValueNum vn, unsigned level);
#endif

    bool fgDominate(BasicBlock* b1, BasicBlock* b2); // Return true if b1 dominates b2

    // Dominator computation member functions
    // Not exposed outside Compiler
protected:
    bool fgReachable(BasicBlock* b1, BasicBlock* b2); // Returns true if block b1 can reach block b2

    // Compute immediate dominators, the dominator tree, and its pre/post-order traversal numbers.
    void fgComputeDoms();
    void fgCompDominatedByExceptionalEntryBlocks();

    BlockSet_ValRet_T fgGetDominatorSet(BasicBlock* block); // Returns a set of blocks that dominate the given block.
    // Note: this is relatively slow compared to calling fgDominate(),
    // especially when all that is needed is a single block-versus-block check.

    void fgComputeReachabilitySets(); // Compute bbReach sets. (Also sets BBF_GC_SAFE_POINT flag on blocks.)

    void fgComputeReturnBlocks(); // Initialize fgReturnBlocks to a list of BBJ_RETURN blocks.

    void fgComputeEnterBlocksSet(); // Compute the set of entry blocks, 'fgEnterBlks'.

    bool fgRemoveUnreachableBlocks(); // Remove blocks determined to be unreachable by the bbReach sets.

    void fgComputeReachability(); // Perform flow graph node reachability analysis.

    BasicBlock* fgIntersectDom(BasicBlock* a, BasicBlock* b); // Intersect two immediate dominator sets.

    void fgDfsInvPostOrder(); // In order to compute dominance using fgIntersectDom, the flow graph nodes must be
                              // processed in topological order; this function takes care of that.

    void fgDfsInvPostOrderHelper(BasicBlock* block, BlockSet& visited, unsigned* count);

    BlockSet_ValRet_T fgDomFindStartNodes(); // Computes which basic blocks don't have incoming edges in the flow graph.
                                             // Returns this as a set.

    INDEBUG(void fgDispDomTree(DomTreeNode* domTree);) // Helper that prints out the Dominator Tree in debug builds.

    DomTreeNode* fgBuildDomTree(); // Once we compute all the immediate dominator sets for each node in the flow graph
                                   // (performed by fgComputeDoms), this procedure builds the dominance tree, represented
                                   // as adjacency lists.

    // In order to speed up the queries of the form 'Does A dominate B', we can perform a DFS preorder and postorder
    // traversal of the dominance tree and the dominance query will become A dominates B iff preOrder(A) <= preOrder(B)
    // && postOrder(A) >= postOrder(B), making the computation O(1).
    void fgNumberDomTree(DomTreeNode* domTree);

    // When the flow graph changes, we need to update the block numbers, predecessor lists, reachability sets,
    // dominators, and possibly loops.
    void fgUpdateChangedFlowGraph(const bool computePreds        = true,
                                  const bool computeDoms         = true,
                                  const bool computeReturnBlocks = false,
                                  const bool computeLoops        = false);

public:
    // Compute the predecessors of the blocks in the control flow graph.
    void fgComputePreds();

    // Remove all predecessor information.
    void fgRemovePreds();

    // Compute the cheap flow graph predecessors lists. This is used in some early phases
    // before the full predecessors lists are computed.
    void fgComputeCheapPreds();

private:
    void fgAddCheapPred(BasicBlock* block, BasicBlock* blockPred);

    void fgRemoveCheapPred(BasicBlock* block, BasicBlock* blockPred);

public:
    enum GCPollType
    {
        GCPOLL_NONE,
        GCPOLL_CALL,
        GCPOLL_INLINE
    };

    // Initialize the per-block variable sets (used for liveness analysis).
    void fgInitBlockVarSets();

    PhaseStatus fgInsertGCPolls();
    BasicBlock* fgCreateGCPoll(GCPollType pollType, BasicBlock* block);

    // Requires that "block" is a block that returns from
    // a finally. Returns the number of successors (jump targets of
    // blocks in the covered "try" that did a "LEAVE".)
    unsigned fgNSuccsOfFinallyRet(BasicBlock* block);

    // Requires that "block" is a block that returns (in the sense of BBJ_EHFINALLYRET) from
    // a finally. Returns its "i"th successor (jump targets of
    // blocks in the covered "try" that did a "LEAVE".)
    // Requires that "i" < fgNSuccsOfFinallyRet(block).
BasicBlock* fgSuccOfFinallyRet(BasicBlock* block, unsigned i); private: // Factor out common portions of the impls of the methods above. void fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock** bres, unsigned* nres); public: // For many purposes, it is desirable to be able to enumerate the *distinct* targets of a switch statement, // skipping duplicate targets. (E.g., in flow analyses that are only interested in the set of possible targets.) // SwitchUniqueSuccSet contains the non-duplicated switch targets. // (Code that modifies the jump table of a switch has an obligation to call Compiler::UpdateSwitchTableTarget, // which in turn will call the "UpdateTarget" method of this type if a SwitchUniqueSuccSet has already // been computed for the switch block. If a switch block is deleted or is transformed into a non-switch, // we leave the entry associated with the block, but it will no longer be accessed.) struct SwitchUniqueSuccSet { unsigned numDistinctSuccs; // Number of distinct targets of the switch. BasicBlock** nonDuplicates; // Array of "numDistinctSuccs", containing all the distinct switch target // successors. // The switch block "switchBlk" just had an entry with value "from" modified to the value "to". // Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk", // remove it from "this", and ensure that "to" is a member. Use "alloc" to do any required allocation. void UpdateTarget(CompAllocator alloc, BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to); }; typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, SwitchUniqueSuccSet> BlockToSwitchDescMap; private: // Maps BasicBlock*'s that end in switch statements to SwitchUniqueSuccSets that allow // iteration over only the distinct successors. BlockToSwitchDescMap* m_switchDescMap; public: BlockToSwitchDescMap* GetSwitchDescMap(bool createIfNull = true) { if ((m_switchDescMap == nullptr) && createIfNull) { m_switchDescMap = new (getAllocator()) BlockToSwitchDescMap(getAllocator()); } return m_switchDescMap; } // Invalidate the map of unique switch block successors. For example, since the hash key of the map // depends on block numbers, we must invalidate the map when the blocks are renumbered, to ensure that // we don't accidentally look up and return the wrong switch data. void InvalidateUniqueSwitchSuccMap() { m_switchDescMap = nullptr; } // Requires "switchBlock" to be a block that ends in a switch. Returns // the corresponding SwitchUniqueSuccSet. SwitchUniqueSuccSet GetDescriptorForSwitch(BasicBlock* switchBlk); // The switch block "switchBlk" just had an entry with value "from" modified to the value "to". // Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk", // remove it from "this", and ensure that "to" is a member. void UpdateSwitchTableTarget(BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to); // Remove the "SwitchUniqueSuccSet" of "switchBlk" in the BlockToSwitchDescMap. 
void fgInvalidateSwitchDescMapEntry(BasicBlock* switchBlk); BasicBlock* fgFirstBlockOfHandler(BasicBlock* block); bool fgIsFirstBlockOfFilterOrHandler(BasicBlock* block); flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred); flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred, flowList*** ptrToPred); flowList* fgRemoveRefPred(BasicBlock* block, BasicBlock* blockPred); flowList* fgRemoveAllRefPreds(BasicBlock* block, BasicBlock* blockPred); void fgRemoveBlockAsPred(BasicBlock* block); void fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSwitchBlock); void fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* newTarget, BasicBlock* oldTarget); void fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, BasicBlock* oldTarget); void fgReplacePred(BasicBlock* block, BasicBlock* oldPred, BasicBlock* newPred); flowList* fgAddRefPred(BasicBlock* block, BasicBlock* blockPred, flowList* oldEdge = nullptr, bool initializingPreds = false); // Only set to 'true' when we are computing preds in // fgComputePreds() void fgFindBasicBlocks(); bool fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt); bool fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionIndex, bool putInTryRegion); BasicBlock* fgFindInsertPoint(unsigned regionIndex, bool putInTryRegion, BasicBlock* startBlk, BasicBlock* endBlk, BasicBlock* nearBlk, BasicBlock* jumpBlk, bool runRarely); unsigned fgGetNestingLevel(BasicBlock* block, unsigned* pFinallyNesting = nullptr); void fgPostImportationCleanup(); void fgRemoveStmt(BasicBlock* block, Statement* stmt DEBUGARG(bool isUnlink = false)); void fgUnlinkStmt(BasicBlock* block, Statement* stmt); bool fgCheckRemoveStmt(BasicBlock* block, Statement* stmt); void fgCreateLoopPreHeader(unsigned lnum); void fgUnreachableBlock(BasicBlock* block); void fgRemoveConditionalJump(BasicBlock* block); BasicBlock* fgLastBBInMainFunction(); BasicBlock* fgEndBBAfterMainFunction(); void fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd); void fgRemoveBlock(BasicBlock* block, bool unreachable); bool fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext); void fgCompactBlocks(BasicBlock* block, BasicBlock* bNext); void fgUpdateLoopsAfterCompacting(BasicBlock* block, BasicBlock* bNext); BasicBlock* fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst); bool fgRenumberBlocks(); bool fgExpandRarelyRunBlocks(); bool fgEhAllowsMoveBlock(BasicBlock* bBefore, BasicBlock* bAfter); void fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBlock* insertAfterBlk); enum FG_RELOCATE_TYPE { FG_RELOCATE_TRY, // relocate the 'try' region FG_RELOCATE_HANDLER // relocate the handler region (including the filter if necessary) }; BasicBlock* fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE relocateType); #if defined(FEATURE_EH_FUNCLETS) #if defined(TARGET_ARM) void fgClearFinallyTargetBit(BasicBlock* block); #endif // defined(TARGET_ARM) bool fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block); bool fgAnyIntraHandlerPreds(BasicBlock* block); void fgInsertFuncletPrologBlock(BasicBlock* block); void fgCreateFuncletPrologBlocks(); void fgCreateFunclets(); #else // !FEATURE_EH_FUNCLETS bool fgRelocateEHRegions(); #endif // !FEATURE_EH_FUNCLETS bool fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* target); bool fgBlockEndFavorsTailDuplication(BasicBlock* block, unsigned lclNum); bool fgBlockIsGoodTailDuplicationCandidate(BasicBlock* block, unsigned* lclNum); bool fgOptimizeEmptyBlock(BasicBlock* block); bool 
fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBlock* bDest); bool fgOptimizeBranch(BasicBlock* bJump); bool fgOptimizeSwitchBranches(BasicBlock* block); bool fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, BasicBlock* bPrev); bool fgOptimizeSwitchJumps(); #ifdef DEBUG void fgPrintEdgeWeights(); #endif void fgComputeBlockAndEdgeWeights(); weight_t fgComputeMissingBlockWeights(); void fgComputeCalledCount(weight_t returnWeight); void fgComputeEdgeWeights(); bool fgReorderBlocks(); PhaseStatus fgDetermineFirstColdBlock(); bool fgIsForwardBranch(BasicBlock* bJump, BasicBlock* bSrc = nullptr); bool fgUpdateFlowGraph(bool doTailDup = false); void fgFindOperOrder(); // method that returns if you should split here typedef bool(fgSplitPredicate)(GenTree* tree, GenTree* parent, fgWalkData* data); void fgSetBlockOrder(); void fgRemoveReturnBlock(BasicBlock* block); /* Helper code that has been factored out */ inline void fgConvertBBToThrowBB(BasicBlock* block); bool fgCastNeeded(GenTree* tree, var_types toType); GenTree* fgDoNormalizeOnStore(GenTree* tree); GenTree* fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry); // The following check for loops that don't execute calls bool fgLoopCallMarked; void fgLoopCallTest(BasicBlock* srcBB, BasicBlock* dstBB); void fgLoopCallMark(); void fgMarkLoopHead(BasicBlock* block); unsigned fgGetCodeEstimate(BasicBlock* block); #if DUMP_FLOWGRAPHS enum class PhasePosition { PrePhase, PostPhase }; const char* fgProcessEscapes(const char* nameIn, escapeMapping_t* map); static void fgDumpTree(FILE* fgxFile, GenTree* const tree); FILE* fgOpenFlowGraphFile(bool* wbDontClose, Phases phase, PhasePosition pos, LPCWSTR type); bool fgDumpFlowGraph(Phases phase, PhasePosition pos); #endif // DUMP_FLOWGRAPHS #ifdef DEBUG void fgDispDoms(); void fgDispReach(); void fgDispBBLiveness(BasicBlock* block); void fgDispBBLiveness(); void fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth = 0); void fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, bool dumpTrees); void fgDispBasicBlocks(bool dumpTrees = false); void fgDumpStmtTree(Statement* stmt, unsigned bbNum); void fgDumpBlock(BasicBlock* block); void fgDumpTrees(BasicBlock* firstBlock, BasicBlock* lastBlock); static fgWalkPreFn fgStress64RsltMulCB; void fgStress64RsltMul(); void fgDebugCheckUpdate(); void fgDebugCheckBBNumIncreasing(); void fgDebugCheckBBlist(bool checkBBNum = false, bool checkBBRefs = true); void fgDebugCheckBlockLinks(); void fgDebugCheckLinks(bool morphTrees = false); void fgDebugCheckStmtsList(BasicBlock* block, bool morphTrees); void fgDebugCheckNodeLinks(BasicBlock* block, Statement* stmt); void fgDebugCheckNodesUniqueness(); void fgDebugCheckLoopTable(); void fgDebugCheckFlags(GenTree* tree); void fgDebugCheckDispFlags(GenTree* tree, GenTreeFlags dispFlags, GenTreeDebugFlags debugFlags); void fgDebugCheckFlagsHelper(GenTree* tree, GenTreeFlags actualFlags, GenTreeFlags expectedFlags); void fgDebugCheckTryFinallyExits(); void fgDebugCheckProfileData(); bool fgDebugCheckIncomingProfileData(BasicBlock* block); bool fgDebugCheckOutgoingProfileData(BasicBlock* block); #endif // DEBUG static bool fgProfileWeightsEqual(weight_t weight1, weight_t weight2); static bool fgProfileWeightsConsistent(weight_t weight1, weight_t weight2); static GenTree* fgGetFirstNode(GenTree* tree); //--------------------- Walking the trees in the IR ----------------------- struct fgWalkData { Compiler* compiler; fgWalkPreFn* wtprVisitorFn; fgWalkPostFn* wtpoVisitorFn; void* pCallbackData; // 
user-provided data GenTree* parent; // parent of current node, provided to callback GenTreeStack* parentStack; // stack of parent nodes, if asked for bool wtprLclsOnly; // whether to only visit lclvar nodes #ifdef DEBUG bool printModified; // callback can use this #endif }; fgWalkResult fgWalkTreePre(GenTree** pTree, fgWalkPreFn* visitor, void* pCallBackData = nullptr, bool lclVarsOnly = false, bool computeStack = false); fgWalkResult fgWalkTree(GenTree** pTree, fgWalkPreFn* preVisitor, fgWalkPostFn* postVisitor, void* pCallBackData = nullptr); void fgWalkAllTreesPre(fgWalkPreFn* visitor, void* pCallBackData); //----- Postorder fgWalkResult fgWalkTreePost(GenTree** pTree, fgWalkPostFn* visitor, void* pCallBackData = nullptr, bool computeStack = false); // An fgWalkPreFn that looks for expressions that have inline throws in // minopts mode. Basically it looks for tress with gtOverflowEx() or // GTF_IND_RNGCHK. It returns WALK_ABORT if one is found. It // returns WALK_SKIP_SUBTREES if GTF_EXCEPT is not set (assumes flags // properly propagated to parent trees). It returns WALK_CONTINUE // otherwise. static fgWalkResult fgChkThrowCB(GenTree** pTree, Compiler::fgWalkData* data); static fgWalkResult fgChkLocAllocCB(GenTree** pTree, Compiler::fgWalkData* data); static fgWalkResult fgChkQmarkCB(GenTree** pTree, Compiler::fgWalkData* data); /************************************************************************** * PROTECTED *************************************************************************/ protected: friend class SsaBuilder; friend struct ValueNumberState; //--------------------- Detect the basic blocks --------------------------- BasicBlock** fgBBs; // Table of pointers to the BBs void fgInitBBLookup(); BasicBlock* fgLookupBB(unsigned addr); bool fgCanSwitchToOptimized(); void fgSwitchToOptimized(const char* reason); bool fgMayExplicitTailCall(); void fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget); void fgMarkBackwardJump(BasicBlock* startBlock, BasicBlock* endBlock); void fgLinkBasicBlocks(); unsigned fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget); void fgCheckBasicBlockControlFlow(); void fgControlFlowPermitted(BasicBlock* blkSrc, BasicBlock* blkDest, bool IsLeave = false /* is the src a leave block */); bool fgFlowToFirstBlockOfInnerTry(BasicBlock* blkSrc, BasicBlock* blkDest, bool sibling); void fgObserveInlineConstants(OPCODE opcode, const FgStack& stack, bool isInlining); void fgAdjustForAddressExposedOrWrittenThis(); unsigned fgStressBBProf() { #ifdef DEBUG unsigned result = JitConfig.JitStressBBProf(); if (result == 0) { if (compStressCompile(STRESS_BB_PROFILE, 15)) { result = 1; } } return result; #else return 0; #endif } bool fgHaveProfileData(); bool fgGetProfileWeightForBasicBlock(IL_OFFSET offset, weight_t* weight); Instrumentor* fgCountInstrumentor; Instrumentor* fgClassInstrumentor; PhaseStatus fgPrepareToInstrumentMethod(); PhaseStatus fgInstrumentMethod(); PhaseStatus fgIncorporateProfileData(); void fgIncorporateBlockCounts(); void fgIncorporateEdgeCounts(); CORINFO_CLASS_HANDLE getRandomClass(ICorJitInfo::PgoInstrumentationSchema* schema, UINT32 countSchemaItems, BYTE* pInstrumentationData, int32_t ilOffset, CLRRandom* random); public: const char* fgPgoFailReason; bool fgPgoDisabled; ICorJitInfo::PgoSource fgPgoSource; ICorJitInfo::PgoInstrumentationSchema* fgPgoSchema; BYTE* fgPgoData; UINT32 fgPgoSchemaCount; HRESULT fgPgoQueryResult; UINT32 fgNumProfileRuns; UINT32 fgPgoBlockCounts; 
UINT32 fgPgoEdgeCounts; UINT32 fgPgoClassProfiles; unsigned fgPgoInlineePgo; unsigned fgPgoInlineeNoPgo; unsigned fgPgoInlineeNoPgoSingleBlock; void WalkSpanningTree(SpanningTreeVisitor* visitor); void fgSetProfileWeight(BasicBlock* block, weight_t weight); void fgApplyProfileScale(); bool fgHaveSufficientProfileData(); bool fgHaveTrustedProfileData(); // fgIsUsingProfileWeights - returns true if we have real profile data for this method // or if we have some fake profile data for the stress mode bool fgIsUsingProfileWeights() { return (fgHaveProfileData() || fgStressBBProf()); } // fgProfileRunsCount - returns total number of scenario runs for the profile data // or BB_UNITY_WEIGHT_UNSIGNED when we aren't using profile data. unsigned fgProfileRunsCount() { return fgIsUsingProfileWeights() ? fgNumProfileRuns : BB_UNITY_WEIGHT_UNSIGNED; } //-------- Insert a statement at the start or end of a basic block -------- #ifdef DEBUG public: static bool fgBlockContainsStatementBounded(BasicBlock* block, Statement* stmt, bool answerOnBoundExceeded = true); #endif public: Statement* fgNewStmtAtBeg(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); void fgInsertStmtAtEnd(BasicBlock* block, Statement* stmt); Statement* fgNewStmtAtEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); Statement* fgNewStmtNearEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); private: void fgInsertStmtNearEnd(BasicBlock* block, Statement* stmt); void fgInsertStmtAtBeg(BasicBlock* block, Statement* stmt); void fgInsertStmtAfter(BasicBlock* block, Statement* insertionPoint, Statement* stmt); public: void fgInsertStmtBefore(BasicBlock* block, Statement* insertionPoint, Statement* stmt); private: Statement* fgInsertStmtListAfter(BasicBlock* block, Statement* stmtAfter, Statement* stmtList); // Create a new temporary variable to hold the result of *ppTree, // and transform the graph accordingly. GenTree* fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType = nullptr); GenTree* fgMakeMultiUse(GenTree** ppTree); private: // Recognize a bitwise rotation pattern and convert into a GT_ROL or a GT_ROR node. GenTree* fgRecognizeAndMorphBitwiseRotation(GenTree* tree); bool fgOperIsBitwiseRotationRoot(genTreeOps oper); #if !defined(TARGET_64BIT) // Recognize and morph a long multiplication with 32 bit operands. GenTreeOp* fgRecognizeAndMorphLongMul(GenTreeOp* mul); GenTreeOp* fgMorphLongMul(GenTreeOp* mul); #endif //-------- Determine the order in which the trees will be evaluated ------- unsigned fgTreeSeqNum; GenTree* fgTreeSeqLst; GenTree* fgTreeSeqBeg; GenTree* fgSetTreeSeq(GenTree* tree, GenTree* prev = nullptr, bool isLIR = false); void fgSetTreeSeqHelper(GenTree* tree, bool isLIR); void fgSetTreeSeqFinish(GenTree* tree, bool isLIR); void fgSetStmtSeq(Statement* stmt); void fgSetBlockOrder(BasicBlock* block); //------------------------- Morphing -------------------------------------- unsigned fgPtrArgCntMax; public: //------------------------------------------------------------------------ // fgGetPtrArgCntMax: Return the maximum number of pointer-sized stack arguments that calls inside this method // can push on the stack. This value is calculated during morph. // // Return Value: // Returns fgPtrArgCntMax, that is a private field. 
    //
    unsigned fgGetPtrArgCntMax() const
    {
        return fgPtrArgCntMax;
    }

    //------------------------------------------------------------------------
    // fgSetPtrArgCntMax: Set the maximum number of pointer-sized stack arguments that calls inside this method
    // can push on the stack. This function is used during StackLevelSetter to fix incorrect morph calculations.
    //
    void fgSetPtrArgCntMax(unsigned argCntMax)
    {
        fgPtrArgCntMax = argCntMax;
    }

    bool compCanEncodePtrArgCntMax();

private:
    hashBv* fgOutgoingArgTemps;
    hashBv* fgCurrentlyInUseArgTemps;

    void fgSetRngChkTarget(GenTree* tree, bool delay = true);

    BasicBlock* fgSetRngChkTargetInner(SpecialCodeKind kind, bool delay);

#if REARRANGE_ADDS
    void fgMoveOpsLeft(GenTree* tree);
#endif

    bool fgIsCommaThrow(GenTree* tree, bool forFolding = false);

    bool fgIsThrow(GenTree* tree);

    bool fgInDifferentRegions(BasicBlock* blk1, BasicBlock* blk2);
    bool fgIsBlockCold(BasicBlock* block);

    GenTree* fgMorphCastIntoHelper(GenTree* tree, int helper, GenTree* oper);

    GenTree* fgMorphIntoHelperCall(GenTree* tree, int helper, GenTreeCall::Use* args, bool morphArgs = true);

    GenTree* fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs);

    // A "MorphAddrContext" carries information from the surrounding context. If we are evaluating a byref address,
    // it is useful to know whether the address will be immediately dereferenced, or whether the address value will
    // be used, perhaps by passing it as an argument to a called method. This affects how null checking is done:
    // for sufficiently small offsets, we can rely on OS page protection to implicitly null-check addresses that we
    // know will be dereferenced. To know that reliance on implicit null checking is sound, we must further know that
    // all offsets between the top-level indirection and the bottom are constant, and that their sum is sufficiently
    // small; hence the other fields of MorphAddrContext.
    enum MorphAddrContextKind
    {
        MACK_Ind,
        MACK_Addr,
    };
    struct MorphAddrContext
    {
        MorphAddrContextKind m_kind;
        bool m_allConstantOffsets; // Valid only for "m_kind == MACK_Ind". True iff all offsets between
                                   // top-level indirection and here have been constants.
        size_t m_totalOffset;      // Valid only for "m_kind == MACK_Ind", and if "m_allConstantOffsets" is true.
                                   // In that case, is the sum of those constant offsets.

        MorphAddrContext(MorphAddrContextKind kind) : m_kind(kind), m_allConstantOffsets(true), m_totalOffset(0)
        {
        }
    };

    // A MACK_CopyBlock context is immutable, so we can just make one of these and share it.
    static MorphAddrContext s_CopyBlockMAC;

#ifdef FEATURE_SIMD
    GenTree* getSIMDStructFromField(GenTree*     tree,
                                    CorInfoType* simdBaseJitTypeOut,
                                    unsigned*    indexOut,
                                    unsigned*    simdSizeOut,
                                    bool         ignoreUsedInSIMDIntrinsic = false);
    GenTree* fgMorphFieldAssignToSimdSetElement(GenTree* tree);
    GenTree* fgMorphFieldToSimdGetElement(GenTree* tree);
    bool fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt);
    void impMarkContiguousSIMDFieldAssignments(Statement* stmt);

    // fgPreviousCandidateSIMDFieldAsgStmt is only used for tracking the previous SIMD field assignment
    // in Compiler::impMarkContiguousSIMDFieldAssignments.
Statement* fgPreviousCandidateSIMDFieldAsgStmt; #endif // FEATURE_SIMD GenTree* fgMorphArrayIndex(GenTree* tree); GenTree* fgMorphExpandCast(GenTreeCast* tree); GenTreeFieldList* fgMorphLclArgToFieldlist(GenTreeLclVarCommon* lcl); void fgInitArgInfo(GenTreeCall* call); GenTreeCall* fgMorphArgs(GenTreeCall* call); void fgMakeOutgoingStructArgCopy(GenTreeCall* call, GenTreeCall::Use* args, CORINFO_CLASS_HANDLE copyBlkClass); GenTree* fgMorphLocalVar(GenTree* tree, bool forceRemorph); public: bool fgAddrCouldBeNull(GenTree* addr); private: GenTree* fgMorphField(GenTree* tree, MorphAddrContext* mac); bool fgCanFastTailCall(GenTreeCall* call, const char** failReason); #if FEATURE_FASTTAILCALL bool fgCallHasMustCopyByrefParameter(GenTreeCall* callee); #endif bool fgCheckStmtAfterTailCall(); GenTree* fgMorphTailCallViaHelpers(GenTreeCall* call, CORINFO_TAILCALL_HELPERS& help); bool fgCanTailCallViaJitHelper(); void fgMorphTailCallViaJitHelper(GenTreeCall* call); GenTree* fgCreateCallDispatcherAndGetResult(GenTreeCall* origCall, CORINFO_METHOD_HANDLE callTargetStubHnd, CORINFO_METHOD_HANDLE dispatcherHnd); GenTree* getLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle); GenTree* getRuntimeLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle); GenTree* getVirtMethodPointerTree(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); GenTree* getTokenHandleTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool parent); GenTree* fgMorphPotentialTailCall(GenTreeCall* call); GenTree* fgGetStubAddrArg(GenTreeCall* call); unsigned fgGetArgTabEntryParameterLclNum(GenTreeCall* call, fgArgTabEntry* argTabEntry); void fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall); Statement* fgAssignRecursiveCallArgToCallerParam(GenTree* arg, fgArgTabEntry* argTabEntry, unsigned lclParamNum, BasicBlock* block, const DebugInfo& callDI, Statement* tmpAssignmentInsertionPoint, Statement* paramAssignmentInsertionPoint); GenTree* fgMorphCall(GenTreeCall* call); GenTree* fgExpandVirtualVtableCallTarget(GenTreeCall* call); void fgMorphCallInline(GenTreeCall* call, InlineResult* result); void fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result, InlineContext** createdContext); #if DEBUG void fgNoteNonInlineCandidate(Statement* stmt, GenTreeCall* call); static fgWalkPreFn fgFindNonInlineCandidate; #endif GenTree* fgOptimizeDelegateConstructor(GenTreeCall* call, CORINFO_CONTEXT_HANDLE* ExactContextHnd, CORINFO_RESOLVED_TOKEN* ldftnToken); GenTree* fgMorphLeaf(GenTree* tree); void fgAssignSetVarDef(GenTree* tree); GenTree* fgMorphOneAsgBlockOp(GenTree* tree); GenTree* fgMorphInitBlock(GenTree* tree); GenTree* fgMorphPromoteLocalInitBlock(GenTreeLclVar* destLclNode, GenTree* initVal, unsigned blockSize); GenTree* fgMorphGetStructAddr(GenTree** pTree, CORINFO_CLASS_HANDLE clsHnd, bool isRValue = false); GenTree* fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigned blockWidth, bool isBlkReqd); GenTree* fgMorphCopyBlock(GenTree* tree); GenTree* fgMorphStoreDynBlock(GenTreeStoreDynBlk* tree); GenTree* fgMorphForRegisterFP(GenTree* tree); GenTree* fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac = nullptr); GenTree* fgOptimizeCast(GenTreeCast* cast); GenTree* fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp); GenTree* fgOptimizeRelationalComparisonWithConst(GenTreeOp* cmp); #ifdef FEATURE_HW_INTRINSICS GenTree* 
fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node); #endif GenTree* fgOptimizeCommutativeArithmetic(GenTreeOp* tree); GenTree* fgOptimizeRelationalComparisonWithCasts(GenTreeOp* cmp); GenTree* fgOptimizeAddition(GenTreeOp* add); GenTree* fgOptimizeMultiply(GenTreeOp* mul); GenTree* fgOptimizeBitwiseAnd(GenTreeOp* andOp); GenTree* fgPropagateCommaThrow(GenTree* parent, GenTreeOp* commaThrow, GenTreeFlags precedingSideEffects); GenTree* fgMorphRetInd(GenTreeUnOp* tree); GenTree* fgMorphModToSubMulDiv(GenTreeOp* tree); GenTree* fgMorphSmpOpOptional(GenTreeOp* tree); GenTree* fgMorphMultiOp(GenTreeMultiOp* multiOp); GenTree* fgMorphConst(GenTree* tree); bool fgMorphCanUseLclFldForCopy(unsigned lclNum1, unsigned lclNum2); GenTreeLclVar* fgMorphTryFoldObjAsLclVar(GenTreeObj* obj, bool destroyNodes = true); GenTreeOp* fgMorphCommutative(GenTreeOp* tree); GenTree* fgMorphCastedBitwiseOp(GenTreeOp* tree); GenTree* fgMorphReduceAddOps(GenTree* tree); public: GenTree* fgMorphTree(GenTree* tree, MorphAddrContext* mac = nullptr); private: void fgKillDependentAssertionsSingle(unsigned lclNum DEBUGARG(GenTree* tree)); void fgKillDependentAssertions(unsigned lclNum DEBUGARG(GenTree* tree)); void fgMorphTreeDone(GenTree* tree, GenTree* oldTree = nullptr DEBUGARG(int morphNum = 0)); Statement* fgMorphStmt; unsigned fgGetBigOffsetMorphingTemp(var_types type); // We cache one temp per type to be // used when morphing big offset. //----------------------- Liveness analysis ------------------------------- VARSET_TP fgCurUseSet; // vars used by block (before an assignment) VARSET_TP fgCurDefSet; // vars assigned by block (before a use) MemoryKindSet fgCurMemoryUse; // True iff the current basic block uses memory. MemoryKindSet fgCurMemoryDef; // True iff the current basic block modifies memory. MemoryKindSet fgCurMemoryHavoc; // True if the current basic block is known to set memory to a "havoc" value. bool byrefStatesMatchGcHeapStates; // True iff GcHeap and ByrefExposed memory have all the same def points. void fgMarkUseDef(GenTreeLclVarCommon* tree); void fgBeginScopeLife(VARSET_TP* inScope, VarScopeDsc* var); void fgEndScopeLife(VARSET_TP* inScope, VarScopeDsc* var); void fgMarkInScope(BasicBlock* block, VARSET_VALARG_TP inScope); void fgUnmarkInScope(BasicBlock* block, VARSET_VALARG_TP unmarkScope); void fgExtendDbgScopes(); void fgExtendDbgLifetimes(); #ifdef DEBUG void fgDispDebugScopes(); #endif // DEBUG //------------------------------------------------------------------------- // // The following keeps track of any code we've added for things like array // range checking or explicit calls to enable GC, and so on. // public: struct AddCodeDsc { AddCodeDsc* acdNext; BasicBlock* acdDstBlk; // block to which we jump unsigned acdData; SpecialCodeKind acdKind; // what kind of a special block is this? #if !FEATURE_FIXED_OUT_ARGS bool acdStkLvlInit; // has acdStkLvl value been already set? unsigned acdStkLvl; // stack level in stack slots. 
#endif // !FEATURE_FIXED_OUT_ARGS }; private: static unsigned acdHelper(SpecialCodeKind codeKind); AddCodeDsc* fgAddCodeList; bool fgAddCodeModf; bool fgRngChkThrowAdded; AddCodeDsc* fgExcptnTargetCache[SCK_COUNT]; BasicBlock* fgRngChkTarget(BasicBlock* block, SpecialCodeKind kind); BasicBlock* fgAddCodeRef(BasicBlock* srcBlk, unsigned refData, SpecialCodeKind kind); public: AddCodeDsc* fgFindExcptnTarget(SpecialCodeKind kind, unsigned refData); bool fgUseThrowHelperBlocks(); AddCodeDsc* fgGetAdditionalCodeDescriptors() { return fgAddCodeList; } private: bool fgIsCodeAdded(); bool fgIsThrowHlpBlk(BasicBlock* block); #if !FEATURE_FIXED_OUT_ARGS unsigned fgThrowHlpBlkStkLevel(BasicBlock* block); #endif // !FEATURE_FIXED_OUT_ARGS unsigned fgBigOffsetMorphingTemps[TYP_COUNT]; unsigned fgCheckInlineDepthAndRecursion(InlineInfo* inlineInfo); void fgInvokeInlineeCompiler(GenTreeCall* call, InlineResult* result, InlineContext** createdContext); void fgInsertInlineeBlocks(InlineInfo* pInlineInfo); Statement* fgInlinePrependStatements(InlineInfo* inlineInfo); void fgInlineAppendStatements(InlineInfo* inlineInfo, BasicBlock* block, Statement* stmt); #if FEATURE_MULTIREG_RET GenTree* fgGetStructAsStructPtr(GenTree* tree); GenTree* fgAssignStructInlineeToVar(GenTree* child, CORINFO_CLASS_HANDLE retClsHnd); void fgAttachStructInlineeToAsg(GenTree* tree, GenTree* child, CORINFO_CLASS_HANDLE retClsHnd); #endif // FEATURE_MULTIREG_RET static fgWalkPreFn fgUpdateInlineReturnExpressionPlaceHolder; static fgWalkPostFn fgLateDevirtualization; #ifdef DEBUG static fgWalkPreFn fgDebugCheckInlineCandidates; void CheckNoTransformableIndirectCallsRemain(); static fgWalkPreFn fgDebugCheckForTransformableIndirectCalls; #endif void fgPromoteStructs(); void fgMorphStructField(GenTree* tree, GenTree* parent); void fgMorphLocalField(GenTree* tree, GenTree* parent); // Reset the refCount for implicit byrefs. void fgResetImplicitByRefRefCount(); // Change implicit byrefs' types from struct to pointer, and for any that were // promoted, create new promoted struct temps. void fgRetypeImplicitByRefArgs(); // Rewrite appearances of implicit byrefs (manifest the implied additional level of indirection). bool fgMorphImplicitByRefArgs(GenTree* tree); GenTree* fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr); // Clear up annotations for any struct promotion temps created for implicit byrefs. void fgMarkDemotedImplicitByRefArgs(); void fgMarkAddressExposedLocals(); void fgMarkAddressExposedLocals(Statement* stmt); PhaseStatus fgForwardSub(); bool fgForwardSubBlock(BasicBlock* block); bool fgForwardSubStatement(Statement* statement); static fgWalkPreFn fgUpdateSideEffectsPre; static fgWalkPostFn fgUpdateSideEffectsPost; // The given local variable, required to be a struct variable, is being assigned via // a "lclField", to make it masquerade as an integral type in the ABI. Make sure that // the variable is not enregistered, and is therefore not promoted independently. 
void fgLclFldAssign(unsigned lclNum); static fgWalkPreFn gtHasLocalsWithAddrOpCB; enum TypeProducerKind { TPK_Unknown = 0, // May not be a RuntimeType TPK_Handle = 1, // RuntimeType via handle TPK_GetType = 2, // RuntimeType via Object.get_Type() TPK_Null = 3, // Tree value is null TPK_Other = 4 // RuntimeType via other means }; TypeProducerKind gtGetTypeProducerKind(GenTree* tree); bool gtIsTypeHandleToRuntimeTypeHelper(GenTreeCall* call); bool gtIsTypeHandleToRuntimeTypeHandleHelper(GenTreeCall* call, CorInfoHelpFunc* pHelper = nullptr); bool gtIsActiveCSE_Candidate(GenTree* tree); bool fgIsBigOffset(size_t offset); bool fgNeedReturnSpillTemp(); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Optimizer XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: void optInit(); GenTree* optRemoveRangeCheck(GenTreeBoundsChk* check, GenTree* comma, Statement* stmt); GenTree* optRemoveStandaloneRangeCheck(GenTreeBoundsChk* check, Statement* stmt); void optRemoveCommaBasedRangeCheck(GenTree* comma, Statement* stmt); protected: // Do hoisting for all loops. void optHoistLoopCode(); // To represent sets of VN's that have already been hoisted in outer loops. typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, bool> VNSet; struct LoopHoistContext { private: // The set of variables hoisted in the current loop (or nullptr if there are none). VNSet* m_pHoistedInCurLoop; public: // Value numbers of expressions that have been hoisted in parent loops in the loop nest. VNSet m_hoistedInParentLoops; // Value numbers of expressions that have been hoisted in the current (or most recent) loop in the nest. // Previous decisions on loop-invariance of value numbers in the current loop. VNSet m_curLoopVnInvariantCache; VNSet* GetHoistedInCurLoop(Compiler* comp) { if (m_pHoistedInCurLoop == nullptr) { m_pHoistedInCurLoop = new (comp->getAllocatorLoopHoist()) VNSet(comp->getAllocatorLoopHoist()); } return m_pHoistedInCurLoop; } VNSet* ExtractHoistedInCurLoop() { VNSet* res = m_pHoistedInCurLoop; m_pHoistedInCurLoop = nullptr; return res; } LoopHoistContext(Compiler* comp) : m_pHoistedInCurLoop(nullptr) , m_hoistedInParentLoops(comp->getAllocatorLoopHoist()) , m_curLoopVnInvariantCache(comp->getAllocatorLoopHoist()) { } }; // Do hoisting for loop "lnum" (an index into the optLoopTable), and all loops nested within it. // Tracks the expressions that have been hoisted by containing loops by temporarily recording their // value numbers in "m_hoistedInParentLoops". This set is not modified by the call. void optHoistLoopNest(unsigned lnum, LoopHoistContext* hoistCtxt); // Do hoisting for a particular loop ("lnum" is an index into the optLoopTable.) // Assumes that expressions have been hoisted in containing loops if their value numbers are in // "m_hoistedInParentLoops". // void optHoistThisLoop(unsigned lnum, LoopHoistContext* hoistCtxt); // Hoist all expressions in "blocks" that are invariant in loop "loopNum" (an index into the optLoopTable) // outside of that loop. Exempt expressions whose value number is in "m_hoistedInParentLoops"; add VN's of hoisted // expressions to "hoistInLoop". 
void optHoistLoopBlocks(unsigned loopNum, ArrayStack<BasicBlock*>* blocks, LoopHoistContext* hoistContext); // Return true if the tree looks profitable to hoist out of loop 'lnum'. bool optIsProfitableToHoistTree(GenTree* tree, unsigned lnum); // Performs the hoisting 'tree' into the PreHeader for loop 'lnum' void optHoistCandidate(GenTree* tree, BasicBlock* treeBb, unsigned lnum, LoopHoistContext* hoistCtxt); // Returns true iff the ValueNum "vn" represents a value that is loop-invariant in "lnum". // Constants and init values are always loop invariant. // VNPhi's connect VN's to the SSA definition, so we can know if the SSA def occurs in the loop. bool optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNSet* recordedVNs); // If "blk" is the entry block of a natural loop, returns true and sets "*pLnum" to the index of the loop // in the loop table. bool optBlockIsLoopEntry(BasicBlock* blk, unsigned* pLnum); // Records the set of "side effects" of all loops: fields (object instance and static) // written to, and SZ-array element type equivalence classes updated. void optComputeLoopSideEffects(); #ifdef DEBUG bool optAnyChildNotRemoved(unsigned loopNum); #endif // DEBUG // Mark a loop as removed. void optMarkLoopRemoved(unsigned loopNum); private: // Requires "lnum" to be the index of an outermost loop in the loop table. Traverses the body of that loop, // including all nested loops, and records the set of "side effects" of the loop: fields (object instance and // static) written to, and SZ-array element type equivalence classes updated. void optComputeLoopNestSideEffects(unsigned lnum); // Given a loop number 'lnum' mark it and any nested loops as having 'memoryHavoc' void optRecordLoopNestsMemoryHavoc(unsigned lnum, MemoryKindSet memoryHavoc); // Add the side effects of "blk" (which is required to be within a loop) to all loops of which it is a part. // Returns false if we encounter a block that is not marked as being inside a loop. // bool optComputeLoopSideEffectsOfBlock(BasicBlock* blk); // Hoist the expression "expr" out of loop "lnum". void optPerformHoistExpr(GenTree* expr, BasicBlock* exprBb, unsigned lnum); public: void optOptimizeBools(); public: PhaseStatus optInvertLoops(); // Invert loops so they're entered at top and tested at bottom. PhaseStatus optOptimizeLayout(); // Optimize the BasicBlock layout of the method PhaseStatus optSetBlockWeights(); PhaseStatus optFindLoopsPhase(); // Finds loops and records them in the loop table void optFindLoops(); PhaseStatus optCloneLoops(); void optCloneLoop(unsigned loopInd, LoopCloneContext* context); void optEnsureUniqueHead(unsigned loopInd, weight_t ambientWeight); PhaseStatus optUnrollLoops(); // Unrolls loops (needs to have cost info) void optRemoveRedundantZeroInits(); protected: // This enumeration describes what is killed by a call. enum callInterf { CALLINT_NONE, // no interference (most helpers) CALLINT_REF_INDIRS, // kills GC ref indirections (SETFIELD OBJ) CALLINT_SCL_INDIRS, // kills non GC ref indirections (SETFIELD non-OBJ) CALLINT_ALL_INDIRS, // kills both GC ref and non GC ref indirections (SETFIELD STRUCT) CALLINT_ALL, // kills everything (normal method call) }; enum class FieldKindForVN { SimpleStatic, WithBaseAddr }; public: // A "LoopDsc" describes a ("natural") loop. We (currently) require the body of a loop to be a contiguous (in // bbNext order) sequence of basic blocks. (At times, we may require the blocks in a loop to be "properly numbered" // in bbNext order; we use comparisons on the bbNum to decide order.) 
// The blocks that define the body are // top <= entry <= bottom // The "head" of the loop is a block outside the loop that has "entry" as a successor. We only support loops with a // single 'head' block. The meanings of these blocks are given in the definitions below. Also see the picture at // Compiler::optFindNaturalLoops(). struct LoopDsc { BasicBlock* lpHead; // HEAD of the loop (not part of the looping of the loop) -- has ENTRY as a successor. BasicBlock* lpTop; // loop TOP (the back edge from lpBottom reaches here). Lexically first block (in bbNext // order) reachable in this loop. BasicBlock* lpEntry; // the ENTRY in the loop (in most cases TOP or BOTTOM) BasicBlock* lpBottom; // loop BOTTOM (from here we have a back edge to the TOP) BasicBlock* lpExit; // if a single exit loop this is the EXIT (in most cases BOTTOM) callInterf lpAsgCall; // "callInterf" for calls in the loop ALLVARSET_TP lpAsgVars; // set of vars assigned within the loop (all vars, not just tracked) varRefKinds lpAsgInds : 8; // set of inds modified within the loop LoopFlags lpFlags; unsigned char lpExitCnt; // number of exits from the loop unsigned char lpParent; // The index of the most-nested loop that completely contains this one, // or else BasicBlock::NOT_IN_LOOP if no such loop exists. unsigned char lpChild; // The index of a nested loop, or else BasicBlock::NOT_IN_LOOP if no child exists. // (Actually, an "immediately" nested loop -- // no other child of this loop is a parent of lpChild.) unsigned char lpSibling; // The index of another loop that is an immediate child of lpParent, // or else BasicBlock::NOT_IN_LOOP. One can enumerate all the children of a loop // by following "lpChild" then "lpSibling" links. bool lpLoopHasMemoryHavoc[MemoryKindCount]; // The loop contains an operation that we assume has arbitrary // memory side effects. If this is set, the fields below // may not be accurate (since they become irrelevant.) VARSET_TP lpVarInOut; // The set of variables that are IN or OUT during the execution of this loop VARSET_TP lpVarUseDef; // The set of variables that are USE or DEF during the execution of this loop // The following counts are used for hoisting profitability checks. int lpHoistedExprCount; // The register count for the non-FP expressions from inside this loop that have been // hoisted int lpLoopVarCount; // The register count for the non-FP LclVars that are read/written inside this loop int lpVarInOutCount; // The register count for the non-FP LclVars that are alive inside or across this loop int lpHoistedFPExprCount; // The register count for the FP expressions from inside this loop that have been // hoisted int lpLoopVarFPCount; // The register count for the FP LclVars that are read/written inside this loop int lpVarInOutFPCount; // The register count for the FP LclVars that are alive inside or across this loop typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<struct CORINFO_FIELD_STRUCT_>, FieldKindForVN> FieldHandleSet; FieldHandleSet* lpFieldsModified; // This has entries for all static field and object instance fields modified // in the loop. typedef JitHashTable<CORINFO_CLASS_HANDLE, JitPtrKeyFuncs<struct CORINFO_CLASS_STRUCT_>, bool> ClassHandleSet; ClassHandleSet* lpArrayElemTypesModified; // Bits set indicate the set of sz array element types such that // arrays of that type are modified // in the loop. 
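The lpParent/lpChild/lpSibling fields above encode the loop nest as a first-child/next-sibling tree over loop table indices: enumerating a loop's immediate children means following lpChild once and then the lpSibling chain. A minimal standalone sketch of that traversal follows (the LoopEntry struct, table layout, and NOT_IN_LOOP constant are illustrative stand-ins, not the JIT's actual types):

// Illustrative sketch only: a first-child/next-sibling nesting encoding in the
// spirit of lpParent/lpChild/lpSibling, over a hypothetical table of plain structs.
#include <cstdio>
#include <vector>

constexpr unsigned char NOT_IN_LOOP = 0xFF; // stand-in for BasicBlock::NOT_IN_LOOP

struct LoopEntry // hypothetical stand-in carrying only the nesting links
{
    unsigned char parent;  // enclosing loop, or NOT_IN_LOOP
    unsigned char child;   // one immediate child, or NOT_IN_LOOP
    unsigned char sibling; // next child of 'parent', or NOT_IN_LOOP
};

// Visit every immediate child of loop 'lnum' by following child, then sibling links.
void VisitChildren(const std::vector<LoopEntry>& table, unsigned char lnum)
{
    for (unsigned char c = table[lnum].child; c != NOT_IN_LOOP; c = table[c].sibling)
    {
        printf("loop %u is an immediate child of loop %u\n", (unsigned)c, (unsigned)lnum);
    }
}

int main()
{
    // Loop 0 contains loops 1 and 2; loop 2 contains loop 3.
    std::vector<LoopEntry> table = {
        {NOT_IN_LOOP, 1, NOT_IN_LOOP}, // loop 0
        {0, NOT_IN_LOOP, 2},           // loop 1
        {0, 3, NOT_IN_LOOP},           // loop 2
        {2, NOT_IN_LOOP, NOT_IN_LOOP}, // loop 3
    };
    VisitChildren(table, 0); // prints children 1 and 2
    VisitChildren(table, 2); // prints child 3
    return 0;
}

A sibling chain like this keeps each loop descriptor fixed-size regardless of how many children a loop has, which is presumably why the table stores single child/sibling indices rather than a per-loop child list.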
// Adds the variable liveness information for 'blk' to 'this' LoopDsc void AddVariableLiveness(Compiler* comp, BasicBlock* blk); inline void AddModifiedField(Compiler* comp, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind); // This doesn't *always* take a class handle -- it can also take primitive types, encoded as class handles // (shifted left, with a low-order bit set to distinguish.) // Use the {Encode/Decode}ElemType methods to construct/destruct these. inline void AddModifiedElemType(Compiler* comp, CORINFO_CLASS_HANDLE structHnd); /* The following values are set only for iterator loops, i.e. has the flag LPFLG_ITER set */ GenTree* lpIterTree; // The "i = i <op> const" tree unsigned lpIterVar() const; // iterator variable # int lpIterConst() const; // the constant with which the iterator is incremented genTreeOps lpIterOper() const; // the type of the operation on the iterator (ASG_ADD, ASG_SUB, etc.) void VERIFY_lpIterTree() const; var_types lpIterOperType() const; // For overflow instructions // Set to the block where we found the initialization for LPFLG_CONST_INIT or LPFLG_VAR_INIT loops. // Initially, this will be 'head', but 'head' might change if we insert a loop pre-header block. BasicBlock* lpInitBlock; union { int lpConstInit; // initial constant value of iterator // : Valid if LPFLG_CONST_INIT unsigned lpVarInit; // initial local var number to which we initialize the iterator // : Valid if LPFLG_VAR_INIT }; // The following is for LPFLG_ITER loops only (i.e. the loop condition is "i RELOP const or var") GenTree* lpTestTree; // pointer to the node containing the loop test genTreeOps lpTestOper() const; // the type of the comparison between the iterator and the limit (GT_LE, GT_GE, // etc.) void VERIFY_lpTestTree() const; bool lpIsReversed() const; // true if the iterator node is the second operand in the loop condition GenTree* lpIterator() const; // the iterator node in the loop test GenTree* lpLimit() const; // the limit node in the loop test // Limit constant value of iterator - loop condition is "i RELOP const" // : Valid if LPFLG_CONST_LIMIT int lpConstLimit() const; // The lclVar # in the loop condition ( "i RELOP lclVar" ) // : Valid if LPFLG_VAR_LIMIT unsigned lpVarLimit() const; // The array length in the loop condition ( "i RELOP arr.len" or "i RELOP arr[i][j].len" ) // : Valid if LPFLG_ARRLEN_LIMIT bool lpArrLenLimit(Compiler* comp, ArrIndex* index) const; // Returns "true" iff this is a "top entry" loop. bool lpIsTopEntry() const { if (lpHead->bbNext == lpEntry) { assert(lpHead->bbFallsThrough()); assert(lpTop == lpEntry); return true; } else { return false; } } // Returns "true" iff "*this" contains the blk. bool lpContains(BasicBlock* blk) const { return lpTop->bbNum <= blk->bbNum && blk->bbNum <= lpBottom->bbNum; } // Returns "true" iff "*this" (properly) contains the range [top, bottom] (allowing tops // to be equal, but requiring bottoms to be different.) bool lpContains(BasicBlock* top, BasicBlock* bottom) const { return lpTop->bbNum <= top->bbNum && bottom->bbNum < lpBottom->bbNum; } // Returns "true" iff "*this" (properly) contains "lp2" (allowing tops to be equal, but requiring // bottoms to be different.) bool lpContains(const LoopDsc& lp2) const { return lpContains(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff "*this" is (properly) contained by the range [top, bottom] // (allowing tops to be equal, but requiring bottoms to be different.) 
bool lpContainedBy(BasicBlock* top, BasicBlock* bottom) const { return top->bbNum <= lpTop->bbNum && lpBottom->bbNum < bottom->bbNum; } // Returns "true" iff "*this" is (properly) contained by "lp2" // (allowing tops to be equal, but requiring bottoms to be different.) bool lpContainedBy(const LoopDsc& lp2) const { return lpContainedBy(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff "*this" is disjoint from the range [top, bottom]. bool lpDisjoint(BasicBlock* top, BasicBlock* bottom) const { return bottom->bbNum < lpTop->bbNum || lpBottom->bbNum < top->bbNum; } // Returns "true" iff "*this" is disjoint from "lp2". bool lpDisjoint(const LoopDsc& lp2) const { return lpDisjoint(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff the loop is well-formed (see code for defn). bool lpWellFormed() const { return lpTop->bbNum <= lpEntry->bbNum && lpEntry->bbNum <= lpBottom->bbNum && (lpHead->bbNum < lpTop->bbNum || lpHead->bbNum > lpBottom->bbNum); } #ifdef DEBUG void lpValidatePreHeader() const { // If this is called, we expect there to be a pre-header. assert(lpFlags & LPFLG_HAS_PREHEAD); // The pre-header must unconditionally enter the loop. assert(lpHead->GetUniqueSucc() == lpEntry); // The loop block must be marked as a pre-header. assert(lpHead->bbFlags & BBF_LOOP_PREHEADER); // The loop entry must have a single non-loop predecessor, which is the pre-header. // We can't assume here that the bbNum are properly ordered, so we can't do a simple lpContained() // check. So, we defer this check, which will be done by `fgDebugCheckLoopTable()`. } #endif // DEBUG // LoopBlocks: convenience method for enabling range-based `for` iteration over all the // blocks in a loop, e.g.: // for (BasicBlock* const block : loop->LoopBlocks()) ... // Currently, the loop blocks are expected to be in linear, lexical, `bbNext` order // from `lpTop` through `lpBottom`, inclusive. All blocks in this range are considered // to be part of the loop. // BasicBlockRangeList LoopBlocks() const { return BasicBlockRangeList(lpTop, lpBottom); } }; protected: bool fgMightHaveLoop(); // returns true if there are any back edges bool fgHasLoops; // True if this method has any loops, set in fgComputeReachability public: LoopDsc* optLoopTable; // loop descriptor table unsigned char optLoopCount; // number of tracked loops unsigned char loopAlignCandidates; // number of loops identified for alignment // Every time we rebuild the loop table, we increase the global "loop epoch". Any loop indices or // loop table pointers from the previous epoch are invalid. // TODO: validate this in some way? unsigned optCurLoopEpoch; void NewLoopEpoch() { ++optCurLoopEpoch; JITDUMP("New loop epoch %d\n", optCurLoopEpoch); } #ifdef DEBUG unsigned char loopsAligned; // number of loops actually aligned #endif // DEBUG bool optRecordLoop(BasicBlock* head, BasicBlock* top, BasicBlock* entry, BasicBlock* bottom, BasicBlock* exit, unsigned char exitCnt); void optClearLoopIterInfo(); #ifdef DEBUG void optPrintLoopInfo(unsigned lnum, bool printVerbose = false); void optPrintLoopInfo(const LoopDsc* loop, bool printVerbose = false); void optPrintLoopTable(); #endif protected: unsigned optCallCount; // number of calls made in the method unsigned optIndirectCallCount; // number of virtual, interface and indirect calls made in the method unsigned optNativeCallCount; // number of Pinvoke/Native calls made in the method unsigned optLoopsCloned; // number of loops cloned in the current method. 
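Because a loop body is required to be the contiguous bbNum range [lpTop, lpBottom], the lpContains / lpContainedBy / lpDisjoint predicates above reduce to interval arithmetic on block numbers. Below is a small self-contained model of that logic; LoopRange is a hypothetical stand-in for the bbNum bounds, not LoopDsc itself:

// Standalone model, assuming (as the predicates above do) that a loop's blocks
// form the contiguous block-number range [topNum, bottomNum].
#include <cassert>

struct LoopRange // hypothetical stand-in carrying just the bbNum bounds
{
    unsigned topNum;
    unsigned bottomNum;

    bool Contains(unsigned blkNum) const
    {
        return (topNum <= blkNum) && (blkNum <= bottomNum);
    }

    // Proper containment of another range: tops may be equal, bottoms may not.
    bool Contains(const LoopRange& other) const
    {
        return (topNum <= other.topNum) && (other.bottomNum < bottomNum);
    }

    bool Disjoint(const LoopRange& other) const
    {
        return (other.bottomNum < topNum) || (bottomNum < other.topNum);
    }
};

int main()
{
    LoopRange outer{10, 40};
    LoopRange inner{12, 30};
    LoopRange later{50, 60};

    assert(outer.Contains(25u));                           // block-number membership
    assert(outer.Contains(inner) && !inner.Contains(outer)); // proper nesting
    assert(outer.Disjoint(later) && later.Disjoint(inner));  // non-overlapping ranges
    return 0;
}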
#ifdef DEBUG void optCheckPreds(); #endif void optResetLoopInfo(); void optFindAndScaleGeneralLoopBlocks(); // Determine if there are any potential loops, and set BBF_LOOP_HEAD on potential loop heads. void optMarkLoopHeads(); void optScaleLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk); void optUnmarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk); void optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmarkLoop = false); bool optIsLoopTestEvalIntoTemp(Statement* testStmt, Statement** newTestStmt); unsigned optIsLoopIncrTree(GenTree* incr); bool optCheckIterInLoopTest(unsigned loopInd, GenTree* test, BasicBlock* from, BasicBlock* to, unsigned iterVar); bool optComputeIterInfo(GenTree* incr, BasicBlock* from, BasicBlock* to, unsigned* pIterVar); bool optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenTree* init, unsigned iterVar); bool optExtractInitTestIncr( BasicBlock* head, BasicBlock* bottom, BasicBlock* exit, GenTree** ppInit, GenTree** ppTest, GenTree** ppIncr); void optFindNaturalLoops(); void optIdentifyLoopsForAlignment(); // Ensures that all the loops in the loop nest rooted at "loopInd" (an index into the loop table) are 'canonical' -- // each loop has a unique "top." Returns "true" iff the flowgraph has been modified. bool optCanonicalizeLoopNest(unsigned char loopInd); // Ensures that the loop "loopInd" (an index into the loop table) is 'canonical' -- it has a unique "top," // unshared with any other loop. Returns "true" iff the flowgraph has been modified bool optCanonicalizeLoop(unsigned char loopInd); // Requires "l1" to be a valid loop table index, and not "BasicBlock::NOT_IN_LOOP". // Requires "l2" to be a valid loop table index, or else "BasicBlock::NOT_IN_LOOP". // Returns true iff "l2" is not NOT_IN_LOOP, and "l1" contains "l2". // A loop contains itself. bool optLoopContains(unsigned l1, unsigned l2) const; // Updates the loop table by changing loop "loopInd", whose head is required // to be "from", to be "to". Also performs this transformation for any // loop nested in "loopInd" that shares the same head as "loopInd". void optUpdateLoopHead(unsigned loopInd, BasicBlock* from, BasicBlock* to); void optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, const bool updatePreds = false); // Marks the containsCall information to "lnum" and any parent loops. void AddContainsCallAllContainingLoops(unsigned lnum); // Adds the variable liveness information from 'blk' to "lnum" and any parent loops. void AddVariableLivenessAllContainingLoops(unsigned lnum, BasicBlock* blk); // Adds "fldHnd" to the set of modified fields of "lnum" and any parent loops. void AddModifiedFieldAllContainingLoops(unsigned lnum, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind); // Adds "elemType" to the set of modified array element types of "lnum" and any parent loops. void AddModifiedElemTypeAllContainingLoops(unsigned lnum, CORINFO_CLASS_HANDLE elemType); // Requires that "from" and "to" have the same "bbJumpKind" (perhaps because "to" is a clone // of "from".) Copies the jump destination from "from" to "to". void optCopyBlkDest(BasicBlock* from, BasicBlock* to); // Returns true if 'block' is an entry block for any loop in 'optLoopTable' bool optIsLoopEntry(BasicBlock* block) const; // The depth of the loop described by "lnum" (an index into the loop table.) 
(0 == top level) unsigned optLoopDepth(unsigned lnum) { assert(lnum < optLoopCount); unsigned depth = 0; while ((lnum = optLoopTable[lnum].lpParent) != BasicBlock::NOT_IN_LOOP) { ++depth; } return depth; } // Struct used in optInvertWhileLoop to count interesting constructs to boost the profitability score. struct OptInvertCountTreeInfoType { int sharedStaticHelperCount; int arrayLengthCount; }; static fgWalkResult optInvertCountTreeInfo(GenTree** pTree, fgWalkData* data); bool optInvertWhileLoop(BasicBlock* block); private: static bool optIterSmallOverflow(int iterAtExit, var_types incrType); static bool optIterSmallUnderflow(int iterAtExit, var_types decrType); bool optComputeLoopRep(int constInit, int constLimit, int iterInc, genTreeOps iterOper, var_types iterType, genTreeOps testOper, bool unsignedTest, bool dupCond, unsigned* iterCount); static fgWalkPreFn optIsVarAssgCB; protected: bool optIsVarAssigned(BasicBlock* beg, BasicBlock* end, GenTree* skip, unsigned var); bool optIsVarAssgLoop(unsigned lnum, unsigned var); int optIsSetAssgLoop(unsigned lnum, ALLVARSET_VALARG_TP vars, varRefKinds inds = VR_NONE); bool optNarrowTree(GenTree* tree, var_types srct, var_types dstt, ValueNumPair vnpNarrow, bool doit); protected: // The following is the upper limit on how many expressions we'll keep track // of for the CSE analysis. // static const unsigned MAX_CSE_CNT = EXPSET_SZ; static const int MIN_CSE_COST = 2; // BitVec trait information only used by the optCSE_canSwap() method, for the CSE_defMask and CSE_useMask. // This BitVec uses one bit per CSE candidate BitVecTraits* cseMaskTraits; // one bit per CSE candidate // BitVec trait information for computing CSE availability using the CSE_DataFlow algorithm. // Two bits are allocated per CSE candidate to compute CSE availability // plus an extra bit to handle the initial unvisited case. // (See CSE_DataFlow::EndMerge for an explanation of why this is necessary.) // // The two bits per CSE candidate have the following meanings: // 11 - The CSE is available, and is also available when considering calls as killing availability. // 10 - The CSE is available, but is not available when considering calls as killing availability. // 00 - The CSE is not available // 01 - An illegal combination // BitVecTraits* cseLivenessTraits; //----------------------------------------------------------------------------------------------------------------- // getCSEnum2bit: Return the normalized index to use in the EXPSET_TP for the CSE with the given CSE index. // Each GenTree has a `gtCSEnum` field. Zero is reserved to mean this node is not a CSE, positive values indicate // CSE uses, and negative values indicate CSE defs. The caller must pass a non-zero positive value, as from // GET_CSE_INDEX(). // static unsigned genCSEnum2bit(unsigned CSEnum) { assert((CSEnum > 0) && (CSEnum <= MAX_CSE_CNT)); return CSEnum - 1; } //----------------------------------------------------------------------------------------------------------------- // getCSEAvailBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit for a CSE. // static unsigned getCSEAvailBit(unsigned CSEnum) { return genCSEnum2bit(CSEnum) * 2; } //----------------------------------------------------------------------------------------------------------------- // getCSEAvailCrossCallBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit // for a CSE considering calls as killing availability bit (see description above). 
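The two-bits-per-candidate availability scheme documented above can be exercised in isolation. The sketch below mirrors the roles of genCSEnum2bit, getCSEAvailBit, and getCSEAvailCrossCallBit with free functions over a plain uint64_t instead of the JIT's BitVec, so the "11" / "10" / "00" states are easy to see; it is a model of the encoding only, not the dataflow itself.

// Standalone sketch of the 2-bits-per-CSE-candidate availability encoding
// (plain uint64_t in place of a BitVec; candidate numbers are 1-based).
#include <cassert>
#include <cstdint>

static unsigned CseNumToBit(unsigned cseNum)       { return cseNum - 1; }            // role of genCSEnum2bit
static unsigned AvailBit(unsigned cseNum)          { return CseNumToBit(cseNum) * 2; } // role of getCSEAvailBit
static unsigned AvailCrossCallBit(unsigned cseNum) { return AvailBit(cseNum) + 1; }    // role of getCSEAvailCrossCallBit

int main()
{
    uint64_t avail = 0;

    // Candidate #3: available, and still available when calls kill availability ("11").
    avail |= (1ull << AvailBit(3)) | (1ull << AvailCrossCallBit(3));

    // Candidate #5: available, but killed once calls are considered ("10").
    avail |= (1ull << AvailBit(5));

    assert((avail >> AvailBit(3)) & 1);             // available
    assert((avail >> AvailCrossCallBit(3)) & 1);    // available across calls too
    assert((avail >> AvailBit(5)) & 1);             // available
    assert(!((avail >> AvailCrossCallBit(5)) & 1)); // not available once calls are considered
    return 0;
}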
// static unsigned getCSEAvailCrossCallBit(unsigned CSEnum) { return getCSEAvailBit(CSEnum) + 1; } void optPrintCSEDataFlowSet(EXPSET_VALARG_TP cseDataFlowSet, bool includeBits = true); EXPSET_TP cseCallKillsMask; // Computed once - A mask that is used to kill available CSEs at callsites /* Generic list of nodes - used by the CSE logic */ struct treeLst { treeLst* tlNext; GenTree* tlTree; }; struct treeStmtLst { treeStmtLst* tslNext; GenTree* tslTree; // tree node Statement* tslStmt; // statement containing the tree BasicBlock* tslBlock; // block containing the statement }; // The following logic keeps track of expressions via a simple hash table. struct CSEdsc { CSEdsc* csdNextInBucket; // used by the hash table size_t csdHashKey; // the orginal hashkey ssize_t csdConstDefValue; // When we CSE similar constants, this is the value that we use as the def ValueNum csdConstDefVN; // When we CSE similar constants, this is the ValueNumber that we use for the LclVar // assignment unsigned csdIndex; // 1..optCSECandidateCount bool csdIsSharedConst; // true if this CSE is a shared const bool csdLiveAcrossCall; unsigned short csdDefCount; // definition count unsigned short csdUseCount; // use count (excluding the implicit uses at defs) weight_t csdDefWtCnt; // weighted def count weight_t csdUseWtCnt; // weighted use count (excluding the implicit uses at defs) GenTree* csdTree; // treenode containing the 1st occurrence Statement* csdStmt; // stmt containing the 1st occurrence BasicBlock* csdBlock; // block containing the 1st occurrence treeStmtLst* csdTreeList; // list of matching tree nodes: head treeStmtLst* csdTreeLast; // list of matching tree nodes: tail // ToDo: This can be removed when gtGetStructHandleIfPresent stops guessing // and GT_IND nodes always have valid struct handle. // CORINFO_CLASS_HANDLE csdStructHnd; // The class handle, currently needed to create a SIMD LclVar in PerformCSE bool csdStructHndMismatch; ValueNum defExcSetPromise; // The exception set that is now required for all defs of this CSE. // This will be set to NoVN if we decide to abandon this CSE ValueNum defExcSetCurrent; // The set of exceptions we currently can use for CSE uses. ValueNum defConservNormVN; // if all def occurrences share the same conservative normal value // number, this will reflect it; otherwise, NoVN. // not used for shared const CSE's }; static const size_t s_optCSEhashSizeInitial; static const size_t s_optCSEhashGrowthFactor; static const size_t s_optCSEhashBucketSize; size_t optCSEhashSize; // The current size of hashtable size_t optCSEhashCount; // Number of entries in hashtable size_t optCSEhashMaxCountBeforeResize; // Number of entries before resize CSEdsc** optCSEhash; CSEdsc** optCSEtab; typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, GenTree*> NodeToNodeMap; NodeToNodeMap* optCseCheckedBoundMap; // Maps bound nodes to ancestor compares that should be // re-numbered with the bound to improve range check elimination // Given a compare, look for a cse candidate checked bound feeding it and add a map entry if found. 
void optCseUpdateCheckedBoundMap(GenTree* compare); void optCSEstop(); CSEdsc* optCSEfindDsc(unsigned index); bool optUnmarkCSE(GenTree* tree); // user defined callback data for the tree walk function optCSE_MaskHelper() struct optCSE_MaskData { EXPSET_TP CSE_defMask; EXPSET_TP CSE_useMask; }; // Treewalk helper for optCSE_DefMask and optCSE_UseMask static fgWalkPreFn optCSE_MaskHelper; // This function walks all the node for an given tree // and return the mask of CSE definitions and uses for the tree // void optCSE_GetMaskData(GenTree* tree, optCSE_MaskData* pMaskData); // Given a binary tree node return true if it is safe to swap the order of evaluation for op1 and op2. bool optCSE_canSwap(GenTree* firstNode, GenTree* secondNode); struct optCSEcostCmpEx { bool operator()(const CSEdsc* op1, const CSEdsc* op2); }; struct optCSEcostCmpSz { bool operator()(const CSEdsc* op1, const CSEdsc* op2); }; void optCleanupCSEs(); #ifdef DEBUG void optEnsureClearCSEInfo(); #endif // DEBUG static bool Is_Shared_Const_CSE(size_t key) { return ((key & TARGET_SIGN_BIT) != 0); } // returns the encoded key static size_t Encode_Shared_Const_CSE_Value(size_t key) { return TARGET_SIGN_BIT | (key >> CSE_CONST_SHARED_LOW_BITS); } // returns the orginal key static size_t Decode_Shared_Const_CSE_Value(size_t enckey) { assert(Is_Shared_Const_CSE(enckey)); return (enckey & ~TARGET_SIGN_BIT) << CSE_CONST_SHARED_LOW_BITS; } /************************************************************************** * Value Number based CSEs *************************************************************************/ // String to use for formatting CSE numbers. Note that this is the positive number, e.g., from GET_CSE_INDEX(). #define FMT_CSE "CSE #%02u" public: void optOptimizeValnumCSEs(); protected: void optValnumCSE_Init(); unsigned optValnumCSE_Index(GenTree* tree, Statement* stmt); bool optValnumCSE_Locate(); void optValnumCSE_InitDataFlow(); void optValnumCSE_DataFlow(); void optValnumCSE_Availablity(); void optValnumCSE_Heuristic(); bool optDoCSE; // True when we have found a duplicate CSE tree bool optValnumCSE_phase; // True when we are executing the optOptimizeValnumCSEs() phase unsigned optCSECandidateCount; // Count of CSE's candidates unsigned optCSEstart; // The first local variable number that is a CSE unsigned optCSEcount; // The total count of CSE's introduced. weight_t optCSEweight; // The weight of the current block when we are doing PerformCSE bool optIsCSEcandidate(GenTree* tree); // lclNumIsTrueCSE returns true if the LclVar was introduced by the CSE phase of the compiler // bool lclNumIsTrueCSE(unsigned lclNum) const { return ((optCSEcount > 0) && (lclNum >= optCSEstart) && (lclNum < optCSEstart + optCSEcount)); } // lclNumIsCSE returns true if the LclVar should be treated like a CSE with regards to constant prop. // bool lclNumIsCSE(unsigned lclNum) const { return lvaGetDesc(lclNum)->lvIsCSE; } #ifdef DEBUG bool optConfigDisableCSE(); bool optConfigDisableCSE2(); #endif void optOptimizeCSEs(); struct isVarAssgDsc { GenTree* ivaSkip; ALLVARSET_TP ivaMaskVal; // Set of variables assigned to. This is a set of all vars, not tracked vars. #ifdef DEBUG void* ivaSelf; #endif unsigned ivaVar; // Variable we are interested in, or -1 varRefKinds ivaMaskInd; // What kind of indirect assignments are there? callInterf ivaMaskCall; // What kind of calls are there? bool ivaMaskIncomplete; // Variables not representable in ivaMaskVal were assigned to. 
}; static callInterf optCallInterf(GenTreeCall* call); public: // VN based copy propagation. // In DEBUG builds, we'd like to know the tree that the SSA definition was pushed for. // While for ordinary SSA defs it will be available (as an ASG) in the SSA descriptor, // for locals which will use "definitions from uses", it will not be, so we store it // in this class instead. class CopyPropSsaDef { LclSsaVarDsc* m_ssaDef; #ifdef DEBUG GenTree* m_defNode; #endif public: CopyPropSsaDef(LclSsaVarDsc* ssaDef, GenTree* defNode) : m_ssaDef(ssaDef) #ifdef DEBUG , m_defNode(defNode) #endif { } LclSsaVarDsc* GetSsaDef() const { return m_ssaDef; } #ifdef DEBUG GenTree* GetDefNode() const { return m_defNode; } #endif }; typedef ArrayStack<CopyPropSsaDef> CopyPropSsaDefStack; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, CopyPropSsaDefStack*> LclNumToLiveDefsMap; // Copy propagation functions. void optCopyProp(Statement* stmt, GenTreeLclVarCommon* tree, unsigned lclNum, LclNumToLiveDefsMap* curSsaName); void optBlockCopyPropPopStacks(BasicBlock* block, LclNumToLiveDefsMap* curSsaName); void optBlockCopyProp(BasicBlock* block, LclNumToLiveDefsMap* curSsaName); void optCopyPropPushDef(GenTreeOp* asg, GenTreeLclVarCommon* lclNode, unsigned lclNum, LclNumToLiveDefsMap* curSsaName); unsigned optIsSsaLocal(GenTreeLclVarCommon* lclNode); int optCopyProp_LclVarScore(const LclVarDsc* lclVarDsc, const LclVarDsc* copyVarDsc, bool preferOp2); void optVnCopyProp(); INDEBUG(void optDumpCopyPropStack(LclNumToLiveDefsMap* curSsaName)); /************************************************************************** * Early value propagation *************************************************************************/ struct SSAName { unsigned m_lvNum; unsigned m_ssaNum; SSAName(unsigned lvNum, unsigned ssaNum) : m_lvNum(lvNum), m_ssaNum(ssaNum) { } static unsigned GetHashCode(SSAName ssaNm) { return (ssaNm.m_lvNum << 16) | (ssaNm.m_ssaNum); } static bool Equals(SSAName ssaNm1, SSAName ssaNm2) { return (ssaNm1.m_lvNum == ssaNm2.m_lvNum) && (ssaNm1.m_ssaNum == ssaNm2.m_ssaNum); } }; #define OMF_HAS_NEWARRAY 0x00000001 // Method contains 'new' of an array #define OMF_HAS_NEWOBJ 0x00000002 // Method contains 'new' of an object type. #define OMF_HAS_ARRAYREF 0x00000004 // Method contains array element loads or stores. #define OMF_HAS_NULLCHECK 0x00000008 // Method contains null check. #define OMF_HAS_FATPOINTER 0x00000010 // Method contains call, that needs fat pointer transformation. #define OMF_HAS_OBJSTACKALLOC 0x00000020 // Method contains an object allocated on the stack. #define OMF_HAS_GUARDEDDEVIRT 0x00000040 // Method contains guarded devirtualization candidate #define OMF_HAS_EXPRUNTIMELOOKUP 0x00000080 // Method contains a runtime lookup to an expandable dictionary. #define OMF_HAS_PATCHPOINT 0x00000100 // Method contains patchpoints #define OMF_NEEDS_GCPOLLS 0x00000200 // Method needs GC polls #define OMF_HAS_FROZEN_STRING 0x00000400 // Method has a frozen string (REF constant int), currently only on CoreRT. 
#define OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT 0x00000800 // Method contains partial compilation patchpoints #define OMF_HAS_TAILCALL_SUCCESSOR 0x00001000 // Method has potential tail call in a non BBJ_RETURN block bool doesMethodHaveFatPointer() { return (optMethodFlags & OMF_HAS_FATPOINTER) != 0; } void setMethodHasFatPointer() { optMethodFlags |= OMF_HAS_FATPOINTER; } void clearMethodHasFatPointer() { optMethodFlags &= ~OMF_HAS_FATPOINTER; } void addFatPointerCandidate(GenTreeCall* call); bool doesMethodHaveFrozenString() const { return (optMethodFlags & OMF_HAS_FROZEN_STRING) != 0; } void setMethodHasFrozenString() { optMethodFlags |= OMF_HAS_FROZEN_STRING; } bool doesMethodHaveGuardedDevirtualization() const { return (optMethodFlags & OMF_HAS_GUARDEDDEVIRT) != 0; } void setMethodHasGuardedDevirtualization() { optMethodFlags |= OMF_HAS_GUARDEDDEVIRT; } void clearMethodHasGuardedDevirtualization() { optMethodFlags &= ~OMF_HAS_GUARDEDDEVIRT; } void considerGuardedDevirtualization(GenTreeCall* call, IL_OFFSET ilOffset, bool isInterface, CORINFO_METHOD_HANDLE baseMethod, CORINFO_CLASS_HANDLE baseClass, CORINFO_CONTEXT_HANDLE* pContextHandle DEBUGARG(CORINFO_CLASS_HANDLE objClass) DEBUGARG(const char* objClassName)); void addGuardedDevirtualizationCandidate(GenTreeCall* call, CORINFO_METHOD_HANDLE methodHandle, CORINFO_CLASS_HANDLE classHandle, unsigned methodAttr, unsigned classAttr, unsigned likelihood); bool doesMethodHaveExpRuntimeLookup() { return (optMethodFlags & OMF_HAS_EXPRUNTIMELOOKUP) != 0; } void setMethodHasExpRuntimeLookup() { optMethodFlags |= OMF_HAS_EXPRUNTIMELOOKUP; } void clearMethodHasExpRuntimeLookup() { optMethodFlags &= ~OMF_HAS_EXPRUNTIMELOOKUP; } void addExpRuntimeLookupCandidate(GenTreeCall* call); bool doesMethodHavePatchpoints() { return (optMethodFlags & OMF_HAS_PATCHPOINT) != 0; } void setMethodHasPatchpoint() { optMethodFlags |= OMF_HAS_PATCHPOINT; } bool doesMethodHavePartialCompilationPatchpoints() { return (optMethodFlags & OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT) != 0; } void setMethodHasPartialCompilationPatchpoint() { optMethodFlags |= OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT; } unsigned optMethodFlags; bool doesMethodHaveNoReturnCalls() { return optNoReturnCallCount > 0; } void setMethodHasNoReturnCalls() { optNoReturnCallCount++; } unsigned optNoReturnCallCount; // Recursion bound controls how far we can go backwards tracking for a SSA value. // No throughput diff was found with backward walk bound between 3-8. 
static const int optEarlyPropRecurBound = 5; enum class optPropKind { OPK_INVALID, OPK_ARRAYLEN, OPK_NULLCHECK }; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, GenTree*> LocalNumberToNullCheckTreeMap; GenTree* getArrayLengthFromAllocation(GenTree* tree DEBUGARG(BasicBlock* block)); GenTree* optPropGetValueRec(unsigned lclNum, unsigned ssaNum, optPropKind valueKind, int walkDepth); GenTree* optPropGetValue(unsigned lclNum, unsigned ssaNum, optPropKind valueKind); GenTree* optEarlyPropRewriteTree(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); bool optDoEarlyPropForBlock(BasicBlock* block); bool optDoEarlyPropForFunc(); void optEarlyProp(); void optFoldNullCheck(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); GenTree* optFindNullCheckToFold(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); bool optIsNullCheckFoldingLegal(GenTree* tree, GenTree* nullCheckTree, GenTree** nullCheckParent, Statement** nullCheckStmt); bool optCanMoveNullCheckPastTree(GenTree* tree, unsigned nullCheckLclNum, bool isInsideTry, bool checkSideEffectSummary); #if DEBUG void optCheckFlagsAreSet(unsigned methodFlag, const char* methodFlagStr, unsigned bbFlag, const char* bbFlagStr, GenTree* tree, BasicBlock* basicBlock); #endif // Redundant branch opts // PhaseStatus optRedundantBranches(); bool optRedundantRelop(BasicBlock* const block); bool optRedundantBranch(BasicBlock* const block); bool optJumpThread(BasicBlock* const block, BasicBlock* const domBlock, bool domIsSameRelop); bool optReachable(BasicBlock* const fromBlock, BasicBlock* const toBlock, BasicBlock* const excludedBlock); /************************************************************************** * Value/Assertion propagation *************************************************************************/ public: // Data structures for assertion prop BitVecTraits* apTraits; ASSERT_TP apFull; enum optAssertionKind { OAK_INVALID, OAK_EQUAL, OAK_NOT_EQUAL, OAK_SUBRANGE, OAK_NO_THROW, OAK_COUNT }; enum optOp1Kind { O1K_INVALID, O1K_LCLVAR, O1K_ARR_BND, O1K_BOUND_OPER_BND, O1K_BOUND_LOOP_BND, O1K_CONSTANT_LOOP_BND, O1K_CONSTANT_LOOP_BND_UN, O1K_EXACT_TYPE, O1K_SUBTYPE, O1K_VALUE_NUMBER, O1K_COUNT }; enum optOp2Kind { O2K_INVALID, O2K_LCLVAR_COPY, O2K_IND_CNS_INT, O2K_CONST_INT, O2K_CONST_LONG, O2K_CONST_DOUBLE, O2K_ZEROOBJ, O2K_SUBRANGE, O2K_COUNT }; struct AssertionDsc { optAssertionKind assertionKind; struct SsaVar { unsigned lclNum; // assigned to or property of this local var number unsigned ssaNum; }; struct ArrBnd { ValueNum vnIdx; ValueNum vnLen; }; struct AssertionDscOp1 { optOp1Kind kind; // a normal LclVar, or Exact-type or Subtype ValueNum vn; union { SsaVar lcl; ArrBnd bnd; }; } op1; struct AssertionDscOp2 { optOp2Kind kind; // a const or copy assignment ValueNum vn; struct IntVal { ssize_t iconVal; // integer #if !defined(HOST_64BIT) unsigned padding; // unused; ensures iconFlags does not overlap lconVal #endif GenTreeFlags iconFlags; // gtFlags }; union { struct { SsaVar lcl; FieldSeqNode* zeroOffsetFieldSeq; }; IntVal u1; __int64 lconVal; double dconVal; IntegralRange u2; }; } op2; bool IsCheckedBoundArithBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_OPER_BND); } bool IsCheckedBoundBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_LOOP_BND); } bool IsConstantBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && (op1.kind == O1K_CONSTANT_LOOP_BND)); } 
bool IsConstantBoundUnsigned() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && (op1.kind == O1K_CONSTANT_LOOP_BND_UN)); } bool IsBoundsCheckNoThrow() { return ((assertionKind == OAK_NO_THROW) && (op1.kind == O1K_ARR_BND)); } bool IsCopyAssertion() { return ((assertionKind == OAK_EQUAL) && (op1.kind == O1K_LCLVAR) && (op2.kind == O2K_LCLVAR_COPY)); } bool IsConstantInt32Assertion() { return ((assertionKind == OAK_EQUAL) || (assertionKind == OAK_NOT_EQUAL)) && (op2.kind == O2K_CONST_INT); } static bool SameKind(AssertionDsc* a1, AssertionDsc* a2) { return a1->assertionKind == a2->assertionKind && a1->op1.kind == a2->op1.kind && a1->op2.kind == a2->op2.kind; } static bool ComplementaryKind(optAssertionKind kind, optAssertionKind kind2) { if (kind == OAK_EQUAL) { return kind2 == OAK_NOT_EQUAL; } else if (kind == OAK_NOT_EQUAL) { return kind2 == OAK_EQUAL; } return false; } bool HasSameOp1(AssertionDsc* that, bool vnBased) { if (op1.kind != that->op1.kind) { return false; } else if (op1.kind == O1K_ARR_BND) { assert(vnBased); return (op1.bnd.vnIdx == that->op1.bnd.vnIdx) && (op1.bnd.vnLen == that->op1.bnd.vnLen); } else { return ((vnBased && (op1.vn == that->op1.vn)) || (!vnBased && (op1.lcl.lclNum == that->op1.lcl.lclNum))); } } bool HasSameOp2(AssertionDsc* that, bool vnBased) { if (op2.kind != that->op2.kind) { return false; } switch (op2.kind) { case O2K_IND_CNS_INT: case O2K_CONST_INT: return ((op2.u1.iconVal == that->op2.u1.iconVal) && (op2.u1.iconFlags == that->op2.u1.iconFlags)); case O2K_CONST_LONG: return (op2.lconVal == that->op2.lconVal); case O2K_CONST_DOUBLE: // exact match because of positive and negative zero. return (memcmp(&op2.dconVal, &that->op2.dconVal, sizeof(double)) == 0); case O2K_ZEROOBJ: return true; case O2K_LCLVAR_COPY: return (op2.lcl.lclNum == that->op2.lcl.lclNum) && (!vnBased || op2.lcl.ssaNum == that->op2.lcl.ssaNum) && (op2.zeroOffsetFieldSeq == that->op2.zeroOffsetFieldSeq); case O2K_SUBRANGE: return op2.u2.Equals(that->op2.u2); case O2K_INVALID: // we will return false break; default: assert(!"Unexpected value for op2.kind in AssertionDsc."); break; } return false; } bool Complementary(AssertionDsc* that, bool vnBased) { return ComplementaryKind(assertionKind, that->assertionKind) && HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased); } bool Equals(AssertionDsc* that, bool vnBased) { if (assertionKind != that->assertionKind) { return false; } else if (assertionKind == OAK_NO_THROW) { assert(op2.kind == O2K_INVALID); return HasSameOp1(that, vnBased); } else { return HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased); } } }; protected: static fgWalkPreFn optAddCopiesCallback; static fgWalkPreFn optVNAssertionPropCurStmtVisitor; unsigned optAddCopyLclNum; GenTree* optAddCopyAsgnNode; bool optLocalAssertionProp; // indicates that we are performing local assertion prop bool optAssertionPropagated; // set to true if we modified the trees bool optAssertionPropagatedCurrentStmt; #ifdef DEBUG GenTree* optAssertionPropCurrentTree; #endif AssertionIndex* optComplementaryAssertionMap; JitExpandArray<ASSERT_TP>* optAssertionDep; // table that holds dependent assertions (assertions // using the value of a local var) for each local var AssertionDsc* optAssertionTabPrivate; // table that holds info about value assignments AssertionIndex optAssertionCount; // total number of assertions in the assertion table AssertionIndex optMaxAssertionCount; public: void optVnNonNullPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree); 
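ComplementaryKind, HasSameOp1, and HasSameOp2 above combine so that an OAK_EQUAL and an OAK_NOT_EQUAL assertion over the same operands recognize each other as complements. A stripped-down standalone sketch of that pairing follows; MiniAssertion is a hypothetical simplification of AssertionDsc that keeps only a kind, a local number, and a constant:

// Minimal sketch of the "complementary assertion" check: an equal and a
// not-equal assertion over the same operands complement each other.
#include <cassert>

enum MiniKind { MK_EQUAL, MK_NOT_EQUAL, MK_SUBRANGE };

struct MiniAssertion // hypothetical, far smaller than AssertionDsc
{
    MiniKind kind;
    unsigned lclNum;   // stands in for op1
    int      constVal; // stands in for op2
};

static bool ComplementaryKind(MiniKind k1, MiniKind k2)
{
    if (k1 == MK_EQUAL)     return k2 == MK_NOT_EQUAL;
    if (k1 == MK_NOT_EQUAL) return k2 == MK_EQUAL;
    return false;
}

static bool Complementary(const MiniAssertion& a, const MiniAssertion& b)
{
    return ComplementaryKind(a.kind, b.kind) && (a.lclNum == b.lclNum) && (a.constVal == b.constVal);
}

int main()
{
    MiniAssertion isNull    = {MK_EQUAL, 3, 0};     // "V03 == 0"
    MiniAssertion isNotNull = {MK_NOT_EQUAL, 3, 0}; // "V03 != 0"
    MiniAssertion other     = {MK_NOT_EQUAL, 4, 0}; // different local, not a complement

    assert(Complementary(isNull, isNotNull));
    assert(!Complementary(isNull, other));
    return 0;
}

Pairing complements this way is what lets one branch edge assert "V03 == 0" while the other edge gets the matching "V03 != 0" without storing two unrelated assertions.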
fgWalkResult optVNConstantPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree); GenTree* optVNConstantPropOnJTrue(BasicBlock* block, GenTree* test); GenTree* optVNConstantPropOnTree(BasicBlock* block, GenTree* tree); GenTree* optExtractSideEffListFromConst(GenTree* tree); AssertionIndex GetAssertionCount() { return optAssertionCount; } ASSERT_TP* bbJtrueAssertionOut; typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, ASSERT_TP> ValueNumToAssertsMap; ValueNumToAssertsMap* optValueNumToAsserts; // Assertion prop helpers. ASSERT_TP& GetAssertionDep(unsigned lclNum); AssertionDsc* optGetAssertion(AssertionIndex assertIndex); void optAssertionInit(bool isLocalProp); void optAssertionTraitsInit(AssertionIndex assertionCount); void optAssertionReset(AssertionIndex limit); void optAssertionRemove(AssertionIndex index); // Assertion prop data flow functions. void optAssertionPropMain(); Statement* optVNAssertionPropCurStmt(BasicBlock* block, Statement* stmt); bool optIsTreeKnownIntValue(bool vnBased, GenTree* tree, ssize_t* pConstant, GenTreeFlags* pIconFlags); ASSERT_TP* optInitAssertionDataflowFlags(); ASSERT_TP* optComputeAssertionGen(); // Assertion Gen functions. void optAssertionGen(GenTree* tree); AssertionIndex optAssertionGenCast(GenTreeCast* cast); AssertionIndex optAssertionGenPhiDefn(GenTree* tree); AssertionInfo optCreateJTrueBoundsAssertion(GenTree* tree); AssertionInfo optAssertionGenJtrue(GenTree* tree); AssertionIndex optCreateJtrueAssertions(GenTree* op1, GenTree* op2, Compiler::optAssertionKind assertionKind, bool helperCallArgs = false); AssertionIndex optFindComplementary(AssertionIndex assertionIndex); void optMapComplementary(AssertionIndex assertionIndex, AssertionIndex index); // Assertion creation functions. AssertionIndex optCreateAssertion(GenTree* op1, GenTree* op2, optAssertionKind assertionKind, bool helperCallArgs = false); AssertionIndex optFinalizeCreatingAssertion(AssertionDsc* assertion); bool optTryExtractSubrangeAssertion(GenTree* source, IntegralRange* pRange); void optCreateComplementaryAssertion(AssertionIndex assertionIndex, GenTree* op1, GenTree* op2, bool helperCallArgs = false); bool optAssertionVnInvolvesNan(AssertionDsc* assertion); AssertionIndex optAddAssertion(AssertionDsc* assertion); void optAddVnAssertionMapping(ValueNum vn, AssertionIndex index); #ifdef DEBUG void optPrintVnAssertionMapping(); #endif ASSERT_TP optGetVnMappedAssertions(ValueNum vn); // Used for respective assertion propagations. AssertionIndex optAssertionIsSubrange(GenTree* tree, IntegralRange range, ASSERT_VALARG_TP assertions); AssertionIndex optAssertionIsSubtype(GenTree* tree, GenTree* methodTableArg, ASSERT_VALARG_TP assertions); AssertionIndex optAssertionIsNonNullInternal(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased)); bool optAssertionIsNonNull(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased) DEBUGARG(AssertionIndex* pIndex)); AssertionIndex optGlobalAssertionIsEqualOrNotEqual(ASSERT_VALARG_TP assertions, GenTree* op1, GenTree* op2); AssertionIndex optGlobalAssertionIsEqualOrNotEqualZero(ASSERT_VALARG_TP assertions, GenTree* op1); AssertionIndex optLocalAssertionIsEqualOrNotEqual( optOp1Kind op1Kind, unsigned lclNum, optOp2Kind op2Kind, ssize_t cnsVal, ASSERT_VALARG_TP assertions); // Assertion prop for lcl var functions. 
bool optAssertionProp_LclVarTypeCheck(GenTree* tree, LclVarDsc* lclVarDsc, LclVarDsc* copyVarDsc);
GenTree* optCopyAssertionProp(AssertionDsc* curAssertion, GenTreeLclVarCommon* tree, Statement* stmt DEBUGARG(AssertionIndex index));
GenTree* optConstantAssertionProp(AssertionDsc* curAssertion, GenTreeLclVarCommon* tree, Statement* stmt DEBUGARG(AssertionIndex index));
bool optZeroObjAssertionProp(GenTree* tree, ASSERT_VALARG_TP assertions);

// Assertion propagation functions.
GenTree* optAssertionProp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt, BasicBlock* block);
GenTree* optAssertionProp_LclVar(ASSERT_VALARG_TP assertions, GenTreeLclVarCommon* tree, Statement* stmt);
GenTree* optAssertionProp_Asg(ASSERT_VALARG_TP assertions, GenTreeOp* asg, Statement* stmt);
GenTree* optAssertionProp_Return(ASSERT_VALARG_TP assertions, GenTreeUnOp* ret, Statement* stmt);
GenTree* optAssertionProp_Ind(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionProp_Cast(ASSERT_VALARG_TP assertions, GenTreeCast* cast, Statement* stmt);
GenTree* optAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call, Statement* stmt);
GenTree* optAssertionProp_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionProp_Comma(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionProp_BndsChk(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionPropGlobal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionPropLocal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionProp_Update(GenTree* newTree, GenTree* tree, Statement* stmt);
GenTree* optNonNullAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call);

// Implied assertion functions.

void optImpliedAssertions(AssertionIndex assertionIndex, ASSERT_TP& activeAssertions); void optImpliedByTypeOfAssertions(ASSERT_TP& activeAssertions); void optImpliedByCopyAssertion(AssertionDsc* copyAssertion, AssertionDsc* depAssertion, ASSERT_TP& result); void optImpliedByConstAssertion(AssertionDsc* curAssertion, ASSERT_TP& result); #ifdef DEBUG void optPrintAssertion(AssertionDsc* newAssertion, AssertionIndex assertionIndex = 0); void optPrintAssertionIndex(AssertionIndex index); void optPrintAssertionIndices(ASSERT_TP assertions); void optDebugCheckAssertion(AssertionDsc* assertion); void optDebugCheckAssertions(AssertionIndex AssertionIndex); #endif static void optDumpAssertionIndices(const char* header, ASSERT_TP assertions, const char* footer = nullptr); static void optDumpAssertionIndices(ASSERT_TP assertions, const char* footer = nullptr); void optAddCopies(); /************************************************************************** * Range checks *************************************************************************/ public: struct LoopCloneVisitorInfo { LoopCloneContext* context; unsigned loopNum; Statement* stmt; LoopCloneVisitorInfo(LoopCloneContext* context, unsigned loopNum, Statement* stmt) : context(context), loopNum(loopNum), stmt(nullptr) { } }; bool optIsStackLocalInvariant(unsigned loopNum, unsigned lclNum); bool optExtractArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum); bool optReconstructArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum); bool optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneContext* context); static fgWalkPreFn optCanOptimizeByLoopCloningVisitor; fgWalkResult optCanOptimizeByLoopCloning(GenTree* tree, LoopCloneVisitorInfo* info); bool optObtainLoopCloningOpts(LoopCloneContext* context); bool optIsLoopClonable(unsigned loopInd); bool optLoopCloningEnabled(); #ifdef DEBUG void optDebugLogLoopCloning(BasicBlock* block, Statement* insertBefore); #endif void optPerformStaticOptimizations(unsigned loopNum, LoopCloneContext* context DEBUGARG(bool fastPath)); bool optComputeDerefConditions(unsigned loopNum, LoopCloneContext* context); bool optDeriveLoopCloningConditions(unsigned loopNum, LoopCloneContext* context); BasicBlock* optInsertLoopChoiceConditions(LoopCloneContext* context, unsigned loopNum, BasicBlock* slowHead, BasicBlock* insertAfter); protected: ssize_t optGetArrayRefScaleAndIndex(GenTree* mul, GenTree** pIndex DEBUGARG(bool bRngChk)); bool optReachWithoutCall(BasicBlock* srcBB, BasicBlock* dstBB); protected: bool optLoopsMarked; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX RegAlloc XX XX XX XX Does the register allocation and puts the remaining lclVars on the stack XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: regNumber raUpdateRegStateForArg(RegState* regState, LclVarDsc* argDsc); void raMarkStkVars(); #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE #if defined(TARGET_AMD64) static bool varTypeNeedsPartialCalleeSave(var_types type) { assert(type != TYP_STRUCT); return (type == TYP_SIMD32); } #elif defined(TARGET_ARM64) static bool varTypeNeedsPartialCalleeSave(var_types type) { assert(type != TYP_STRUCT); // ARM64 ABI FP Callee save registers only require Callee to save lower 8 Bytes // For SIMD types longer than 8 bytes Caller is responsible for saving and restoring 
Upper bytes. return ((type == TYP_SIMD16) || (type == TYP_SIMD12)); } #else // !defined(TARGET_AMD64) && !defined(TARGET_ARM64) #error("Unknown target architecture for FEATURE_SIMD") #endif // !defined(TARGET_AMD64) && !defined(TARGET_ARM64) #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE protected: // Some things are used by both LSRA and regpredict allocators. FrameType rpFrameType; bool rpMustCreateEBPCalled; // Set to true after we have called rpMustCreateEBPFrame once bool rpMustCreateEBPFrame(INDEBUG(const char** wbReason)); private: Lowering* m_pLowering; // Lowering; needed to Lower IR that's added or modified after Lowering. LinearScanInterface* m_pLinearScan; // Linear Scan allocator /* raIsVarargsStackArg is called by raMaskStkVars and by lvaComputeRefCounts. It identifies the special case where a varargs function has a parameter passed on the stack, other than the special varargs handle. Such parameters require special treatment, because they cannot be tracked by the GC (their offsets in the stack are not known at compile time). */ bool raIsVarargsStackArg(unsigned lclNum) { #ifdef TARGET_X86 LclVarDsc* varDsc = lvaGetDesc(lclNum); assert(varDsc->lvIsParam); return (info.compIsVarArgs && !varDsc->lvIsRegArg && (lclNum != lvaVarargsHandleArg)); #else // TARGET_X86 return false; #endif // TARGET_X86 } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX EEInterface XX XX XX XX Get to the class and method info from the Execution Engine given XX XX tokens for the class and method XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: // Get handles void eeGetCallInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedToken, CORINFO_CALLINFO_FLAGS flags, CORINFO_CALL_INFO* pResult); void eeGetFieldInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS flags, CORINFO_FIELD_INFO* pResult); // Get the flags bool eeIsValueClass(CORINFO_CLASS_HANDLE clsHnd); bool eeIsIntrinsic(CORINFO_METHOD_HANDLE ftn); bool eeIsFieldStatic(CORINFO_FIELD_HANDLE fldHnd); var_types eeGetFieldType(CORINFO_FIELD_HANDLE fldHnd, CORINFO_CLASS_HANDLE* pStructHnd = nullptr); #if defined(DEBUG) || defined(FEATURE_JIT_METHOD_PERF) || defined(FEATURE_SIMD) || defined(TRACK_LSRA_STATS) const char* eeGetMethodName(CORINFO_METHOD_HANDLE hnd, const char** className); const char* eeGetMethodFullName(CORINFO_METHOD_HANDLE hnd); unsigned compMethodHash(CORINFO_METHOD_HANDLE methodHandle); bool eeIsNativeMethod(CORINFO_METHOD_HANDLE method); CORINFO_METHOD_HANDLE eeGetMethodHandleForNative(CORINFO_METHOD_HANDLE method); #endif var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig); var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig, bool* isPinned); CORINFO_CLASS_HANDLE eeGetArgClass(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE list); CORINFO_CLASS_HANDLE eeGetClassFromContext(CORINFO_CONTEXT_HANDLE context); unsigned eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig); static unsigned eeGetArgSizeAlignment(var_types type, bool isFloatHfa); // VOM info, method sigs void eeGetSig(unsigned sigTok, CORINFO_MODULE_HANDLE scope, CORINFO_CONTEXT_HANDLE context, CORINFO_SIG_INFO* retSig); void eeGetCallSiteSig(unsigned sigTok, CORINFO_MODULE_HANDLE scope, CORINFO_CONTEXT_HANDLE context, 
CORINFO_SIG_INFO* retSig); void eeGetMethodSig(CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* retSig, CORINFO_CLASS_HANDLE owner = nullptr); // Method entry-points, instrs CORINFO_METHOD_HANDLE eeMarkNativeTarget(CORINFO_METHOD_HANDLE method); CORINFO_EE_INFO eeInfo; bool eeInfoInitialized; CORINFO_EE_INFO* eeGetEEInfo(); // Gets the offset of a SDArray's first element static unsigned eeGetArrayDataOffset(); // Get the offset of a MDArray's first element static unsigned eeGetMDArrayDataOffset(unsigned rank); // Get the offset of a MDArray's dimension length for a given dimension. static unsigned eeGetMDArrayLengthOffset(unsigned rank, unsigned dimension); // Get the offset of a MDArray's lower bound for a given dimension. static unsigned eeGetMDArrayLowerBoundOffset(unsigned rank, unsigned dimension); GenTree* eeGetPInvokeCookie(CORINFO_SIG_INFO* szMetaSig); // Returns the page size for the target machine as reported by the EE. target_size_t eeGetPageSize() { return (target_size_t)eeGetEEInfo()->osPageSize; } //------------------------------------------------------------------------ // VirtualStubParam: virtual stub dispatch extra parameter (slot address). // // It represents Abi and target specific registers for the parameter. // class VirtualStubParamInfo { public: VirtualStubParamInfo(bool isCoreRTABI) { #if defined(TARGET_X86) reg = REG_EAX; regMask = RBM_EAX; #elif defined(TARGET_AMD64) if (isCoreRTABI) { reg = REG_R10; regMask = RBM_R10; } else { reg = REG_R11; regMask = RBM_R11; } #elif defined(TARGET_ARM) if (isCoreRTABI) { reg = REG_R12; regMask = RBM_R12; } else { reg = REG_R4; regMask = RBM_R4; } #elif defined(TARGET_ARM64) reg = REG_R11; regMask = RBM_R11; #else #error Unsupported or unset target architecture #endif } regNumber GetReg() const { return reg; } _regMask_enum GetRegMask() const { return regMask; } private: regNumber reg; _regMask_enum regMask; }; VirtualStubParamInfo* virtualStubParamInfo; bool IsTargetAbi(CORINFO_RUNTIME_ABI abi) { return eeGetEEInfo()->targetAbi == abi; } bool generateCFIUnwindCodes() { #if defined(FEATURE_CFI_SUPPORT) return TargetOS::IsUnix && IsTargetAbi(CORINFO_CORERT_ABI); #else return false; #endif } // Debugging support - Line number info void eeGetStmtOffsets(); unsigned eeBoundariesCount; ICorDebugInfo::OffsetMapping* eeBoundaries; // Boundaries to report to the EE void eeSetLIcount(unsigned count); void eeSetLIinfo(unsigned which, UNATIVE_OFFSET offs, IPmappingDscKind kind, const ILLocation& loc); void eeSetLIdone(); #ifdef DEBUG static void eeDispILOffs(IL_OFFSET offs); static void eeDispSourceMappingOffs(uint32_t offs); static void eeDispLineInfo(const ICorDebugInfo::OffsetMapping* line); void eeDispLineInfos(); #endif // DEBUG // Debugging support - Local var info void eeGetVars(); unsigned eeVarsCount; struct VarResultInfo { UNATIVE_OFFSET startOffset; UNATIVE_OFFSET endOffset; DWORD varNumber; CodeGenInterface::siVarLoc loc; } * eeVars; void eeSetLVcount(unsigned count); void eeSetLVinfo(unsigned which, UNATIVE_OFFSET startOffs, UNATIVE_OFFSET length, unsigned varNum, const CodeGenInterface::siVarLoc& loc); void eeSetLVdone(); #ifdef DEBUG void eeDispVar(ICorDebugInfo::NativeVarInfo* var); void eeDispVars(CORINFO_METHOD_HANDLE ftn, ULONG32 cVars, ICorDebugInfo::NativeVarInfo* vars); #endif // DEBUG // ICorJitInfo wrappers void eeReserveUnwindInfo(bool isFunclet, bool isColdCode, ULONG unwindSize); void eeAllocUnwindInfo(BYTE* pHotCode, BYTE* pColdCode, ULONG startOffset, ULONG endOffset, ULONG unwindSize, BYTE* pUnwindBlock, 
CorJitFuncKind funcKind); void eeSetEHcount(unsigned cEH); void eeSetEHinfo(unsigned EHnumber, const CORINFO_EH_CLAUSE* clause); WORD eeGetRelocTypeHint(void* target); // ICorStaticInfo wrapper functions bool eeTryResolveToken(CORINFO_RESOLVED_TOKEN* resolvedToken); #if defined(UNIX_AMD64_ABI) #ifdef DEBUG static void dumpSystemVClassificationType(SystemVClassificationType ct); #endif // DEBUG void eeGetSystemVAmd64PassStructInRegisterDescriptor( /*IN*/ CORINFO_CLASS_HANDLE structHnd, /*OUT*/ SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr); #endif // UNIX_AMD64_ABI template <typename ParamType> bool eeRunWithErrorTrap(void (*function)(ParamType*), ParamType* param) { return eeRunWithErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param)); } bool eeRunWithErrorTrapImp(void (*function)(void*), void* param); template <typename ParamType> bool eeRunWithSPMIErrorTrap(void (*function)(ParamType*), ParamType* param) { return eeRunWithSPMIErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param)); } bool eeRunWithSPMIErrorTrapImp(void (*function)(void*), void* param); // Utility functions const char* eeGetFieldName(CORINFO_FIELD_HANDLE fieldHnd, const char** classNamePtr = nullptr); #if defined(DEBUG) const WCHAR* eeGetCPString(size_t stringHandle); #endif const char* eeGetClassName(CORINFO_CLASS_HANDLE clsHnd); static CORINFO_METHOD_HANDLE eeFindHelper(unsigned helper); static CorInfoHelpFunc eeGetHelperNum(CORINFO_METHOD_HANDLE method); static bool IsSharedStaticHelper(GenTree* tree); static bool IsGcSafePoint(GenTreeCall* call); static CORINFO_FIELD_HANDLE eeFindJitDataOffs(unsigned jitDataOffs); // returns true/false if 'field' is a Jit Data offset static bool eeIsJitDataOffs(CORINFO_FIELD_HANDLE field); // returns a number < 0 if 'field' is not a Jit Data offset, otherwise the data offset (limited to 2GB) static int eeGetJitDataOffs(CORINFO_FIELD_HANDLE field); /*****************************************************************************/ /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX CodeGenerator XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: CodeGenInterface* codeGen; // Record the instr offset mapping to the generated code jitstd::list<IPmappingDsc> genIPmappings; #ifdef DEBUG jitstd::list<PreciseIPMapping> genPreciseIPmappings; #endif // Managed RetVal - A side hash table meant to record the mapping from a // GT_CALL node to its debug info. This info is used to emit sequence points // that can be used by debugger to determine the native offset at which the // managed RetVal will be available. // // In fact we can store debug info in a GT_CALL node. This was ruled out in // favor of a side table for two reasons: 1) We need debug info for only those // GT_CALL nodes (created during importation) that correspond to an IL call and // whose return type is other than TYP_VOID. 2) GT_CALL node is a frequently used // structure and IL offset is needed only when generating debuggable code. Therefore // it is desirable to avoid memory size penalty in retail scenarios. 
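    // Illustrative sketch only (not from the JIT sources): a consumer of this side table would
    // typically probe it per call node, along the lines of
    //
    //     DebugInfo di;
    //     if (genCallSite2DebugInfoMap->Lookup(callNode, &di))
    //     {
    //         // emit a sequence point from 'di' so the debugger can locate the managed RetVal
    //     }
    //
    // where 'callNode' is a GT_CALL with a non-void managed return type; the exact call sites
    // that consult the map live in codegen and are not shown here.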
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, DebugInfo> CallSiteDebugInfoTable; CallSiteDebugInfoTable* genCallSite2DebugInfoMap; unsigned genReturnLocal; // Local number for the return value when applicable. BasicBlock* genReturnBB; // jumped to when not optimizing for speed. // The following properties are part of CodeGenContext. Getters are provided here for // convenience and backward compatibility, but the properties can only be set by invoking // the setter on CodeGenContext directly. emitter* GetEmitter() const { return codeGen->GetEmitter(); } bool isFramePointerUsed() const { return codeGen->isFramePointerUsed(); } bool GetInterruptible() { return codeGen->GetInterruptible(); } void SetInterruptible(bool value) { codeGen->SetInterruptible(value); } #if DOUBLE_ALIGN const bool genDoubleAlign() { return codeGen->doDoubleAlign(); } DWORD getCanDoubleAlign(); bool shouldDoubleAlign(unsigned refCntStk, unsigned refCntReg, weight_t refCntWtdReg, unsigned refCntStkParam, weight_t refCntWtdStkDbl); #endif // DOUBLE_ALIGN bool IsFullPtrRegMapRequired() { return codeGen->IsFullPtrRegMapRequired(); } void SetFullPtrRegMapRequired(bool value) { codeGen->SetFullPtrRegMapRequired(value); } // Things that MAY belong either in CodeGen or CodeGenContext #if defined(FEATURE_EH_FUNCLETS) FuncInfoDsc* compFuncInfos; unsigned short compCurrFuncIdx; unsigned short compFuncInfoCount; unsigned short compFuncCount() { assert(fgFuncletsCreated); return compFuncInfoCount; } #else // !FEATURE_EH_FUNCLETS // This is a no-op when there are no funclets! void genUpdateCurrentFunclet(BasicBlock* block) { return; } FuncInfoDsc compFuncInfoRoot; static const unsigned compCurrFuncIdx = 0; unsigned short compFuncCount() { return 1; } #endif // !FEATURE_EH_FUNCLETS FuncInfoDsc* funCurrentFunc(); void funSetCurrentFunc(unsigned funcIdx); FuncInfoDsc* funGetFunc(unsigned funcIdx); unsigned int funGetFuncIdx(BasicBlock* block); // LIVENESS VARSET_TP compCurLife; // current live variables GenTree* compCurLifeTree; // node after which compCurLife has been computed // Compare the given "newLife" with last set of live variables and update // codeGen "gcInfo", siScopes, "regSet" with the new variable's homes/liveness. template <bool ForCodeGen> void compChangeLife(VARSET_VALARG_TP newLife); // Update the GC's masks, register's masks and reports change on variable's homes given a set of // current live variables if changes have happened since "compCurLife". template <bool ForCodeGen> inline void compUpdateLife(VARSET_VALARG_TP newLife); // Gets a register mask that represent the kill set for a helper call since // not all JIT Helper calls follow the standard ABI on the target architecture. regMaskTP compHelperCallKillSet(CorInfoHelpFunc helper); #ifdef TARGET_ARM // Requires that "varDsc" be a promoted struct local variable being passed as an argument, beginning at // "firstArgRegNum", which is assumed to have already been aligned to the register alignment restriction of the // struct type. Adds bits to "*pArgSkippedRegMask" for any argument registers *not* used in passing "varDsc" -- // i.e., internal "holes" caused by internal alignment constraints. For example, if the struct contained an int and // a double, and we at R0 (on ARM), then R1 would be skipped, and the bit for R1 would be added to the mask. 
void fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc, unsigned firstArgRegNum, regMaskTP* pArgSkippedRegMask); #endif // TARGET_ARM // If "tree" is a indirection (GT_IND, or GT_OBJ) whose arg is an ADDR, whose arg is a LCL_VAR, return that LCL_VAR // node, else NULL. static GenTreeLclVar* fgIsIndirOfAddrOfLocal(GenTree* tree); // This map is indexed by GT_OBJ nodes that are address of promoted struct variables, which // have been annotated with the GTF_VAR_DEATH flag. If such a node is *not* mapped in this // table, one may assume that all the (tracked) field vars die at this GT_OBJ. Otherwise, // the node maps to a pointer to a VARSET_TP, containing set bits for each of the tracked field // vars of the promoted struct local that go dead at the given node (the set bits are the bits // for the tracked var indices of the field vars, as in a live var set). // // The map is allocated on demand so all map operations should use one of the following three // wrapper methods. NodeToVarsetPtrMap* m_promotedStructDeathVars; NodeToVarsetPtrMap* GetPromotedStructDeathVars() { if (m_promotedStructDeathVars == nullptr) { m_promotedStructDeathVars = new (getAllocator()) NodeToVarsetPtrMap(getAllocator()); } return m_promotedStructDeathVars; } void ClearPromotedStructDeathVars() { if (m_promotedStructDeathVars != nullptr) { m_promotedStructDeathVars->RemoveAll(); } } bool LookupPromotedStructDeathVars(GenTree* tree, VARSET_TP** bits) { *bits = nullptr; bool result = false; if (m_promotedStructDeathVars != nullptr) { result = m_promotedStructDeathVars->Lookup(tree, bits); } return result; } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX UnwindInfo XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #if !defined(__GNUC__) #pragma region Unwind information #endif public: // // Infrastructure functions: start/stop/reserve/emit. // void unwindBegProlog(); void unwindEndProlog(); void unwindBegEpilog(); void unwindEndEpilog(); void unwindReserve(); void unwindEmit(void* pHotCode, void* pColdCode); // // Specific unwind information functions: called by code generation to indicate a particular // prolog or epilog unwindable instruction has been generated. // void unwindPush(regNumber reg); void unwindAllocStack(unsigned size); void unwindSetFrameReg(regNumber reg, unsigned offset); void unwindSaveReg(regNumber reg, unsigned offset); #if defined(TARGET_ARM) void unwindPushMaskInt(regMaskTP mask); void unwindPushMaskFloat(regMaskTP mask); void unwindPopMaskInt(regMaskTP mask); void unwindPopMaskFloat(regMaskTP mask); void unwindBranch16(); // The epilog terminates with a 16-bit branch (e.g., "bx lr") void unwindNop(unsigned codeSizeInBytes); // Generate unwind NOP code. 'codeSizeInBytes' is 2 or 4 bytes. Only // called via unwindPadding(). void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last // instruction and the current location. #endif // TARGET_ARM #if defined(TARGET_ARM64) void unwindNop(); void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last // instruction and the current location. void unwindSaveReg(regNumber reg, int offset); // str reg, [sp, #offset] void unwindSaveRegPreindexed(regNumber reg, int offset); // str reg, [sp, #offset]! 
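    // Illustrative note (a sketch, not part of the JIT sources): codegen reports ARM64 prolog
    // unwind codes through these helpers in the order the instructions are emitted, e.g.
    //
    //     unwindBegProlog();
    //     unwindSaveRegPairPreindexed(REG_FP, REG_LR, -64); // stp fp, lr, [sp, #-64]!
    //     unwindSaveRegPair(REG_R19, REG_R20, 16);          // stp x19, x20, [sp, #16]
    //     unwindSetFrameReg(REG_FPBASE, 0);                 // mov fp, sp
    //     unwindEndProlog();
    //
    // The registers and offsets above are hypothetical; the actual sequence mirrors whatever
    // prolog the code generator produced.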
void unwindSaveRegPair(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset] void unwindSaveRegPairPreindexed(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset]! void unwindSaveNext(); // unwind code: save_next void unwindReturn(regNumber reg); // ret lr #endif // defined(TARGET_ARM64) // // Private "helper" functions for the unwind implementation. // private: #if defined(FEATURE_EH_FUNCLETS) void unwindGetFuncLocations(FuncInfoDsc* func, bool getHotSectionData, /* OUT */ emitLocation** ppStartLoc, /* OUT */ emitLocation** ppEndLoc); #endif // FEATURE_EH_FUNCLETS void unwindReserveFunc(FuncInfoDsc* func); void unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode); #if defined(TARGET_AMD64) || (defined(TARGET_X86) && defined(FEATURE_EH_FUNCLETS)) void unwindReserveFuncHelper(FuncInfoDsc* func, bool isHotCode); void unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pColdCode, bool isHotCode); #endif // TARGET_AMD64 || (TARGET_X86 && FEATURE_EH_FUNCLETS) UNATIVE_OFFSET unwindGetCurrentOffset(FuncInfoDsc* func); #if defined(TARGET_AMD64) void unwindBegPrologWindows(); void unwindPushWindows(regNumber reg); void unwindAllocStackWindows(unsigned size); void unwindSetFrameRegWindows(regNumber reg, unsigned offset); void unwindSaveRegWindows(regNumber reg, unsigned offset); #ifdef UNIX_AMD64_ABI void unwindSaveRegCFI(regNumber reg, unsigned offset); #endif // UNIX_AMD64_ABI #elif defined(TARGET_ARM) void unwindPushPopMaskInt(regMaskTP mask, bool useOpsize16); void unwindPushPopMaskFloat(regMaskTP mask); #endif // TARGET_ARM #if defined(FEATURE_CFI_SUPPORT) short mapRegNumToDwarfReg(regNumber reg); void createCfiCode(FuncInfoDsc* func, UNATIVE_OFFSET codeOffset, UCHAR opcode, short dwarfReg, INT offset = 0); void unwindPushPopCFI(regNumber reg); void unwindBegPrologCFI(); void unwindPushPopMaskCFI(regMaskTP regMask, bool isFloat); void unwindAllocStackCFI(unsigned size); void unwindSetFrameRegCFI(regNumber reg, unsigned offset); void unwindEmitFuncCFI(FuncInfoDsc* func, void* pHotCode, void* pColdCode); #ifdef DEBUG void DumpCfiInfo(bool isHotCode, UNATIVE_OFFSET startOffset, UNATIVE_OFFSET endOffset, DWORD cfiCodeBytes, const CFI_CODE* const pCfiCode); #endif #endif // FEATURE_CFI_SUPPORT #if !defined(__GNUC__) #pragma endregion // Note: region is NOT under !defined(__GNUC__) #endif /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX SIMD XX XX XX XX Info about SIMD types, methods and the SIMD assembly (i.e. the assembly XX XX that contains the distinguished, well-known SIMD type definitions). 
XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ bool IsBaselineSimdIsaSupported() { #ifdef FEATURE_SIMD #if defined(TARGET_XARCH) CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2; #elif defined(TARGET_ARM64) CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd; #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 return compOpportunisticallyDependsOn(minimumIsa); #else return false; #endif } #if defined(DEBUG) bool IsBaselineSimdIsaSupportedDebugOnly() { #ifdef FEATURE_SIMD #if defined(TARGET_XARCH) CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2; #elif defined(TARGET_ARM64) CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd; #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 return compIsaSupportedDebugOnly(minimumIsa); #else return false; #endif // FEATURE_SIMD } #endif // DEBUG // Get highest available level for SIMD codegen SIMDLevel getSIMDSupportLevel() { #if defined(TARGET_XARCH) if (compOpportunisticallyDependsOn(InstructionSet_AVX2)) { return SIMD_AVX2_Supported; } if (compOpportunisticallyDependsOn(InstructionSet_SSE42)) { return SIMD_SSE4_Supported; } // min bar is SSE2 return SIMD_SSE2_Supported; #else assert(!"Available instruction set(s) for SIMD codegen is not defined for target arch"); unreached(); return SIMD_Not_Supported; #endif } bool isIntrinsicType(CORINFO_CLASS_HANDLE clsHnd) { return info.compCompHnd->isIntrinsicType(clsHnd); } const char* getClassNameFromMetadata(CORINFO_CLASS_HANDLE cls, const char** namespaceName) { return info.compCompHnd->getClassNameFromMetadata(cls, namespaceName); } CORINFO_CLASS_HANDLE getTypeInstantiationArgument(CORINFO_CLASS_HANDLE cls, unsigned index) { return info.compCompHnd->getTypeInstantiationArgument(cls, index); } #ifdef FEATURE_SIMD // Should we support SIMD intrinsics? bool featureSIMD; // Should we recognize SIMD types? // We always do this on ARM64 to support HVA types. bool supportSIMDTypes() { #ifdef TARGET_ARM64 return true; #else return featureSIMD; #endif } // Have we identified any SIMD types? // This is currently used by struct promotion to avoid getting type information for a struct // field to see if it is a SIMD type, if we haven't seen any SIMD types or operations in // the method. bool _usesSIMDTypes; bool usesSIMDTypes() { return _usesSIMDTypes; } void setUsesSIMDTypes(bool value) { _usesSIMDTypes = value; } // This is a temp lclVar allocated on the stack as TYP_SIMD. It is used to implement intrinsics // that require indexed access to the individual fields of the vector, which is not well supported // by the hardware. It is allocated when/if such situations are encountered during Lowering. 
unsigned lvaSIMDInitTempVarNum; struct SIMDHandlesCache { // SIMD Types CORINFO_CLASS_HANDLE SIMDFloatHandle; CORINFO_CLASS_HANDLE SIMDDoubleHandle; CORINFO_CLASS_HANDLE SIMDIntHandle; CORINFO_CLASS_HANDLE SIMDUShortHandle; CORINFO_CLASS_HANDLE SIMDUByteHandle; CORINFO_CLASS_HANDLE SIMDShortHandle; CORINFO_CLASS_HANDLE SIMDByteHandle; CORINFO_CLASS_HANDLE SIMDLongHandle; CORINFO_CLASS_HANDLE SIMDUIntHandle; CORINFO_CLASS_HANDLE SIMDULongHandle; CORINFO_CLASS_HANDLE SIMDNIntHandle; CORINFO_CLASS_HANDLE SIMDNUIntHandle; CORINFO_CLASS_HANDLE SIMDVector2Handle; CORINFO_CLASS_HANDLE SIMDVector3Handle; CORINFO_CLASS_HANDLE SIMDVector4Handle; CORINFO_CLASS_HANDLE SIMDVectorHandle; #ifdef FEATURE_HW_INTRINSICS #if defined(TARGET_ARM64) CORINFO_CLASS_HANDLE Vector64FloatHandle; CORINFO_CLASS_HANDLE Vector64DoubleHandle; CORINFO_CLASS_HANDLE Vector64IntHandle; CORINFO_CLASS_HANDLE Vector64UShortHandle; CORINFO_CLASS_HANDLE Vector64UByteHandle; CORINFO_CLASS_HANDLE Vector64ShortHandle; CORINFO_CLASS_HANDLE Vector64ByteHandle; CORINFO_CLASS_HANDLE Vector64LongHandle; CORINFO_CLASS_HANDLE Vector64UIntHandle; CORINFO_CLASS_HANDLE Vector64ULongHandle; CORINFO_CLASS_HANDLE Vector64NIntHandle; CORINFO_CLASS_HANDLE Vector64NUIntHandle; #endif // defined(TARGET_ARM64) CORINFO_CLASS_HANDLE Vector128FloatHandle; CORINFO_CLASS_HANDLE Vector128DoubleHandle; CORINFO_CLASS_HANDLE Vector128IntHandle; CORINFO_CLASS_HANDLE Vector128UShortHandle; CORINFO_CLASS_HANDLE Vector128UByteHandle; CORINFO_CLASS_HANDLE Vector128ShortHandle; CORINFO_CLASS_HANDLE Vector128ByteHandle; CORINFO_CLASS_HANDLE Vector128LongHandle; CORINFO_CLASS_HANDLE Vector128UIntHandle; CORINFO_CLASS_HANDLE Vector128ULongHandle; CORINFO_CLASS_HANDLE Vector128NIntHandle; CORINFO_CLASS_HANDLE Vector128NUIntHandle; #if defined(TARGET_XARCH) CORINFO_CLASS_HANDLE Vector256FloatHandle; CORINFO_CLASS_HANDLE Vector256DoubleHandle; CORINFO_CLASS_HANDLE Vector256IntHandle; CORINFO_CLASS_HANDLE Vector256UShortHandle; CORINFO_CLASS_HANDLE Vector256UByteHandle; CORINFO_CLASS_HANDLE Vector256ShortHandle; CORINFO_CLASS_HANDLE Vector256ByteHandle; CORINFO_CLASS_HANDLE Vector256LongHandle; CORINFO_CLASS_HANDLE Vector256UIntHandle; CORINFO_CLASS_HANDLE Vector256ULongHandle; CORINFO_CLASS_HANDLE Vector256NIntHandle; CORINFO_CLASS_HANDLE Vector256NUIntHandle; #endif // defined(TARGET_XARCH) #endif // FEATURE_HW_INTRINSICS SIMDHandlesCache() { memset(this, 0, sizeof(*this)); } }; SIMDHandlesCache* m_simdHandleCache; // Get an appropriate "zero" for the given type and class handle. GenTree* gtGetSIMDZero(var_types simdType, CorInfoType simdBaseJitType, CORINFO_CLASS_HANDLE simdHandle); // Get the handle for a SIMD type. CORINFO_CLASS_HANDLE gtGetStructHandleForSIMD(var_types simdType, CorInfoType simdBaseJitType) { if (m_simdHandleCache == nullptr) { // This may happen if the JIT generates SIMD node on its own, without importing them. // Otherwise getBaseJitTypeAndSizeOfSIMDType should have created the cache. 
return NO_CLASS_HANDLE; } if (simdBaseJitType == CORINFO_TYPE_FLOAT) { switch (simdType) { case TYP_SIMD8: return m_simdHandleCache->SIMDVector2Handle; case TYP_SIMD12: return m_simdHandleCache->SIMDVector3Handle; case TYP_SIMD16: if ((getSIMDVectorType() == TYP_SIMD32) || (m_simdHandleCache->SIMDVector4Handle != NO_CLASS_HANDLE)) { return m_simdHandleCache->SIMDVector4Handle; } break; case TYP_SIMD32: break; default: unreached(); } } assert(emitTypeSize(simdType) <= largestEnregisterableStructSize()); switch (simdBaseJitType) { case CORINFO_TYPE_FLOAT: return m_simdHandleCache->SIMDFloatHandle; case CORINFO_TYPE_DOUBLE: return m_simdHandleCache->SIMDDoubleHandle; case CORINFO_TYPE_INT: return m_simdHandleCache->SIMDIntHandle; case CORINFO_TYPE_USHORT: return m_simdHandleCache->SIMDUShortHandle; case CORINFO_TYPE_UBYTE: return m_simdHandleCache->SIMDUByteHandle; case CORINFO_TYPE_SHORT: return m_simdHandleCache->SIMDShortHandle; case CORINFO_TYPE_BYTE: return m_simdHandleCache->SIMDByteHandle; case CORINFO_TYPE_LONG: return m_simdHandleCache->SIMDLongHandle; case CORINFO_TYPE_UINT: return m_simdHandleCache->SIMDUIntHandle; case CORINFO_TYPE_ULONG: return m_simdHandleCache->SIMDULongHandle; case CORINFO_TYPE_NATIVEINT: return m_simdHandleCache->SIMDNIntHandle; case CORINFO_TYPE_NATIVEUINT: return m_simdHandleCache->SIMDNUIntHandle; default: assert(!"Didn't find a class handle for simdType"); } return NO_CLASS_HANDLE; } // Returns true if this is a SIMD type that should be considered an opaque // vector type (i.e. do not analyze or promote its fields). // Note that all but the fixed vector types are opaque, even though they may // actually be declared as having fields. bool isOpaqueSIMDType(CORINFO_CLASS_HANDLE structHandle) const { return ((m_simdHandleCache != nullptr) && (structHandle != m_simdHandleCache->SIMDVector2Handle) && (structHandle != m_simdHandleCache->SIMDVector3Handle) && (structHandle != m_simdHandleCache->SIMDVector4Handle)); } // Returns true if the tree corresponds to a TYP_SIMD lcl var. // Note that both SIMD vector args and locals are mared as lvSIMDType = true, but // type of an arg node is TYP_BYREF and a local node is TYP_SIMD or TYP_STRUCT. bool isSIMDTypeLocal(GenTree* tree) { return tree->OperIsLocal() && lvaGetDesc(tree->AsLclVarCommon())->lvSIMDType; } // Returns true if the lclVar is an opaque SIMD type. bool isOpaqueSIMDLclVar(const LclVarDsc* varDsc) const { if (!varDsc->lvSIMDType) { return false; } return isOpaqueSIMDType(varDsc->GetStructHnd()); } static bool isRelOpSIMDIntrinsic(SIMDIntrinsicID intrinsicId) { return (intrinsicId == SIMDIntrinsicEqual); } // Returns base JIT type of a TYP_SIMD local. // Returns CORINFO_TYPE_UNDEF if the local is not TYP_SIMD. 
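    // For illustration (sketch, assumed usage): a local typed as System.Numerics.Vector3 has a
    // base JIT type of CORINFO_TYPE_FLOAT, so a caller can do something like
    //
    //     CorInfoType simdBaseJitType = getBaseJitTypeOfSIMDLocal(lclNode);
    //     if (simdBaseJitType != CORINFO_TYPE_UNDEF)
    //     {
    //         var_types simdBaseType = JITtype2varType(simdBaseJitType);
    //         // ... use simdBaseType ...
    //     }
    //
    // 'lclNode' and the use of JITtype2varType here are only for exposition.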
CorInfoType getBaseJitTypeOfSIMDLocal(GenTree* tree) { if (isSIMDTypeLocal(tree)) { return lvaGetDesc(tree->AsLclVarCommon())->GetSimdBaseJitType(); } return CORINFO_TYPE_UNDEF; } bool isSIMDClass(CORINFO_CLASS_HANDLE clsHnd) { if (isIntrinsicType(clsHnd)) { const char* namespaceName = nullptr; (void)getClassNameFromMetadata(clsHnd, &namespaceName); return strcmp(namespaceName, "System.Numerics") == 0; } return false; } bool isSIMDClass(typeInfo* pTypeInfo) { return pTypeInfo->IsStruct() && isSIMDClass(pTypeInfo->GetClassHandleForValueClass()); } bool isHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd) { #ifdef FEATURE_HW_INTRINSICS if (isIntrinsicType(clsHnd)) { const char* namespaceName = nullptr; (void)getClassNameFromMetadata(clsHnd, &namespaceName); return strcmp(namespaceName, "System.Runtime.Intrinsics") == 0; } #endif // FEATURE_HW_INTRINSICS return false; } bool isHWSIMDClass(typeInfo* pTypeInfo) { #ifdef FEATURE_HW_INTRINSICS return pTypeInfo->IsStruct() && isHWSIMDClass(pTypeInfo->GetClassHandleForValueClass()); #else return false; #endif } bool isSIMDorHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd) { return isSIMDClass(clsHnd) || isHWSIMDClass(clsHnd); } bool isSIMDorHWSIMDClass(typeInfo* pTypeInfo) { return isSIMDClass(pTypeInfo) || isHWSIMDClass(pTypeInfo); } // Get the base (element) type and size in bytes for a SIMD type. Returns CORINFO_TYPE_UNDEF // if it is not a SIMD type or is an unsupported base JIT type. CorInfoType getBaseJitTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, unsigned* sizeBytes = nullptr); CorInfoType getBaseJitTypeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd) { return getBaseJitTypeAndSizeOfSIMDType(typeHnd, nullptr); } // Get SIMD Intrinsic info given the method handle. // Also sets typeHnd, argCount, baseType and sizeBytes out params. const SIMDIntrinsicInfo* getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* typeHnd, CORINFO_METHOD_HANDLE methodHnd, CORINFO_SIG_INFO* sig, bool isNewObj, unsigned* argCount, CorInfoType* simdBaseJitType, unsigned* sizeBytes); // Pops and returns GenTree node from importers type stack. // Normalizes TYP_STRUCT value in case of GT_CALL, GT_RET_EXPR and arg nodes. GenTree* impSIMDPopStack(var_types type, bool expectAddr = false, CORINFO_CLASS_HANDLE structType = nullptr); // Transforms operands and returns the SIMD intrinsic to be applied on // transformed operands to obtain given relop result. SIMDIntrinsicID impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId, CORINFO_CLASS_HANDLE typeHnd, unsigned simdVectorSize, CorInfoType* inOutBaseJitType, GenTree** op1, GenTree** op2); #if defined(TARGET_XARCH) // Transforms operands and returns the SIMD intrinsic to be applied on // transformed operands to obtain == comparison result. SIMDIntrinsicID impSIMDLongRelOpEqual(CORINFO_CLASS_HANDLE typeHnd, unsigned simdVectorSize, GenTree** op1, GenTree** op2); #endif // defined(TARGET_XARCH) void setLclRelatedToSIMDIntrinsic(GenTree* tree); bool areFieldsContiguous(GenTree* op1, GenTree* op2); bool areLocalFieldsContiguous(GenTreeLclFld* first, GenTreeLclFld* second); bool areArrayElementsContiguous(GenTree* op1, GenTree* op2); bool areArgumentsContiguous(GenTree* op1, GenTree* op2); GenTree* createAddressNodeForSIMDInit(GenTree* tree, unsigned simdSize); // check methodHnd to see if it is a SIMD method that is expanded as an intrinsic in the JIT. 
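    // Illustrative sketch (assumption, for exposition only): when the importer sees a call to a
    // recognized System.Numerics method, it can attempt the expansion and fall back to importing
    // an ordinary call if the expansion is not possible, e.g.
    //
    //     GenTree* simdTree = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methodHnd, sig, methodFlags, memberRef);
    //     if (simdTree != nullptr)
    //     {
    //         // recognized: 'simdTree' replaces the call in the imported IR
    //     }
    //     else
    //     {
    //         // not recognized: import as a normal call
    //     }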
GenTree* impSIMDIntrinsic(OPCODE opcode, GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, unsigned methodFlags, int memberRef); GenTree* getOp1ForConstructor(OPCODE opcode, GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd); // Whether SIMD vector occupies part of SIMD register. // SSE2: vector2f/3f are considered sub register SIMD types. // AVX: vector2f, 3f and 4f are all considered sub register SIMD types. bool isSubRegisterSIMDType(GenTreeSIMD* simdNode) { unsigned vectorRegisterByteLength; #if defined(TARGET_XARCH) // Calling the getSIMDVectorRegisterByteLength api causes the size of Vector<T> to be recorded // with the AOT compiler, so that it cannot change from aot compilation time to runtime // This api does not require such fixing as it merely pertains to the size of the simd type // relative to the Vector<T> size as used at compile time. (So detecting a vector length of 16 here // does not preclude the code from being used on a machine with a larger vector length.) if (getSIMDSupportLevel() < SIMD_AVX2_Supported) { vectorRegisterByteLength = 16; } else { vectorRegisterByteLength = 32; } #else vectorRegisterByteLength = getSIMDVectorRegisterByteLength(); #endif return (simdNode->GetSimdSize() < vectorRegisterByteLength); } // Get the type for the hardware SIMD vector. // This is the maximum SIMD type supported for this target. var_types getSIMDVectorType() { #if defined(TARGET_XARCH) if (getSIMDSupportLevel() == SIMD_AVX2_Supported) { return TYP_SIMD32; } else { // Verify and record that AVX2 isn't supported compVerifyInstructionSetUnusable(InstructionSet_AVX2); assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported); return TYP_SIMD16; } #elif defined(TARGET_ARM64) return TYP_SIMD16; #else assert(!"getSIMDVectorType() unimplemented on target arch"); unreached(); #endif } // Get the size of the SIMD type in bytes int getSIMDTypeSizeInBytes(CORINFO_CLASS_HANDLE typeHnd) { unsigned sizeBytes = 0; (void)getBaseJitTypeAndSizeOfSIMDType(typeHnd, &sizeBytes); return sizeBytes; } // Get the the number of elements of baseType of SIMD vector given by its size and baseType static int getSIMDVectorLength(unsigned simdSize, var_types baseType); // Get the the number of elements of baseType of SIMD vector given by its type handle int getSIMDVectorLength(CORINFO_CLASS_HANDLE typeHnd); // Get preferred alignment of SIMD type. int getSIMDTypeAlignment(var_types simdType); // Get the number of bytes in a System.Numeric.Vector<T> for the current compilation. // Note - cannot be used for System.Runtime.Intrinsic unsigned getSIMDVectorRegisterByteLength() { #if defined(TARGET_XARCH) if (getSIMDSupportLevel() == SIMD_AVX2_Supported) { return YMM_REGSIZE_BYTES; } else { // Verify and record that AVX2 isn't supported compVerifyInstructionSetUnusable(InstructionSet_AVX2); assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported); return XMM_REGSIZE_BYTES; } #elif defined(TARGET_ARM64) return FP_REGSIZE_BYTES; #else assert(!"getSIMDVectorRegisterByteLength() unimplemented on target arch"); unreached(); #endif } // The minimum and maximum possible number of bytes in a SIMD vector. 
// maxSIMDStructBytes // The minimum SIMD size supported by System.Numeric.Vectors or System.Runtime.Intrinsic // SSE: 16-byte Vector<T> and Vector128<T> // AVX: 32-byte Vector256<T> (Vector<T> is 16-byte) // AVX2: 32-byte Vector<T> and Vector256<T> unsigned int maxSIMDStructBytes() { #if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH) if (compOpportunisticallyDependsOn(InstructionSet_AVX)) { return YMM_REGSIZE_BYTES; } else { // Verify and record that AVX2 isn't supported compVerifyInstructionSetUnusable(InstructionSet_AVX2); assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported); return XMM_REGSIZE_BYTES; } #else return getSIMDVectorRegisterByteLength(); #endif } unsigned int minSIMDStructBytes() { return emitTypeSize(TYP_SIMD8); } public: // Returns the codegen type for a given SIMD size. static var_types getSIMDTypeForSize(unsigned size) { var_types simdType = TYP_UNDEF; if (size == 8) { simdType = TYP_SIMD8; } else if (size == 12) { simdType = TYP_SIMD12; } else if (size == 16) { simdType = TYP_SIMD16; } else if (size == 32) { simdType = TYP_SIMD32; } else { noway_assert(!"Unexpected size for SIMD type"); } return simdType; } private: unsigned getSIMDInitTempVarNum(var_types simdType); #else // !FEATURE_SIMD bool isOpaqueSIMDLclVar(LclVarDsc* varDsc) { return false; } #endif // FEATURE_SIMD public: //------------------------------------------------------------------------ // largestEnregisterableStruct: The size in bytes of the largest struct that can be enregistered. // // Notes: It is not guaranteed that the struct of this size or smaller WILL be a // candidate for enregistration. unsigned largestEnregisterableStructSize() { #ifdef FEATURE_SIMD #if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH) if (opts.IsReadyToRun()) { // Return constant instead of maxSIMDStructBytes, as maxSIMDStructBytes performs // checks that are effected by the current level of instruction set support would // otherwise cause the highest level of instruction set support to be reported to crossgen2. // and this api is only ever used as an optimization or assert, so no reporting should // ever happen. return YMM_REGSIZE_BYTES; } #endif // defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH) unsigned vectorRegSize = maxSIMDStructBytes(); assert(vectorRegSize >= TARGET_POINTER_SIZE); return vectorRegSize; #else // !FEATURE_SIMD return TARGET_POINTER_SIZE; #endif // !FEATURE_SIMD } // Use to determine if a struct *might* be a SIMD type. As this function only takes a size, many // structs will fit the criteria. bool structSizeMightRepresentSIMDType(size_t structSize) { #ifdef FEATURE_SIMD // Do not use maxSIMDStructBytes as that api in R2R on X86 and X64 may notify the JIT // about the size of a struct under the assumption that the struct size needs to be recorded. // By using largestEnregisterableStructSize here, the detail of whether or not Vector256<T> is // enregistered or not will not be messaged to the R2R compiler. return (structSize >= minSIMDStructBytes()) && (structSize <= largestEnregisterableStructSize()); #else return false; #endif // FEATURE_SIMD } #ifdef FEATURE_SIMD static bool vnEncodesResultTypeForSIMDIntrinsic(SIMDIntrinsicID intrinsicId); #endif // !FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS static bool vnEncodesResultTypeForHWIntrinsic(NamedIntrinsic hwIntrinsicID); #endif // FEATURE_HW_INTRINSICS private: // These routines need not be enclosed under FEATURE_SIMD since lvIsSIMDType() // is defined for both FEATURE_SIMD and !FEATURE_SIMD apropriately. 
The use // of this routines also avoids the need of #ifdef FEATURE_SIMD specific code. // Is this var is of type simd struct? bool lclVarIsSIMDType(unsigned varNum) { return lvaGetDesc(varNum)->lvIsSIMDType(); } // Is this Local node a SIMD local? bool lclVarIsSIMDType(GenTreeLclVarCommon* lclVarTree) { return lclVarIsSIMDType(lclVarTree->GetLclNum()); } // Returns true if the TYP_SIMD locals on stack are aligned at their // preferred byte boundary specified by getSIMDTypeAlignment(). // // As per the Intel manual, the preferred alignment for AVX vectors is // 32-bytes. It is not clear whether additional stack space used in // aligning stack is worth the benefit and for now will use 16-byte // alignment for AVX 256-bit vectors with unaligned load/stores to/from // memory. On x86, the stack frame is aligned to 4 bytes. We need to extend // existing support for double (8-byte) alignment to 16 or 32 byte // alignment for frames with local SIMD vars, if that is determined to be // profitable. // // On Amd64 and SysV, RSP+8 is aligned on entry to the function (before // prolog has run). This means that in RBP-based frames RBP will be 16-byte // aligned. For RSP-based frames these are only sometimes aligned, depending // on the frame size. // bool isSIMDTypeLocalAligned(unsigned varNum) { #if defined(FEATURE_SIMD) && ALIGN_SIMD_TYPES if (lclVarIsSIMDType(varNum) && lvaTable[varNum].lvType != TYP_BYREF) { // TODO-Cleanup: Can't this use the lvExactSize on the varDsc? int alignment = getSIMDTypeAlignment(lvaTable[varNum].lvType); if (alignment <= STACK_ALIGN) { bool rbpBased; int off = lvaFrameAddress(varNum, &rbpBased); // On SysV and Winx64 ABIs RSP+8 will be 16-byte aligned at the // first instruction of a function. If our frame is RBP based // then RBP will always be 16 bytes aligned, so we can simply // check the offset. if (rbpBased) { return (off % alignment) == 0; } // For RSP-based frame the alignment of RSP depends on our // locals. rsp+8 is aligned on entry and we just subtract frame // size so it is not hard to compute. Note that the compiler // tries hard to make sure the frame size means RSP will be // 16-byte aligned, but for leaf functions without locals (i.e. // frameSize = 0) it will not be. int frameSize = codeGen->genTotalFrameSize(); return ((8 - frameSize + off) % alignment) == 0; } } #endif // FEATURE_SIMD return false; } #ifdef DEBUG // Answer the question: Is a particular ISA supported? // Use this api when asking the question so that future // ISA questions can be asked correctly or when asserting // support/nonsupport for an instruction set bool compIsaSupportedDebugOnly(CORINFO_InstructionSet isa) const { #if defined(TARGET_XARCH) || defined(TARGET_ARM64) return (opts.compSupportsISA & (1ULL << isa)) != 0; #else return false; #endif } #endif // DEBUG bool notifyInstructionSetUsage(CORINFO_InstructionSet isa, bool supported) const; // Answer the question: Is a particular ISA allowed to be used implicitly by optimizations? 
// The result of this api call will exactly match the target machine // on which the function is executed (except for CoreLib, where there are special rules) bool compExactlyDependsOn(CORINFO_InstructionSet isa) const { #if defined(TARGET_XARCH) || defined(TARGET_ARM64) uint64_t isaBit = (1ULL << isa); if ((opts.compSupportsISAReported & isaBit) == 0) { if (notifyInstructionSetUsage(isa, (opts.compSupportsISA & isaBit) != 0)) ((Compiler*)this)->opts.compSupportsISAExactly |= isaBit; ((Compiler*)this)->opts.compSupportsISAReported |= isaBit; } return (opts.compSupportsISAExactly & isaBit) != 0; #else return false; #endif } // Ensure that code will not execute if an instruction set is usable. Call only // if the instruction set has previously reported as unusable, but when // that that status has not yet been recorded to the AOT compiler void compVerifyInstructionSetUnusable(CORINFO_InstructionSet isa) { // use compExactlyDependsOn to capture are record the use of the isa bool isaUsable = compExactlyDependsOn(isa); // Assert that the is unusable. If true, this function should never be called. assert(!isaUsable); } // Answer the question: Is a particular ISA allowed to be used implicitly by optimizations? // The result of this api call will match the target machine if the result is true // If the result is false, then the target machine may have support for the instruction bool compOpportunisticallyDependsOn(CORINFO_InstructionSet isa) const { if ((opts.compSupportsISA & (1ULL << isa)) != 0) { return compExactlyDependsOn(isa); } else { return false; } } // Answer the question: Is a particular ISA supported for explicit hardware intrinsics? bool compHWIntrinsicDependsOn(CORINFO_InstructionSet isa) const { // Report intent to use the ISA to the EE compExactlyDependsOn(isa); return ((opts.compSupportsISA & (1ULL << isa)) != 0); } bool canUseVexEncoding() const { #ifdef TARGET_XARCH return compOpportunisticallyDependsOn(InstructionSet_AVX); #else return false; #endif } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Compiler XX XX XX XX Generic info about the compilation and the method being compiled. XX XX It is responsible for driving the other phases. XX XX It is also responsible for all the memory management. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: Compiler* InlineeCompiler; // The Compiler instance for the inlinee InlineResult* compInlineResult; // The result of importing the inlinee method. bool compDoAggressiveInlining; // If true, mark every method as CORINFO_FLG_FORCEINLINE bool compJmpOpUsed; // Does the method do a JMP bool compLongUsed; // Does the method use TYP_LONG bool compFloatingPointUsed; // Does the method use TYP_FLOAT or TYP_DOUBLE bool compTailCallUsed; // Does the method do a tailcall bool compTailPrefixSeen; // Does the method IL have tail. prefix bool compLocallocSeen; // Does the method IL have localloc opcode bool compLocallocUsed; // Does the method use localloc. bool compLocallocOptimized; // Does the method have an optimized localloc bool compQmarkUsed; // Does the method use GT_QMARK/GT_COLON bool compQmarkRationalized; // Is it allowed to use a GT_QMARK/GT_COLON node. bool compHasBackwardJump; // Does the method (or some inlinee) have a lexically backwards jump? 
bool compHasBackwardJumpInHandler; // Does the method have a lexically backwards jump in a handler? bool compSwitchedToOptimized; // Codegen initially was Tier0 but jit switched to FullOpts bool compSwitchedToMinOpts; // Codegen initially was Tier1/FullOpts but jit switched to MinOpts bool compSuppressedZeroInit; // There are vars with lvSuppressedZeroInit set // NOTE: These values are only reliable after // the importing is completely finished. #ifdef DEBUG // State information - which phases have completed? // These are kept together for easy discoverability bool bRangeAllowStress; bool compCodeGenDone; int64_t compNumStatementLinksTraversed; // # of links traversed while doing debug checks bool fgNormalizeEHDone; // Has the flowgraph EH normalization phase been done? size_t compSizeEstimate; // The estimated size of the method as per `gtSetEvalOrder`. size_t compCycleEstimate; // The estimated cycle count of the method as per `gtSetEvalOrder` #endif // DEBUG bool fgLocalVarLivenessDone; // Note that this one is used outside of debug. bool fgLocalVarLivenessChanged; bool compLSRADone; bool compRationalIRForm; bool compUsesThrowHelper; // There is a call to a THROW_HELPER for the compiled method. bool compGeneratingProlog; bool compGeneratingEpilog; bool compNeedsGSSecurityCookie; // There is an unsafe buffer (or localloc) on the stack. // Insert cookie on frame and code to check the cookie, like VC++ -GS. bool compGSReorderStackLayout; // There is an unsafe buffer on the stack, reorder locals and make local // copies of susceptible parameters to avoid buffer overrun attacks through locals/params bool getNeedsGSSecurityCookie() const { return compNeedsGSSecurityCookie; } void setNeedsGSSecurityCookie() { compNeedsGSSecurityCookie = true; } FrameLayoutState lvaDoneFrameLayout; // The highest frame layout state that we've completed. During // frame layout calculations, this is the level we are currently // computing. //---------------------------- JITing options ----------------------------- enum codeOptimize { BLENDED_CODE, SMALL_CODE, FAST_CODE, COUNT_OPT_CODE }; struct Options { JitFlags* jitFlags; // all flags passed from the EE // The instruction sets that the compiler is allowed to emit. uint64_t compSupportsISA; // The instruction sets that were reported to the VM as being used by the current method. Subset of // compSupportsISA. uint64_t compSupportsISAReported; // The instruction sets that the compiler is allowed to take advantage of implicitly during optimizations. // Subset of compSupportsISA. // The instruction sets available in compSupportsISA and not available in compSupportsISAExactly can be only // used via explicit hardware intrinsics. uint64_t compSupportsISAExactly; void setSupportedISAs(CORINFO_InstructionSetFlags isas) { compSupportsISA = isas.GetFlagsRaw(); } unsigned compFlags; // method attributes unsigned instrCount; unsigned lvRefCount; codeOptimize compCodeOpt; // what type of code optimizations bool compUseCMOV; // optimize maximally and/or favor speed over size? 
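        // Illustrative sketch (not from the JIT sources): both compSupportsISAReported and
        // compSupportsISAExactly above are bit-subsets of compSupportsISA, and a raw query
        // against these sets looks like
        //
        //     uint64_t isaBit       = (1ULL << InstructionSet_AVX2); // example ISA only
        //     bool     usable       = (compSupportsISA & isaBit) != 0;        // may be used at all
        //     bool     usedImplicit = (compSupportsISAExactly & isaBit) != 0; // usable by optimizations
        //
        // Callers normally go through compExactlyDependsOn / compOpportunisticallyDependsOn
        // rather than testing the bits directly.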
#define DEFAULT_MIN_OPTS_CODE_SIZE 60000 #define DEFAULT_MIN_OPTS_INSTR_COUNT 20000 #define DEFAULT_MIN_OPTS_BB_COUNT 2000 #define DEFAULT_MIN_OPTS_LV_NUM_COUNT 2000 #define DEFAULT_MIN_OPTS_LV_REF_COUNT 8000 // Maximun number of locals before turning off the inlining #define MAX_LV_NUM_COUNT_FOR_INLINING 512 bool compMinOpts; bool compMinOptsIsSet; #ifdef DEBUG mutable bool compMinOptsIsUsed; bool MinOpts() const { assert(compMinOptsIsSet); compMinOptsIsUsed = true; return compMinOpts; } bool IsMinOptsSet() const { return compMinOptsIsSet; } #else // !DEBUG bool MinOpts() const { return compMinOpts; } bool IsMinOptsSet() const { return compMinOptsIsSet; } #endif // !DEBUG bool OptimizationDisabled() const { return MinOpts() || compDbgCode; } bool OptimizationEnabled() const { return !OptimizationDisabled(); } void SetMinOpts(bool val) { assert(!compMinOptsIsUsed); assert(!compMinOptsIsSet || (compMinOpts == val)); compMinOpts = val; compMinOptsIsSet = true; } // true if the CLFLG_* for an optimization is set. bool OptEnabled(unsigned optFlag) const { return !!(compFlags & optFlag); } #ifdef FEATURE_READYTORUN bool IsReadyToRun() const { return jitFlags->IsSet(JitFlags::JIT_FLAG_READYTORUN); } #else bool IsReadyToRun() const { return false; } #endif // Check if the compilation is control-flow guard enabled. bool IsCFGEnabled() const { #if defined(TARGET_ARM64) || defined(TARGET_AMD64) // On these platforms we assume the register that the target is // passed in is preserved by the validator and take care to get the // target from the register for the call (even in debug mode). static_assert_no_msg((RBM_VALIDATE_INDIRECT_CALL_TRASH & (1 << REG_VALIDATE_INDIRECT_CALL_ADDR)) == 0); if (JitConfig.JitForceControlFlowGuard()) return true; return jitFlags->IsSet(JitFlags::JIT_FLAG_ENABLE_CFG); #else // The remaining platforms are not supported and would require some // work to support. // // ARM32: // The ARM32 validator does not preserve any volatile registers // which means we have to take special care to allocate and use a // callee-saved register (reloading the target from memory is a // security issue). // // x86: // On x86 some VSD calls disassemble the call site and expect an // indirect call which is fundamentally incompatible with CFG. // This would require a different way to pass this information // through. // return false; #endif } #ifdef FEATURE_ON_STACK_REPLACEMENT bool IsOSR() const { return jitFlags->IsSet(JitFlags::JIT_FLAG_OSR); } #else bool IsOSR() const { return false; } #endif // true if we should use the PINVOKE_{BEGIN,END} helpers instead of generating // PInvoke transitions inline. Normally used by R2R, but also used when generating a reverse pinvoke frame, as // the current logic for frame setup initializes and pushes // the InlinedCallFrame before performing the Reverse PInvoke transition, which is invalid (as frames cannot // safely be pushed/popped while the thread is in a preemptive state.). bool ShouldUsePInvokeHelpers() { return jitFlags->IsSet(JitFlags::JIT_FLAG_USE_PINVOKE_HELPERS) || jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE); } // true if we should use insert the REVERSE_PINVOKE_{ENTER,EXIT} helpers in the method // prolog/epilog bool IsReversePInvoke() { return jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE); } bool compScopeInfo; // Generate the LocalVar info ? bool compDbgCode; // Generate debugger-friendly code? bool compDbgInfo; // Gather debugging info? 
bool compDbgEnC; #ifdef PROFILING_SUPPORTED bool compNoPInvokeInlineCB; #else static const bool compNoPInvokeInlineCB; #endif #ifdef DEBUG bool compGcChecks; // Check arguments and return values to ensure they are sane #endif #if defined(DEBUG) && defined(TARGET_XARCH) bool compStackCheckOnRet; // Check stack pointer on return to ensure it is correct. #endif // defined(DEBUG) && defined(TARGET_XARCH) #if defined(DEBUG) && defined(TARGET_X86) bool compStackCheckOnCall; // Check stack pointer after call to ensure it is correct. Only for x86. #endif // defined(DEBUG) && defined(TARGET_X86) bool compReloc; // Generate relocs for pointers in code, true for all ngen/prejit codegen #ifdef DEBUG #if defined(TARGET_XARCH) bool compEnablePCRelAddr; // Whether absolute addr be encoded as PC-rel offset by RyuJIT where possible #endif #endif // DEBUG #ifdef UNIX_AMD64_ABI // This flag is indicating if there is a need to align the frame. // On AMD64-Windows, if there are calls, 4 slots for the outgoing ars are allocated, except for // FastTailCall. This slots makes the frame size non-zero, so alignment logic will be called. // On AMD64-Unix, there are no such slots. There is a possibility to have calls in the method with frame size of // 0. The frame alignment logic won't kick in. This flags takes care of the AMD64-Unix case by remembering that // there are calls and making sure the frame alignment logic is executed. bool compNeedToAlignFrame; #endif // UNIX_AMD64_ABI bool compProcedureSplitting; // Separate cold code from hot code bool genFPorder; // Preserve FP order (operations are non-commutative) bool genFPopt; // Can we do frame-pointer-omission optimization? bool altJit; // True if we are an altjit and are compiling this method #ifdef OPT_CONFIG bool optRepeat; // Repeat optimizer phases k times #endif #ifdef DEBUG bool compProcedureSplittingEH; // Separate cold code from hot code for functions with EH bool dspCode; // Display native code generated bool dspEHTable; // Display the EH table reported to the VM bool dspDebugInfo; // Display the Debug info reported to the VM bool dspInstrs; // Display the IL instructions intermixed with the native code output bool dspLines; // Display source-code lines intermixed with native code output bool dmpHex; // Display raw bytes in hex of native code output bool varNames; // Display variables names in native code output bool disAsm; // Display native code as it is generated bool disAsmSpilled; // Display native code when any register spilling occurs bool disasmWithGC; // Display GC info interleaved with disassembly. bool disDiffable; // Makes the Disassembly code 'diff-able' bool disAddr; // Display process address next to each instruction in disassembly code bool disAlignment; // Display alignment boundaries in disassembly code bool disAsm2; // Display native code after it is generated using external disassembler bool dspOrder; // Display names of each of the methods that we ngen/jit bool dspUnwind; // Display the unwind info output bool dspDiffable; // Makes the Jit Dump 'diff-able' (currently uses same COMPlus_* flag as disDiffable) bool compLongAddress; // Force using large pseudo instructions for long address // (IF_LARGEJMP/IF_LARGEADR/IF_LARGLDC) bool dspGCtbls; // Display the GC tables #endif bool compExpandCallsEarly; // True if we should expand virtual call targets early for this method // Default numbers used to perform loop alignment. All the numbers are choosen // based on experimenting with various benchmarks. 
// Default minimum loop block weight required to enable loop alignment. #define DEFAULT_ALIGN_LOOP_MIN_BLOCK_WEIGHT 4 // By default a loop will be aligned at 32B address boundary to get better // performance as per architecture manuals. #define DEFAULT_ALIGN_LOOP_BOUNDARY 0x20 // For non-adaptive loop alignment, by default, only align a loop whose size is // at most 3 times the alignment block size. If the loop is bigger than that, it is most // likely complicated enough that loop alignment will not impact performance. #define DEFAULT_MAX_LOOPSIZE_FOR_ALIGN DEFAULT_ALIGN_LOOP_BOUNDARY * 3 #ifdef DEBUG // Loop alignment variables // If set, for non-adaptive alignment, ensure loop jmps are not on or cross alignment boundary. bool compJitAlignLoopForJcc; #endif // For non-adaptive alignment, minimum loop size (in bytes) for which alignment will be done. unsigned short compJitAlignLoopMaxCodeSize; // Minimum weight needed for the first block of a loop to make it a candidate for alignment. unsigned short compJitAlignLoopMinBlockWeight; // For non-adaptive alignment, address boundary (power of 2) at which loop alignment should // be done. By default, 32B. unsigned short compJitAlignLoopBoundary; // Padding limit to align a loop. unsigned short compJitAlignPaddingLimit; // If set, perform adaptive loop alignment that limits number of padding based on loop size. bool compJitAlignLoopAdaptive; // If set, tries to hide alignment instructions behind unconditional jumps. bool compJitHideAlignBehindJmp; #ifdef LATE_DISASM bool doLateDisasm; // Run the late disassembler #endif // LATE_DISASM #if DUMP_GC_TABLES && !defined(DEBUG) #pragma message("NOTE: this non-debug build has GC ptr table dumping always enabled!") static const bool dspGCtbls = true; #endif #ifdef PROFILING_SUPPORTED // Whether to emit Enter/Leave/TailCall hooks using a dummy stub (DummyProfilerELTStub()). // This option helps make the JIT behave as if it is running under a profiler. bool compJitELTHookEnabled; #endif // PROFILING_SUPPORTED #if FEATURE_TAILCALL_OPT // Whether opportunistic or implicit tail call optimization is enabled. bool compTailCallOpt; // Whether optimization of transforming a recursive tail call into a loop is enabled. bool compTailCallLoopOpt; #endif #if FEATURE_FASTTAILCALL // Whether fast tail calls are allowed. bool compFastTailCalls; #endif // FEATURE_FASTTAILCALL #if defined(TARGET_ARM64) // Decision about whether to save FP/LR registers with callee-saved registers (see // COMPlus_JitSaveFpLrWithCalleSavedRegisters). int compJitSaveFpLrWithCalleeSavedRegisters; #endif // defined(TARGET_ARM64) #ifdef CONFIGURABLE_ARM_ABI bool compUseSoftFP = false; #else #ifdef ARM_SOFTFP static const bool compUseSoftFP = true; #else // !ARM_SOFTFP static const bool compUseSoftFP = false; #endif // ARM_SOFTFP #endif // CONFIGURABLE_ARM_ABI } opts; static bool s_pAltJitExcludeAssembliesListInitialized; static AssemblyNamesList2* s_pAltJitExcludeAssembliesList; #ifdef DEBUG static bool s_pJitDisasmIncludeAssembliesListInitialized; static AssemblyNamesList2* s_pJitDisasmIncludeAssembliesList; static bool s_pJitFunctionFileInitialized; static MethodSet* s_pJitMethodSet; #endif // DEBUG #ifdef DEBUG // silence warning of cast to greater size. It is easier to silence than construct code the compiler is happy with, and // it is safe in this case #pragma warning(push) #pragma warning(disable : 4312) template <typename T> T dspPtr(T p) { return (p == ZERO) ? ZERO : (opts.dspDiffable ? 
T(0xD1FFAB1E) : p); } template <typename T> T dspOffset(T o) { return (o == ZERO) ? ZERO : (opts.dspDiffable ? T(0xD1FFAB1E) : o); } #pragma warning(pop) static int dspTreeID(GenTree* tree) { return tree->gtTreeID; } static void printStmtID(Statement* stmt) { assert(stmt != nullptr); printf(FMT_STMT, stmt->GetID()); } static void printTreeID(GenTree* tree) { if (tree == nullptr) { printf("[------]"); } else { printf("[%06d]", dspTreeID(tree)); } } const char* pgoSourceToString(ICorJitInfo::PgoSource p); const char* devirtualizationDetailToString(CORINFO_DEVIRTUALIZATION_DETAIL detail); #endif // DEBUG // clang-format off #define STRESS_MODES \ \ STRESS_MODE(NONE) \ \ /* "Variations" stress areas which we try to mix up with each other. */ \ /* These should not be exhaustively used as they might */ \ /* hide/trivialize other areas */ \ \ STRESS_MODE(REGS) \ STRESS_MODE(DBL_ALN) \ STRESS_MODE(LCL_FLDS) \ STRESS_MODE(UNROLL_LOOPS) \ STRESS_MODE(MAKE_CSE) \ STRESS_MODE(LEGACY_INLINE) \ STRESS_MODE(CLONE_EXPR) \ STRESS_MODE(USE_CMOV) \ STRESS_MODE(FOLD) \ STRESS_MODE(MERGED_RETURNS) \ STRESS_MODE(BB_PROFILE) \ STRESS_MODE(OPT_BOOLS_GC) \ STRESS_MODE(REMORPH_TREES) \ STRESS_MODE(64RSLT_MUL) \ STRESS_MODE(DO_WHILE_LOOPS) \ STRESS_MODE(MIN_OPTS) \ STRESS_MODE(REVERSE_FLAG) /* Will set GTF_REVERSE_OPS whenever we can */ \ STRESS_MODE(REVERSE_COMMA) /* Will reverse commas created with gtNewCommaNode */ \ STRESS_MODE(TAILCALL) /* Will make the call as a tailcall whenever legal */ \ STRESS_MODE(CATCH_ARG) /* Will spill catch arg */ \ STRESS_MODE(UNSAFE_BUFFER_CHECKS) \ STRESS_MODE(NULL_OBJECT_CHECK) \ STRESS_MODE(PINVOKE_RESTORE_ESP) \ STRESS_MODE(RANDOM_INLINE) \ STRESS_MODE(SWITCH_CMP_BR_EXPANSION) \ STRESS_MODE(GENERIC_VARN) \ STRESS_MODE(PROFILER_CALLBACKS) /* Will generate profiler hooks for ELT callbacks */ \ STRESS_MODE(BYREF_PROMOTION) /* Change undoPromotion decisions for byrefs */ \ STRESS_MODE(PROMOTE_FEWER_STRUCTS)/* Don't promote some structs that can be promoted */ \ STRESS_MODE(VN_BUDGET)/* Randomize the VN budget */ \ \ /* After COUNT_VARN, stress level 2 does all of these all the time */ \ \ STRESS_MODE(COUNT_VARN) \ \ /* "Check" stress areas that can be exhaustively used if we */ \ /* dont care about performance at all */ \ \ STRESS_MODE(FORCE_INLINE) /* Treat every method as AggressiveInlining */ \ STRESS_MODE(CHK_FLOW_UPDATE) \ STRESS_MODE(EMITTER) \ STRESS_MODE(CHK_REIMPORT) \ STRESS_MODE(FLATFP) \ STRESS_MODE(GENERIC_CHECK) \ STRESS_MODE(COUNT) enum compStressArea { #define STRESS_MODE(mode) STRESS_##mode, STRESS_MODES #undef STRESS_MODE }; // clang-format on #ifdef DEBUG static const LPCWSTR s_compStressModeNames[STRESS_COUNT + 1]; BYTE compActiveStressModes[STRESS_COUNT]; #endif // DEBUG #define MAX_STRESS_WEIGHT 100 bool compStressCompile(compStressArea stressArea, unsigned weightPercentage); bool compStressCompileHelper(compStressArea stressArea, unsigned weightPercentage); #ifdef DEBUG bool compInlineStress() { return compStressCompile(STRESS_LEGACY_INLINE, 50); } bool compRandomInlineStress() { return compStressCompile(STRESS_RANDOM_INLINE, 50); } bool compPromoteFewerStructs(unsigned lclNum); #endif // DEBUG bool compTailCallStress() { #ifdef DEBUG // Do not stress tailcalls in IL stubs as the runtime creates several IL // stubs to implement the tailcall mechanism, which would then // recursively create more IL stubs. 
return !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && (JitConfig.TailcallStress() != 0 || compStressCompile(STRESS_TAILCALL, 5)); #else return false; #endif } const char* compGetTieringName(bool wantShortName = false) const; const char* compGetStressMessage() const; codeOptimize compCodeOpt() const { #if 0 // Switching between size & speed has measurable throughput impact // (3.5% on NGen CoreLib when measured). It used to be enabled for // DEBUG, but should generate identical code between CHK & RET builds, // so that's not acceptable. // TODO-Throughput: Figure out what to do about size vs. speed & throughput. // Investigate the cause of the throughput regression. return opts.compCodeOpt; #else return BLENDED_CODE; #endif } //--------------------- Info about the procedure -------------------------- struct Info { COMP_HANDLE compCompHnd; CORINFO_MODULE_HANDLE compScopeHnd; CORINFO_CLASS_HANDLE compClassHnd; CORINFO_METHOD_HANDLE compMethodHnd; CORINFO_METHOD_INFO* compMethodInfo; bool hasCircularClassConstraints; bool hasCircularMethodConstraints; #if defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS const char* compMethodName; const char* compClassName; const char* compFullName; double compPerfScore; int compMethodSuperPMIIndex; // useful when debugging under SuperPMI #endif // defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS #if defined(DEBUG) || defined(INLINE_DATA) // Method hash is logically const, but computed // on first demand. mutable unsigned compMethodHashPrivate; unsigned compMethodHash() const; #endif // defined(DEBUG) || defined(INLINE_DATA) #ifdef PSEUDORANDOM_NOP_INSERTION // things for pseudorandom nop insertion unsigned compChecksum; CLRRandom compRNG; #endif // The following holds the FLG_xxxx flags for the method we're compiling. unsigned compFlags; // The following holds the class attributes for the method we're compiling. unsigned compClassAttr; const BYTE* compCode; IL_OFFSET compILCodeSize; // The IL code size IL_OFFSET compILImportSize; // Estimated amount of IL actually imported IL_OFFSET compILEntry; // The IL entry point (normally 0) PatchpointInfo* compPatchpointInfo; // Patchpoint data for OSR (normally nullptr) UNATIVE_OFFSET compNativeCodeSize; // The native code size, after instructions are issued. This // is less than (compTotalHotCodeSize + compTotalColdCodeSize) only if: // (1) the code is not hot/cold split, and we issued less code than we expected, or // (2) the code is hot/cold split, and we issued less code than we expected // in the cold section (the hot section will always be padded out to compTotalHotCodeSize). bool compIsStatic : 1; // Is the method static (no 'this' pointer)? bool compIsVarArgs : 1; // Does the method have varargs parameters? bool compInitMem : 1; // Is the CORINFO_OPT_INIT_LOCALS bit set in the method info options? bool compProfilerCallback : 1; // JIT inserted a profiler Enter callback bool compPublishStubParam : 1; // EAX captured in prolog will be available through an intrinsic bool compHasNextCallRetAddr : 1; // The NextCallReturnAddress intrinsic is used. var_types compRetType; // Return type of the method as declared in IL var_types compRetNativeType; // Normalized return type as per target arch ABI unsigned compILargsCount; // Number of arguments (incl. implicit but not hidden) unsigned compArgsCount; // Number of arguments (incl. 
implicit and hidden) #if FEATURE_FASTTAILCALL unsigned compArgStackSize; // Incoming argument stack size in bytes #endif // FEATURE_FASTTAILCALL unsigned compRetBuffArg; // position of hidden return param var (0, 1) (BAD_VAR_NUM means not present); int compTypeCtxtArg; // position of hidden param for type context for generic code (CORINFO_CALLCONV_PARAMTYPE) unsigned compThisArg; // position of implicit this pointer param (not to be confused with lvaArg0Var) unsigned compILlocalsCount; // Number of vars : args + locals (incl. implicit but not hidden) unsigned compLocalsCount; // Number of vars : args + locals (incl. implicit and hidden) unsigned compMaxStack; UNATIVE_OFFSET compTotalHotCodeSize; // Total number of bytes of Hot Code in the method UNATIVE_OFFSET compTotalColdCodeSize; // Total number of bytes of Cold Code in the method unsigned compUnmanagedCallCountWithGCTransition; // count of unmanaged calls with GC transition. CorInfoCallConvExtension compCallConv; // The entry-point calling convention for this method. unsigned compLvFrameListRoot; // lclNum for the Frame root unsigned compXcptnsCount; // Number of exception-handling clauses read in the method's IL. // You should generally use compHndBBtabCount instead: it is the // current number of EH clauses (after additions like synchronized // methods and funclets, and removals like unreachable code deletion). Target::ArgOrder compArgOrder; bool compMatchedVM; // true if the VM is "matched": either the JIT is a cross-compiler // and the VM expects that, or the JIT is a "self-host" compiler // (e.g., x86 hosted targeting x86) and the VM expects that. /* The following holds IL scope information about local variables. */ unsigned compVarScopesCount; VarScopeDsc* compVarScopes; /* The following holds information about instr offsets for * which we need to report IP-mappings */ IL_OFFSET* compStmtOffsets; // sorted unsigned compStmtOffsetsCount; ICorDebugInfo::BoundaryTypes compStmtOffsetsImplicit; #define CPU_X86 0x0100 // The generic X86 CPU #define CPU_X86_PENTIUM_4 0x0110 #define CPU_X64 0x0200 // The generic x64 CPU #define CPU_AMD_X64 0x0210 // AMD x64 CPU #define CPU_INTEL_X64 0x0240 // Intel x64 CPU #define CPU_ARM 0x0300 // The generic ARM CPU #define CPU_ARM64 0x0400 // The generic ARM64 CPU unsigned genCPU; // What CPU are we running on // Number of class profile probes in this method unsigned compClassProbeCount; } info; // Returns true if the method being compiled returns a non-void and non-struct value. // Note that lvaInitTypeRef() normalizes compRetNativeType for struct returns in a // single register as per target arch ABI (e.g on Amd64 Windows structs of size 1, 2, // 4 or 8 gets normalized to TYP_BYTE/TYP_SHORT/TYP_INT/TYP_LONG; On Arm HFA structs). // Methods returning such structs are considered to return non-struct return value and // this method returns true in that case. bool compMethodReturnsNativeScalarType() { return (info.compRetType != TYP_VOID) && !varTypeIsStruct(info.compRetNativeType); } // Returns true if the method being compiled returns RetBuf addr as its return value bool compMethodReturnsRetBufAddr() { // There are cases where implicit RetBuf argument should be explicitly returned in a register. // In such cases the return type is changed to TYP_BYREF and appropriate IR is generated. // These cases are: CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_AMD64 // 1. on x64 Windows and Unix the address of RetBuf needs to be returned by // methods with hidden RetBufArg in RAX. 
In such case GT_RETURN is of TYP_BYREF, // returning the address of RetBuf. return (info.compRetBuffArg != BAD_VAR_NUM); #else // TARGET_AMD64 #ifdef PROFILING_SUPPORTED // 2. Profiler Leave callback expects the address of retbuf as return value for // methods with hidden RetBuf argument. impReturnInstruction() when profiler // callbacks are needed creates GT_RETURN(TYP_BYREF, op1 = Addr of RetBuf) for // methods with hidden RetBufArg. if (compIsProfilerHookNeeded()) { return (info.compRetBuffArg != BAD_VAR_NUM); } #endif // 3. Windows ARM64 native instance calling convention requires the address of RetBuff // to be returned in x0. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_ARM64) if (TargetOS::IsWindows) { auto callConv = info.compCallConv; if (callConvIsInstanceMethodCallConv(callConv)) { return (info.compRetBuffArg != BAD_VAR_NUM); } } #endif // TARGET_ARM64 // 4. x86 unmanaged calling conventions require the address of RetBuff to be returned in eax. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) if (info.compCallConv != CorInfoCallConvExtension::Managed) { return (info.compRetBuffArg != BAD_VAR_NUM); } #endif return false; #endif // TARGET_AMD64 } // Returns true if the method returns a value in more than one return register // TODO-ARM-Bug: Deal with multi-register genReturnLocaled structs? // TODO-ARM64: Does this apply for ARM64 too? bool compMethodReturnsMultiRegRetType() { #if FEATURE_MULTIREG_RET #if defined(TARGET_X86) // On x86, 64-bit longs and structs are returned in multiple registers return varTypeIsLong(info.compRetNativeType) || (varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM)); #else // targets: X64-UNIX, ARM64 or ARM32 // On all other targets that support multireg return values: // Methods returning a struct in multiple registers have a return value of TYP_STRUCT. // Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM); #endif // TARGET_XXX #else // not FEATURE_MULTIREG_RET // For this architecture there are no multireg returns return false; #endif // FEATURE_MULTIREG_RET } bool compEnregLocals() { return ((opts.compFlags & CLFLG_REGVAR) != 0); } bool compEnregStructLocals() { return (JitConfig.JitEnregStructLocals() != 0); } bool compObjectStackAllocation() { return (JitConfig.JitObjectStackAllocation() != 0); } // Returns true if the method returns a value in more than one return register, // it should replace/be merged with compMethodReturnsMultiRegRetType when #36868 is fixed. // The difference from original `compMethodReturnsMultiRegRetType` is in ARM64 SIMD* handling, // this method correctly returns false for it (it is passed as HVA), when the original returns true. bool compMethodReturnsMultiRegRegTypeAlternate() { #if FEATURE_MULTIREG_RET #if defined(TARGET_X86) // On x86, 64-bit longs and structs are returned in multiple registers return varTypeIsLong(info.compRetNativeType) || (varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM)); #else // targets: X64-UNIX, ARM64 or ARM32 #if defined(TARGET_ARM64) // TYP_SIMD* are returned in one register. if (varTypeIsSIMD(info.compRetNativeType)) { return false; } #endif // On all other targets that support multireg return values: // Methods returning a struct in multiple registers have a return value of TYP_STRUCT. 
// Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM); #endif // TARGET_XXX #else // not FEATURE_MULTIREG_RET // For this architecture there are no multireg returns return false; #endif // FEATURE_MULTIREG_RET } // Returns true if the method being compiled returns a value bool compMethodHasRetVal() { return compMethodReturnsNativeScalarType() || compMethodReturnsRetBufAddr() || compMethodReturnsMultiRegRetType(); } // Returns true if the method requires a PInvoke prolog and epilog bool compMethodRequiresPInvokeFrame() { return (info.compUnmanagedCallCountWithGCTransition > 0); } // Returns true if address-exposed user variables should be poisoned with a recognizable value bool compShouldPoisonFrame() { #ifdef FEATURE_ON_STACK_REPLACEMENT if (opts.IsOSR()) return false; #endif return !info.compInitMem && opts.compDbgCode; } // Returns true if the jit supports having patchpoints in this method. // Optionally, get the reason why not. bool compCanHavePatchpoints(const char** reason = nullptr); #if defined(DEBUG) void compDispLocalVars(); #endif // DEBUG private: class ClassLayoutTable* m_classLayoutTable; class ClassLayoutTable* typCreateClassLayoutTable(); class ClassLayoutTable* typGetClassLayoutTable(); public: // Get the layout having the specified layout number. ClassLayout* typGetLayoutByNum(unsigned layoutNum); // Get the layout number of the specified layout. unsigned typGetLayoutNum(ClassLayout* layout); // Get the layout having the specified size but no class handle. ClassLayout* typGetBlkLayout(unsigned blockSize); // Get the number of a layout having the specified size but no class handle. unsigned typGetBlkLayoutNum(unsigned blockSize); // Get the layout for the specified class handle. ClassLayout* typGetObjLayout(CORINFO_CLASS_HANDLE classHandle); // Get the number of a layout for the specified class handle. unsigned typGetObjLayoutNum(CORINFO_CLASS_HANDLE classHandle); //-------------------------- Global Compiler Data ------------------------------------ #ifdef DEBUG private: static LONG s_compMethodsCount; // to produce unique label names #endif public: #ifdef DEBUG LONG compMethodID; unsigned compGenTreeID; unsigned compStatementID; unsigned compBasicBlockID; #endif BasicBlock* compCurBB; // the current basic block in process Statement* compCurStmt; // the current statement in process GenTree* compCurTree; // the current tree in process // The following is used to create the 'method JIT info' block. 
size_t compInfoBlkSize; BYTE* compInfoBlkAddr; EHblkDsc* compHndBBtab; // array of EH data unsigned compHndBBtabCount; // element count of used elements in EH data array unsigned compHndBBtabAllocCount; // element count of allocated elements in EH data array #if defined(TARGET_X86) //------------------------------------------------------------------------- // Tracking of region covered by the monitor in synchronized methods void* syncStartEmitCookie; // the emitter cookie for first instruction after the call to MON_ENTER void* syncEndEmitCookie; // the emitter cookie for first instruction after the call to MON_EXIT #endif // !TARGET_X86 Phases mostRecentlyActivePhase; // the most recently active phase PhaseChecks activePhaseChecks; // the currently active phase checks //------------------------------------------------------------------------- // The following keeps track of how many bytes of local frame space we've // grabbed so far in the current function, and how many argument bytes we // need to pop when we return. // unsigned compLclFrameSize; // secObject+lclBlk+locals+temps // Count of callee-saved regs we pushed in the prolog. // Does not include EBP for isFramePointerUsed() and double-aligned frames. // In case of Amd64 this doesn't include float regs saved on stack. unsigned compCalleeRegsPushed; #if defined(TARGET_XARCH) // Mask of callee saved float regs on stack. regMaskTP compCalleeFPRegsSavedMask; #endif #ifdef TARGET_AMD64 // Quirk for VS debug-launch scenario to work: // Bytes of padding between save-reg area and locals. #define VSQUIRK_STACK_PAD (2 * REGSIZE_BYTES) unsigned compVSQuirkStackPaddingNeeded; #endif unsigned compArgSize; // total size of arguments in bytes (including register args (lvIsRegArg)) unsigned compMapILargNum(unsigned ILargNum); // map accounting for hidden args unsigned compMapILvarNum(unsigned ILvarNum); // map accounting for hidden args unsigned compMap2ILvarNum(unsigned varNum) const; // map accounting for hidden args #if defined(TARGET_ARM64) struct FrameInfo { // Frame type (1-5) int frameType; // Distance from established (method body) SP to base of callee save area int calleeSaveSpOffset; // Amount to subtract from SP before saving (prolog) OR // to add to SP after restoring (epilog) callee saves int calleeSaveSpDelta; // Distance from established SP to where caller's FP was saved int offsetSpToSavedFp; } compFrameInfo; #endif //------------------------------------------------------------------------- static void compStartup(); // One-time initialization static void compShutdown(); // One-time finalization void compInit(ArenaAllocator* pAlloc, CORINFO_METHOD_HANDLE methodHnd, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, InlineInfo* inlineInfo); void compDone(); static void compDisplayStaticSizes(FILE* fout); //------------ Some utility functions -------------- void* compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */ void** ppIndirection); /* OUT */ // Several JIT/EE interface functions return a CorInfoType, and also return a // class handle as an out parameter if the type is a value class. Returns the // size of the type these describe. unsigned compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd); // Returns true if the method being compiled has a return buffer. bool compHasRetBuffArg(); #ifdef DEBUG // Components used by the compiler may write unit test suites, and // have them run within this method. They will be run only once per process, and only // in debug. (Perhaps should be under the control of a COMPlus_ flag.) 
// These should fail by asserting. void compDoComponentUnitTestsOnce(); #endif // DEBUG int compCompile(CORINFO_MODULE_HANDLE classPtr, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags); void compCompileFinish(); int compCompileHelper(CORINFO_MODULE_HANDLE classPtr, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlag); ArenaAllocator* compGetArenaAllocator(); void generatePatchpointInfo(); #if MEASURE_MEM_ALLOC static bool s_dspMemStats; // Display per-phase memory statistics for every function #endif // MEASURE_MEM_ALLOC #if LOOP_HOIST_STATS unsigned m_loopsConsidered; bool m_curLoopHasHoistedExpression; unsigned m_loopsWithHoistedExpressions; unsigned m_totalHoistedExpressions; void AddLoopHoistStats(); void PrintPerMethodLoopHoistStats(); static CritSecObject s_loopHoistStatsLock; // This lock protects the data structures below. static unsigned s_loopsConsidered; static unsigned s_loopsWithHoistedExpressions; static unsigned s_totalHoistedExpressions; static void PrintAggregateLoopHoistStats(FILE* f); #endif // LOOP_HOIST_STATS #if TRACK_ENREG_STATS class EnregisterStats { private: unsigned m_totalNumberOfVars; unsigned m_totalNumberOfStructVars; unsigned m_totalNumberOfEnregVars; unsigned m_totalNumberOfStructEnregVars; unsigned m_addrExposed; unsigned m_VMNeedsStackAddr; unsigned m_localField; unsigned m_blockOp; unsigned m_dontEnregStructs; unsigned m_notRegSizeStruct; unsigned m_structArg; unsigned m_lclAddrNode; unsigned m_castTakesAddr; unsigned m_storeBlkSrc; unsigned m_oneAsgRetyping; unsigned m_swizzleArg; unsigned m_blockOpRet; unsigned m_returnSpCheck; unsigned m_simdUserForcesDep; unsigned m_liveInOutHndlr; unsigned m_depField; unsigned m_noRegVars; unsigned m_minOptsGC; #ifdef JIT32_GCENCODER unsigned m_PinningRef; #endif // JIT32_GCENCODER #if !defined(TARGET_64BIT) unsigned m_longParamField; #endif // !TARGET_64BIT unsigned m_parentExposed; unsigned m_tooConservative; unsigned m_escapeAddress; unsigned m_osrExposed; unsigned m_stressLclFld; unsigned m_copyFldByFld; unsigned m_dispatchRetBuf; unsigned m_wideIndir; public: void RecordLocal(const LclVarDsc* varDsc); void Dump(FILE* fout) const; }; static EnregisterStats s_enregisterStats; #endif // TRACK_ENREG_STATS bool compIsForImportOnly(); bool compIsForInlining() const; bool compDonotInline(); #ifdef DEBUG // Get the default fill char value we randomize this value when JitStress is enabled. 
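    // (Sketch of intent, inferred from the comment above: the fill byte initializes otherwise
    // uninitialized JIT allocations in DEBUG builds so stale reads are recognizable; randomizing it
    // under JitStress helps flush out code that accidentally depends on the default fill value.)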
static unsigned char compGetJitDefaultFill(Compiler* comp); const char* compLocalVarName(unsigned varNum, unsigned offs); VarName compVarName(regNumber reg, bool isFloatReg = false); const char* compRegVarName(regNumber reg, bool displayVar = false, bool isFloatReg = false); const char* compRegNameForSize(regNumber reg, size_t size); const char* compFPregVarName(unsigned fpReg, bool displayVar = false); void compDspSrcLinesByNativeIP(UNATIVE_OFFSET curIP); void compDspSrcLinesByLineNum(unsigned line, bool seek = false); #endif // DEBUG //------------------------------------------------------------------------- struct VarScopeListNode { VarScopeDsc* data; VarScopeListNode* next; static VarScopeListNode* Create(VarScopeDsc* value, CompAllocator alloc) { VarScopeListNode* node = new (alloc) VarScopeListNode; node->data = value; node->next = nullptr; return node; } }; struct VarScopeMapInfo { VarScopeListNode* head; VarScopeListNode* tail; static VarScopeMapInfo* Create(VarScopeListNode* node, CompAllocator alloc) { VarScopeMapInfo* info = new (alloc) VarScopeMapInfo; info->head = node; info->tail = node; return info; } }; // Max value of scope count for which we would use linear search; for larger values we would use hashtable lookup. static const unsigned MAX_LINEAR_FIND_LCL_SCOPELIST = 32; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, VarScopeMapInfo*> VarNumToScopeDscMap; // Map to keep variables' scope indexed by varNum containing it's scope dscs at the index. VarNumToScopeDscMap* compVarScopeMap; VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned lifeBeg, unsigned lifeEnd); VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned offs); VarScopeDsc* compFindLocalVarLinear(unsigned varNum, unsigned offs); void compInitVarScopeMap(); VarScopeDsc** compEnterScopeList; // List has the offsets where variables // enter scope, sorted by instr offset unsigned compNextEnterScope; VarScopeDsc** compExitScopeList; // List has the offsets where variables // go out of scope, sorted by instr offset unsigned compNextExitScope; void compInitScopeLists(); void compResetScopeLists(); VarScopeDsc* compGetNextEnterScope(unsigned offs, bool scan = false); VarScopeDsc* compGetNextExitScope(unsigned offs, bool scan = false); void compProcessScopesUntil(unsigned offset, VARSET_TP* inScope, void (Compiler::*enterScopeFn)(VARSET_TP* inScope, VarScopeDsc*), void (Compiler::*exitScopeFn)(VARSET_TP* inScope, VarScopeDsc*)); #ifdef DEBUG void compDispScopeLists(); #endif // DEBUG bool compIsProfilerHookNeeded(); //------------------------------------------------------------------------- /* Statistical Data Gathering */ void compJitStats(); // call this function and enable // various ifdef's below for statistical data #if CALL_ARG_STATS void compCallArgStats(); static void compDispCallArgStats(FILE* fout); #endif //------------------------------------------------------------------------- protected: #ifdef DEBUG bool skipMethod(); #endif ArenaAllocator* compArenaAllocator; public: void compFunctionTraceStart(); void compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI); protected: size_t compMaxUncheckedOffsetForNullObject; void compInitOptions(JitFlags* compileFlags); void compSetProcessor(); void compInitDebuggingInfo(); void compSetOptimizationLevel(); #ifdef TARGET_ARMARCH bool compRsvdRegCheck(FrameLayoutState curState); #endif void compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags); // Clear annotations produced during optimizations; 
to be used between iterations when repeating opts. void ResetOptAnnotations(); // Regenerate loop descriptors; to be used between iterations when repeating opts. void RecomputeLoopInfo(); #ifdef PROFILING_SUPPORTED // Data required for generating profiler Enter/Leave/TailCall hooks bool compProfilerHookNeeded; // Whether profiler Enter/Leave/TailCall hook needs to be generated for the method void* compProfilerMethHnd; // Profiler handle of the method being compiled. Passed as param to ELT callbacks bool compProfilerMethHndIndirected; // Whether compProfilerHandle is pointer to the handle or is an actual handle #endif public: // Assumes called as part of process shutdown; does any compiler-specific work associated with that. static void ProcessShutdownWork(ICorStaticInfo* statInfo); CompAllocator getAllocator(CompMemKind cmk = CMK_Generic) { return CompAllocator(compArenaAllocator, cmk); } CompAllocator getAllocatorGC() { return getAllocator(CMK_GC); } CompAllocator getAllocatorLoopHoist() { return getAllocator(CMK_LoopHoist); } #ifdef DEBUG CompAllocator getAllocatorDebugOnly() { return getAllocator(CMK_DebugOnly); } #endif // DEBUG /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX typeInfo XX XX XX XX Checks for type compatibility and merges types XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: // Returns true if child is equal to or a subtype of parent for merge purposes // This support is necessary to suport attributes that are not described in // for example, signatures. For example, the permanent home byref (byref that // points to the gc heap), isn't a property of method signatures, therefore, // it is safe to have mismatches here (that tiCompatibleWith will not flag), // but when deciding if we need to reimport a block, we need to take these // in account bool tiMergeCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const; // Returns true if child is equal to or a subtype of parent. // normalisedForStack indicates that both types are normalised for the stack bool tiCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const; // Merges pDest and pSrc. Returns false if merge is undefined. // *pDest is modified to represent the merged type. Sets "*changed" to true // if this changes "*pDest". bool tiMergeToCommonParent(typeInfo* pDest, const typeInfo* pSrc, bool* changed) const; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX IL verification stuff XX XX XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: // The following is used to track liveness of local variables, initialization // of valueclass constructors, and type safe use of IL instructions. // dynamic state info needed for verification EntryState verCurrentState; // this ptr of object type .ctors are considered intited only after // the base class ctor is called, or an alternate ctor is called. // An uninited this ptr can be used to access fields, but cannot // be used to call a member function. 
    bool verTrackObjCtorInitState;

    void verInitBBEntryState(BasicBlock* block, EntryState* currentState);

    // Requires that "tis" is not TIS_Bottom -- it's a definite init/uninit state.
    void verSetThisInit(BasicBlock* block, ThisInitState tis);
    void verInitCurrentState();
    void verResetCurrentState(BasicBlock* block, EntryState* currentState);

    // Merges the current verification state into the entry state of "block", return false if that merge fails,
    // true if it succeeds. Further sets "*changed" to true if this changes the entry state of "block".
    bool verMergeEntryStates(BasicBlock* block, bool* changed);

    void verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg));
    void verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg));

    typeInfo verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd,
                             bool bashStructToRef = false); // converts from jit type representation to typeInfo
    typeInfo verMakeTypeInfo(CorInfoType ciType,
                             CORINFO_CLASS_HANDLE clsHnd); // converts from jit type representation to typeInfo
    bool verIsSDArray(const typeInfo& ti);
    typeInfo verGetArrayElemType(const typeInfo& ti);

    typeInfo verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args);
    bool verIsByRefLike(const typeInfo& ti);
    bool verIsSafeToReturnByRef(const typeInfo& ti);

    // generic type variables range over types that satisfy IsBoxable
    bool verIsBoxable(const typeInfo& ti);

    void DECLSPEC_NORETURN verRaiseVerifyException(INDEBUG(const char* reason) DEBUGARG(const char* file)
                                                       DEBUGARG(unsigned line));
    void verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* reason) DEBUGARG(const char* file)
                                             DEBUGARG(unsigned line));

    bool verCheckTailCallConstraint(OPCODE                  opcode,
                                    CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                    CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call
                                                                                       // on a type parameter?
                                    bool speculative // If true, won't throw if verification fails. Instead it will
                                                     // return false to the caller.
                                                     // If false, it will throw.
                                    );

    bool verIsBoxedValueType(const typeInfo& ti);

    void verVerifyCall(OPCODE                  opcode,
                       CORINFO_RESOLVED_TOKEN* pResolvedToken,
                       CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
                       bool                    tailCall,
                       bool                    readonlyCall, // is this a "readonly." call?
                       const BYTE*             delegateCreateStart,
                       const BYTE*             codeAddr,
                       CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName));

    bool verCheckDelegateCreation(const BYTE* delegateCreateStart, const BYTE* codeAddr, mdMemberRef& targetMemberRef);

    typeInfo verVerifySTIND(const typeInfo& ptr, const typeInfo& value, const typeInfo& instrType);
    typeInfo verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType);
    void verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                        const CORINFO_FIELD_INFO& fieldInfo,
                        const typeInfo* tiThis,
                        bool mutator,
                        bool allowPlainStructAsThis = false);
    void verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode);
    void verVerifyThisPtrInitialised();
    bool verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target);

#ifdef DEBUG

    // One line log function. Default level is 0. Increasing it gives you
    // more log information

    // levels are currently unused: #define JITDUMP(level,...)
(); void JitLogEE(unsigned level, const char* fmt, ...); bool compDebugBreak; bool compJitHaltMethod(); #endif /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX GS Security checks for unsafe buffers XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: struct ShadowParamVarInfo { FixedBitVect* assignGroup; // the closure set of variables whose values depend on each other unsigned shadowCopy; // Lcl var num, if not valid set to BAD_VAR_NUM static bool mayNeedShadowCopy(LclVarDsc* varDsc) { #if defined(TARGET_AMD64) // GS cookie logic to create shadow slots, create trees to copy reg args to shadow // slots and update all trees to refer to shadow slots is done immediately after // fgMorph(). Lsra could potentially mark a param as DoNotEnregister after JIT determines // not to shadow a parameter. Also, LSRA could potentially spill a param which is passed // in register. Therefore, conservatively all params may need a shadow copy. Note that // GS cookie logic further checks whether the param is a ptr or an unsafe buffer before // creating a shadow slot even though this routine returns true. // // TODO-AMD64-CQ: Revisit this conservative approach as it could create more shadow slots than // required. There are two cases under which a reg arg could potentially be used from its // home location: // a) LSRA marks it as DoNotEnregister (see LinearScan::identifyCandidates()) // b) LSRA spills it // // Possible solution to address case (a) // - The conditions under which LSRA marks a varDsc as DoNotEnregister could be checked // in this routine. Note that live out of exception handler is something we may not be // able to do it here since GS cookie logic is invoked ahead of liveness computation. // Therefore, for methods with exception handling and need GS cookie check we might have // to take conservative approach. // // Possible solution to address case (b) // - Whenver a parameter passed in an argument register needs to be spilled by LSRA, we // create a new spill temp if the method needs GS cookie check. return varDsc->lvIsParam; #else // !defined(TARGET_AMD64) return varDsc->lvIsParam && !varDsc->lvIsRegArg; #endif } #ifdef DEBUG void Print() { printf("assignGroup [%p]; shadowCopy: [%d];\n", assignGroup, shadowCopy); } #endif }; GSCookie* gsGlobalSecurityCookieAddr; // Address of global cookie for unsafe buffer checks GSCookie gsGlobalSecurityCookieVal; // Value of global cookie if addr is NULL ShadowParamVarInfo* gsShadowVarInfo; // Table used by shadow param analysis code void gsGSChecksInitCookie(); // Grabs cookie variable void gsCopyShadowParams(); // Identify vulnerable params and create dhadow copies bool gsFindVulnerableParams(); // Shadow param analysis code void gsParamsToShadows(); // Insert copy code and replave param uses by shadow static fgWalkPreFn gsMarkPtrsAndAssignGroups; // Shadow param analysis tree-walk static fgWalkPreFn gsReplaceShadowParams; // Shadow param replacement tree-walk #define DEFAULT_MAX_INLINE_SIZE 100 // Methods with > DEFAULT_MAX_INLINE_SIZE IL bytes will never be inlined. // This can be overwritten by setting complus_JITInlineSize env variable. 
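// Illustrative sketch only (exact knob spelling/prefix comes from the runtime's config mechanism,
// not from this header): setting the JITInlineSize variable mentioned above to e.g. 60 lowers the
// cap, so methods larger than 60 IL bytes are treated as never-inline instead of the default 100.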
#define DEFAULT_MAX_INLINE_DEPTH 20 // Methods at more than this level deep will not be inlined #define DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE 32 // fixed locallocs of this size or smaller will convert to local buffers private: #ifdef FEATURE_JIT_METHOD_PERF JitTimer* pCompJitTimer; // Timer data structure (by phases) for current compilation. static CompTimeSummaryInfo s_compJitTimerSummary; // Summary of the Timer information for the whole run. static LPCWSTR JitTimeLogCsv(); // Retrieve the file name for CSV from ConfigDWORD. static LPCWSTR compJitTimeLogFilename; // If a log file for JIT time is desired, filename to write it to. #endif void BeginPhase(Phases phase); // Indicate the start of the given phase. void EndPhase(Phases phase); // Indicate the end of the given phase. #if MEASURE_CLRAPI_CALLS // Thin wrappers that call into JitTimer (if present). inline void CLRApiCallEnter(unsigned apix); inline void CLRApiCallLeave(unsigned apix); public: inline void CLR_API_Enter(API_ICorJitInfo_Names ename); inline void CLR_API_Leave(API_ICorJitInfo_Names ename); private: #endif #if defined(DEBUG) || defined(INLINE_DATA) // These variables are associated with maintaining SQM data about compile time. unsigned __int64 m_compCyclesAtEndOfInlining; // The thread-virtualized cycle count at the end of the inlining phase // in the current compilation. unsigned __int64 m_compCycles; // Net cycle count for current compilation DWORD m_compTickCountAtEndOfInlining; // The result of GetTickCount() (# ms since some epoch marker) at the end of // the inlining phase in the current compilation. #endif // defined(DEBUG) || defined(INLINE_DATA) // Records the SQM-relevant (cycles and tick count). Should be called after inlining is complete. // (We do this after inlining because this marks the last point at which the JIT is likely to cause // type-loading and class initialization). void RecordStateAtEndOfInlining(); // Assumes being called at the end of compilation. Update the SQM state. void RecordStateAtEndOfCompilation(); public: #if FUNC_INFO_LOGGING static LPCWSTR compJitFuncInfoFilename; // If a log file for per-function information is required, this is the // filename to write it to. static FILE* compJitFuncInfoFile; // And this is the actual FILE* to write to. #endif // FUNC_INFO_LOGGING Compiler* prevCompiler; // Previous compiler on stack for TLS Compiler* linked list for reentrant compilers. #if MEASURE_NOWAY void RecordNowayAssert(const char* filename, unsigned line, const char* condStr); #endif // MEASURE_NOWAY #ifndef FEATURE_TRACELOGGING // Should we actually fire the noway assert body and the exception handler? bool compShouldThrowOnNoway(); #else // FEATURE_TRACELOGGING // Should we actually fire the noway assert body and the exception handler? bool compShouldThrowOnNoway(const char* filename, unsigned line); // Telemetry instance to use per method compilation. JitTelemetry compJitTelemetry; // Get common parameters that have to be logged with most telemetry data. void compGetTelemetryDefaults(const char** assemblyName, const char** scopeName, const char** methodName, unsigned* methodHash); #endif // !FEATURE_TRACELOGGING #ifdef DEBUG private: NodeToTestDataMap* m_nodeTestData; static const unsigned FIRST_LOOP_HOIST_CSE_CLASS = 1000; unsigned m_loopHoistCSEClass; // LoopHoist test annotations turn into CSE requirements; we // label them with CSE Class #'s starting at FIRST_LOOP_HOIST_CSE_CLASS. // Current kept in this. 
public: NodeToTestDataMap* GetNodeTestData() { Compiler* compRoot = impInlineRoot(); if (compRoot->m_nodeTestData == nullptr) { compRoot->m_nodeTestData = new (getAllocatorDebugOnly()) NodeToTestDataMap(getAllocatorDebugOnly()); } return compRoot->m_nodeTestData; } typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, int> NodeToIntMap; // Returns the set (i.e., the domain of the result map) of nodes that are keys in m_nodeTestData, and // currently occur in the AST graph. NodeToIntMap* FindReachableNodesInNodeTestData(); // Node "from" is being eliminated, and being replaced by node "to". If "from" had any associated // test data, associate that data with "to". void TransferTestDataToNode(GenTree* from, GenTree* to); // These are the methods that test that the various conditions implied by the // test attributes are satisfied. void JitTestCheckSSA(); // SSA builder tests. void JitTestCheckVN(); // Value numbering tests. #endif // DEBUG // The "FieldSeqStore", for canonicalizing field sequences. See the definition of FieldSeqStore for // operations. FieldSeqStore* m_fieldSeqStore; FieldSeqStore* GetFieldSeqStore() { Compiler* compRoot = impInlineRoot(); if (compRoot->m_fieldSeqStore == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_FieldSeqStore, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_FieldSeqStore)); compRoot->m_fieldSeqStore = new (ialloc) FieldSeqStore(ialloc); } return compRoot->m_fieldSeqStore; } typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, FieldSeqNode*> NodeToFieldSeqMap; // Some nodes of "TYP_BYREF" or "TYP_I_IMPL" actually represent the address of a field within a struct, but since // the offset of the field is zero, there's no "GT_ADD" node. We normally attach a field sequence to the constant // that is added, but what do we do when that constant is zero, and is thus not present? We use this mechanism to // attach the field sequence directly to the address node. NodeToFieldSeqMap* m_zeroOffsetFieldMap; NodeToFieldSeqMap* GetZeroOffsetFieldMap() { // Don't need to worry about inlining here if (m_zeroOffsetFieldMap == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ZeroOffsetFieldMap, and use that for // allocation. CompAllocator ialloc(getAllocator(CMK_ZeroOffsetFieldMap)); m_zeroOffsetFieldMap = new (ialloc) NodeToFieldSeqMap(ialloc); } return m_zeroOffsetFieldMap; } // Requires that "op1" is a node of type "TYP_BYREF" or "TYP_I_IMPL". We are dereferencing this with the fields in // "fieldSeq", whose offsets are required all to be zero. Ensures that any field sequence annotation currently on // "op1" or its components is augmented by appending "fieldSeq". In practice, if "op1" is a GT_LCL_FLD, it has // a field sequence as a member; otherwise, it may be the addition of an a byref and a constant, where the const // has a field sequence -- in this case "fieldSeq" is appended to that of the constant; otherwise, we // record the the field sequence using the ZeroOffsetFieldMap described above. // // One exception above is that "op1" is a node of type "TYP_REF" where "op1" is a GT_LCL_VAR. // This happens when System.Object vtable pointer is a regular field at offset 0 in System.Private.CoreLib in // CoreRT. Such case is handled same as the default case. 
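    //
    // Illustrative sketch (hypothetical tree shapes, not actual importer code): for a field that
    // happens to live at offset 0 the address tree is just
    //     GT_ADDR(GT_LCL_VAR V01)        // no GT_ADD(addr, 0) exists to carry the annotation
    // so the caller takes a FieldSeqNode* obtained from GetFieldSeqStore() and records it with
    //     fgAddFieldSeqForZeroOffset(addr, fieldSeq);
    // after which the sequence lives either on the node itself (e.g. a GT_LCL_FLD) or in the
    // ZeroOffsetFieldMap described above.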
void fgAddFieldSeqForZeroOffset(GenTree* op1, FieldSeqNode* fieldSeq); typedef JitHashTable<const GenTree*, JitPtrKeyFuncs<GenTree>, ArrayInfo> NodeToArrayInfoMap; NodeToArrayInfoMap* m_arrayInfoMap; NodeToArrayInfoMap* GetArrayInfoMap() { Compiler* compRoot = impInlineRoot(); if (compRoot->m_arrayInfoMap == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_ArrayInfoMap)); compRoot->m_arrayInfoMap = new (ialloc) NodeToArrayInfoMap(ialloc); } return compRoot->m_arrayInfoMap; } //----------------------------------------------------------------------------------------------------------------- // Compiler::TryGetArrayInfo: // Given an indirection node, checks to see whether or not that indirection represents an array access, and // if so returns information about the array. // // Arguments: // indir - The `GT_IND` node. // arrayInfo (out) - Information about the accessed array if this function returns true. Undefined otherwise. // // Returns: // True if the `GT_IND` node represents an array access; false otherwise. bool TryGetArrayInfo(GenTreeIndir* indir, ArrayInfo* arrayInfo) { if ((indir->gtFlags & GTF_IND_ARR_INDEX) == 0) { return false; } if (indir->gtOp1->OperIs(GT_INDEX_ADDR)) { GenTreeIndexAddr* const indexAddr = indir->gtOp1->AsIndexAddr(); *arrayInfo = ArrayInfo(indexAddr->gtElemType, indexAddr->gtElemSize, indexAddr->gtElemOffset, indexAddr->gtStructElemClass); return true; } bool found = GetArrayInfoMap()->Lookup(indir, arrayInfo); assert(found); return true; } NodeToUnsignedMap* m_memorySsaMap[MemoryKindCount]; // In some cases, we want to assign intermediate SSA #'s to memory states, and know what nodes create those memory // states. (We do this for try blocks, where, if the try block doesn't do a call that loses track of the memory // state, all the possible memory states are possible initial states of the corresponding catch block(s).) NodeToUnsignedMap* GetMemorySsaMap(MemoryKind memoryKind) { if (memoryKind == GcHeap && byrefStatesMatchGcHeapStates) { // Use the same map for GCHeap and ByrefExposed when their states match. memoryKind = ByrefExposed; } assert(memoryKind < MemoryKindCount); Compiler* compRoot = impInlineRoot(); if (compRoot->m_memorySsaMap[memoryKind] == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_ArrayInfoMap)); compRoot->m_memorySsaMap[memoryKind] = new (ialloc) NodeToUnsignedMap(ialloc); } return compRoot->m_memorySsaMap[memoryKind]; } // The Refany type is the only struct type whose structure is implicitly assumed by IL. We need its fields. 
CORINFO_CLASS_HANDLE m_refAnyClass; CORINFO_FIELD_HANDLE GetRefanyDataField() { if (m_refAnyClass == nullptr) { m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); } return info.compCompHnd->getFieldInClass(m_refAnyClass, 0); } CORINFO_FIELD_HANDLE GetRefanyTypeField() { if (m_refAnyClass == nullptr) { m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); } return info.compCompHnd->getFieldInClass(m_refAnyClass, 1); } #if VARSET_COUNTOPS static BitSetSupport::BitSetOpCounter m_varsetOpCounter; #endif #if ALLVARSET_COUNTOPS static BitSetSupport::BitSetOpCounter m_allvarsetOpCounter; #endif static HelperCallProperties s_helperCallProperties; #ifdef UNIX_AMD64_ABI static var_types GetTypeFromClassificationAndSizes(SystemVClassificationType classType, int size); static var_types GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, unsigned slotNum); static void GetStructTypeOffset(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1); void GetStructTypeOffset(CORINFO_CLASS_HANDLE typeHnd, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1); #endif // defined(UNIX_AMD64_ABI) void fgMorphMultiregStructArgs(GenTreeCall* call); GenTree* fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntryPtr); bool killGCRefs(GenTree* tree); }; // end of class Compiler //--------------------------------------------------------------------------------------------------------------------- // GenTreeVisitor: a flexible tree walker implemented using the curiously-recurring-template pattern. // // This class implements a configurable walker for IR trees. There are five configuration options (defaults values are // shown in parentheses): // // - ComputeStack (false): when true, the walker will push each node onto the `m_ancestors` stack. "Ancestors" is a bit // of a misnomer, as the first entry will always be the current node. // // - DoPreOrder (false): when true, the walker will invoke `TVisitor::PreOrderVisit` with the current node as an // argument before visiting the node's operands. // // - DoPostOrder (false): when true, the walker will invoke `TVisitor::PostOrderVisit` with the current node as an // argument after visiting the node's operands. // // - DoLclVarsOnly (false): when true, the walker will only invoke `TVisitor::PreOrderVisit` for lclVar nodes. // `DoPreOrder` must be true if this option is true. // // - UseExecutionOrder (false): when true, then walker will visit a node's operands in execution order (e.g. if a // binary operator has the `GTF_REVERSE_OPS` flag set, the second operand will be // visited before the first). // // At least one of `DoPreOrder` and `DoPostOrder` must be specified. 
// // A simple pre-order visitor might look something like the following: // // class CountingVisitor final : public GenTreeVisitor<CountingVisitor> // { // public: // enum // { // DoPreOrder = true // }; // // unsigned m_count; // // CountingVisitor(Compiler* compiler) // : GenTreeVisitor<CountingVisitor>(compiler), m_count(0) // { // } // // Compiler::fgWalkResult PreOrderVisit(GenTree* node) // { // m_count++; // } // }; // // This visitor would then be used like so: // // CountingVisitor countingVisitor(compiler); // countingVisitor.WalkTree(root); // template <typename TVisitor> class GenTreeVisitor { protected: typedef Compiler::fgWalkResult fgWalkResult; enum { ComputeStack = false, DoPreOrder = false, DoPostOrder = false, DoLclVarsOnly = false, UseExecutionOrder = false, }; Compiler* m_compiler; ArrayStack<GenTree*> m_ancestors; GenTreeVisitor(Compiler* compiler) : m_compiler(compiler), m_ancestors(compiler->getAllocator(CMK_ArrayStack)) { assert(compiler != nullptr); static_assert_no_msg(TVisitor::DoPreOrder || TVisitor::DoPostOrder); static_assert_no_msg(!TVisitor::DoLclVarsOnly || TVisitor::DoPreOrder); } fgWalkResult PreOrderVisit(GenTree** use, GenTree* user) { return fgWalkResult::WALK_CONTINUE; } fgWalkResult PostOrderVisit(GenTree** use, GenTree* user) { return fgWalkResult::WALK_CONTINUE; } public: fgWalkResult WalkTree(GenTree** use, GenTree* user) { assert(use != nullptr); GenTree* node = *use; if (TVisitor::ComputeStack) { m_ancestors.Push(node); } fgWalkResult result = fgWalkResult::WALK_CONTINUE; if (TVisitor::DoPreOrder && !TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } node = *use; if ((node == nullptr) || (result == fgWalkResult::WALK_SKIP_SUBTREES)) { goto DONE; } } switch (node->OperGet()) { // Leaf lclVars case GT_LCL_VAR: case GT_LCL_FLD: case GT_LCL_VAR_ADDR: case GT_LCL_FLD_ADDR: if (TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } } FALLTHROUGH; // Leaf nodes case GT_CATCH_ARG: case GT_LABEL: case GT_FTN_ADDR: case GT_RET_EXPR: case GT_CNS_INT: case GT_CNS_LNG: case GT_CNS_DBL: case GT_CNS_STR: case GT_MEMORYBARRIER: case GT_JMP: case GT_JCC: case GT_SETCC: case GT_NO_OP: case GT_START_NONGC: case GT_START_PREEMPTGC: case GT_PROF_HOOK: #if !defined(FEATURE_EH_FUNCLETS) case GT_END_LFIN: #endif // !FEATURE_EH_FUNCLETS case GT_PHI_ARG: case GT_JMPTABLE: case GT_CLS_VAR: case GT_CLS_VAR_ADDR: case GT_ARGPLACE: case GT_PHYSREG: case GT_EMITNOP: case GT_PINVOKE_PROLOG: case GT_PINVOKE_EPILOG: case GT_IL_OFFSET: break; // Lclvar unary operators case GT_STORE_LCL_VAR: case GT_STORE_LCL_FLD: if (TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } } FALLTHROUGH; // Standard unary operators case GT_NOT: case GT_NEG: case GT_BSWAP: case GT_BSWAP16: case GT_COPY: case GT_RELOAD: case GT_ARR_LENGTH: case GT_CAST: case GT_BITCAST: case GT_CKFINITE: case GT_LCLHEAP: case GT_ADDR: case GT_IND: case GT_OBJ: case GT_BLK: case GT_BOX: case GT_ALLOCOBJ: case GT_INIT_VAL: case GT_JTRUE: case GT_SWITCH: case GT_NULLCHECK: case GT_PUTARG_REG: case GT_PUTARG_STK: case GT_PUTARG_TYPE: case GT_RETURNTRAP: case GT_NOP: case GT_FIELD: case GT_RETURN: case GT_RETFILT: case GT_RUNTIMELOOKUP: case GT_KEEPALIVE: case GT_INC_SATURATE: { GenTreeUnOp* const unOp = 
node->AsUnOp(); if (unOp->gtOp1 != nullptr) { result = WalkTree(&unOp->gtOp1, unOp); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } // Special nodes case GT_PHI: for (GenTreePhi::Use& use : node->AsPhi()->Uses()) { result = WalkTree(&use.NodeRef(), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; case GT_FIELD_LIST: for (GenTreeFieldList::Use& use : node->AsFieldList()->Uses()) { result = WalkTree(&use.NodeRef(), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; case GT_CMPXCHG: { GenTreeCmpXchg* const cmpXchg = node->AsCmpXchg(); result = WalkTree(&cmpXchg->gtOpLocation, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&cmpXchg->gtOpValue, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&cmpXchg->gtOpComparand, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_ARR_ELEM: { GenTreeArrElem* const arrElem = node->AsArrElem(); result = WalkTree(&arrElem->gtArrObj, arrElem); if (result == fgWalkResult::WALK_ABORT) { return result; } const unsigned rank = arrElem->gtArrRank; for (unsigned dim = 0; dim < rank; dim++) { result = WalkTree(&arrElem->gtArrInds[dim], arrElem); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } case GT_ARR_OFFSET: { GenTreeArrOffs* const arrOffs = node->AsArrOffs(); result = WalkTree(&arrOffs->gtOffset, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&arrOffs->gtIndex, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&arrOffs->gtArrObj, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_STORE_DYN_BLK: { GenTreeStoreDynBlk* const dynBlock = node->AsStoreDynBlk(); GenTree** op1Use = &dynBlock->gtOp1; GenTree** op2Use = &dynBlock->gtOp2; GenTree** op3Use = &dynBlock->gtDynamicSize; result = WalkTree(op1Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(op2Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(op3Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_CALL: { GenTreeCall* const call = node->AsCall(); if (call->gtCallThisArg != nullptr) { result = WalkTree(&call->gtCallThisArg->NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } for (GenTreeCall::Use& use : call->Args()) { result = WalkTree(&use.NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } for (GenTreeCall::Use& use : call->LateArgs()) { result = WalkTree(&use.NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (call->gtCallType == CT_INDIRECT) { if (call->gtCallCookie != nullptr) { result = WalkTree(&call->gtCallCookie, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } result = WalkTree(&call->gtCallAddr, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (call->gtControlExpr != nullptr) { result = WalkTree(&call->gtControlExpr, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) #if defined(FEATURE_SIMD) case GT_SIMD: #endif #if defined(FEATURE_HW_INTRINSICS) case GT_HWINTRINSIC: #endif if (TVisitor::UseExecutionOrder && node->IsReverseOp()) { assert(node->AsMultiOp()->GetOperandCount() == 2); result = WalkTree(&node->AsMultiOp()->Op(2), node); if (result == 
fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&node->AsMultiOp()->Op(1), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } else { for (GenTree** use : node->AsMultiOp()->UseEdges()) { result = WalkTree(use, node); if (result == fgWalkResult::WALK_ABORT) { return result; } } } break; #endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) // Binary nodes default: { assert(node->OperIsBinary()); GenTreeOp* const op = node->AsOp(); GenTree** op1Use = &op->gtOp1; GenTree** op2Use = &op->gtOp2; if (TVisitor::UseExecutionOrder && node->IsReverseOp()) { std::swap(op1Use, op2Use); } if (*op1Use != nullptr) { result = WalkTree(op1Use, op); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (*op2Use != nullptr) { result = WalkTree(op2Use, op); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } } DONE: // Finally, visit the current node if (TVisitor::DoPostOrder) { result = reinterpret_cast<TVisitor*>(this)->PostOrderVisit(use, user); } if (TVisitor::ComputeStack) { m_ancestors.Pop(); } return result; } }; template <bool computeStack, bool doPreOrder, bool doPostOrder, bool doLclVarsOnly, bool useExecutionOrder> class GenericTreeWalker final : public GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>> { public: enum { ComputeStack = computeStack, DoPreOrder = doPreOrder, DoPostOrder = doPostOrder, DoLclVarsOnly = doLclVarsOnly, UseExecutionOrder = useExecutionOrder, }; private: Compiler::fgWalkData* m_walkData; public: GenericTreeWalker(Compiler::fgWalkData* walkData) : GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>>( walkData->compiler) , m_walkData(walkData) { assert(walkData != nullptr); if (computeStack) { walkData->parentStack = &this->m_ancestors; } } Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user) { m_walkData->parent = user; return m_walkData->wtprVisitorFn(use, m_walkData); } Compiler::fgWalkResult PostOrderVisit(GenTree** use, GenTree* user) { m_walkData->parent = user; return m_walkData->wtpoVisitorFn(use, m_walkData); } }; // A dominator tree visitor implemented using the curiously-recurring-template pattern, similar to GenTreeVisitor. template <typename TVisitor> class DomTreeVisitor { protected: Compiler* const m_compiler; DomTreeNode* const m_domTree; DomTreeVisitor(Compiler* compiler, DomTreeNode* domTree) : m_compiler(compiler), m_domTree(domTree) { } void Begin() { } void PreOrderVisit(BasicBlock* block) { } void PostOrderVisit(BasicBlock* block) { } void End() { } public: //------------------------------------------------------------------------ // WalkTree: Walk the dominator tree, starting from fgFirstBB. // // Notes: // This performs a non-recursive, non-allocating walk of the tree by using // DomTreeNode's firstChild and nextSibling links to locate the children of // a node and BasicBlock's bbIDom parent link to go back up the tree when // no more children are left. // // Forests are also supported, provided that all the roots are chained via // DomTreeNode::nextSibling to fgFirstBB. 
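    //
    // A minimal visitor sketch (hypothetical "BlockCounter"; it mirrors the CountingVisitor example
    // shown for GenTreeVisitor above, and "domTree" stands for whichever DomTreeNode* table the
    // caller has built):
    //
    //     class BlockCounter final : public DomTreeVisitor<BlockCounter>
    //     {
    //     public:
    //         unsigned m_count;
    //
    //         BlockCounter(Compiler* compiler, DomTreeNode* domTree)
    //             : DomTreeVisitor<BlockCounter>(compiler, domTree), m_count(0)
    //         {
    //         }
    //
    //         void PreOrderVisit(BasicBlock* block)
    //         {
    //             m_count++;
    //         }
    //     };
    //
    //     BlockCounter counter(compiler, domTree);
    //     counter.WalkTree();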
// void WalkTree() { static_cast<TVisitor*>(this)->Begin(); for (BasicBlock *next, *block = m_compiler->fgFirstBB; block != nullptr; block = next) { static_cast<TVisitor*>(this)->PreOrderVisit(block); next = m_domTree[block->bbNum].firstChild; if (next != nullptr) { assert(next->bbIDom == block); continue; } do { static_cast<TVisitor*>(this)->PostOrderVisit(block); next = m_domTree[block->bbNum].nextSibling; if (next != nullptr) { assert(next->bbIDom == block->bbIDom); break; } block = block->bbIDom; } while (block != nullptr); } static_cast<TVisitor*>(this)->End(); } }; // EHClauses: adapter class for forward iteration of the exception handling table using range-based `for`, e.g.: // for (EHblkDsc* const ehDsc : EHClauses(compiler)) // class EHClauses { EHblkDsc* m_begin; EHblkDsc* m_end; // Forward iterator for the exception handling table entries. Iteration is in table order. // class iterator { EHblkDsc* m_ehDsc; public: iterator(EHblkDsc* ehDsc) : m_ehDsc(ehDsc) { } EHblkDsc* operator*() const { return m_ehDsc; } iterator& operator++() { ++m_ehDsc; return *this; } bool operator!=(const iterator& i) const { return m_ehDsc != i.m_ehDsc; } }; public: EHClauses(Compiler* comp) : m_begin(comp->compHndBBtab), m_end(comp->compHndBBtab + comp->compHndBBtabCount) { assert((m_begin != nullptr) || (m_begin == m_end)); } iterator begin() const { return iterator(m_begin); } iterator end() const { return iterator(m_end); } }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Miscellaneous Compiler stuff XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // Values used to mark the types a stack slot is used for const unsigned TYPE_REF_INT = 0x01; // slot used as a 32-bit int const unsigned TYPE_REF_LNG = 0x02; // slot used as a 64-bit long const unsigned TYPE_REF_FLT = 0x04; // slot used as a 32-bit float const unsigned TYPE_REF_DBL = 0x08; // slot used as a 64-bit float const unsigned TYPE_REF_PTR = 0x10; // slot used as a 32-bit pointer const unsigned TYPE_REF_BYR = 0x20; // slot used as a byref pointer const unsigned TYPE_REF_STC = 0x40; // slot used as a struct const unsigned TYPE_REF_TYPEMASK = 0x7F; // bits that represent the type // const unsigned TYPE_REF_ADDR_TAKEN = 0x80; // slots address was taken /***************************************************************************** * * Variables to keep track of total code amounts. */ #if DISPLAY_SIZES extern size_t grossVMsize; extern size_t grossNCsize; extern size_t totalNCsize; extern unsigned genMethodICnt; extern unsigned genMethodNCnt; extern size_t gcHeaderISize; extern size_t gcPtrMapISize; extern size_t gcHeaderNSize; extern size_t gcPtrMapNSize; #endif // DISPLAY_SIZES /***************************************************************************** * * Variables to keep track of basic block counts (more data on 1 BB methods) */ #if COUNT_BASIC_BLOCKS extern Histogram bbCntTable; extern Histogram bbOneBBSizeTable; #endif /***************************************************************************** * * Used by optFindNaturalLoops to gather statistical information such as * - total number of natural loops * - number of loops with 1, 2, ... 
exit conditions * - number of loops that have an iterator (for like) * - number of loops that have a constant iterator */ #if COUNT_LOOPS extern unsigned totalLoopMethods; // counts the total number of methods that have natural loops extern unsigned maxLoopsPerMethod; // counts the maximum number of loops a method has extern unsigned totalLoopOverflows; // # of methods that identified more loops than we can represent extern unsigned totalLoopCount; // counts the total number of natural loops extern unsigned totalUnnatLoopCount; // counts the total number of (not-necessarily natural) loops extern unsigned totalUnnatLoopOverflows; // # of methods that identified more unnatural loops than we can represent extern unsigned iterLoopCount; // counts the # of loops with an iterator (for like) extern unsigned simpleTestLoopCount; // counts the # of loops with an iterator and a simple loop condition (iter < // const) extern unsigned constIterLoopCount; // counts the # of loops with a constant iterator (for like) extern bool hasMethodLoops; // flag to keep track if we already counted a method as having loops extern unsigned loopsThisMethod; // counts the number of loops in the current method extern bool loopOverflowThisMethod; // True if we exceeded the max # of loops in the method. extern Histogram loopCountTable; // Histogram of loop counts extern Histogram loopExitCountTable; // Histogram of loop exit counts #endif // COUNT_LOOPS /***************************************************************************** * variables to keep track of how many iterations we go in a dataflow pass */ #if DATAFLOW_ITER extern unsigned CSEiterCount; // counts the # of iteration for the CSE dataflow extern unsigned CFiterCount; // counts the # of iteration for the Const Folding dataflow #endif // DATAFLOW_ITER #if MEASURE_BLOCK_SIZE extern size_t genFlowNodeSize; extern size_t genFlowNodeCnt; #endif // MEASURE_BLOCK_SIZE #if MEASURE_NODE_SIZE struct NodeSizeStats { void Init() { genTreeNodeCnt = 0; genTreeNodeSize = 0; genTreeNodeActualSize = 0; } // Count of tree nodes allocated. unsigned __int64 genTreeNodeCnt; // The size we allocate. unsigned __int64 genTreeNodeSize; // The actual size of the node. Note that the actual size will likely be smaller // than the allocated size, but we sometimes use SetOper()/ChangeOper() to change // a smaller node to a larger one. TODO-Cleanup: add stats on // SetOper()/ChangeOper() usage to quantify this. unsigned __int64 genTreeNodeActualSize; }; extern NodeSizeStats genNodeSizeStats; // Total node size stats extern NodeSizeStats genNodeSizeStatsPerFunc; // Per-function node size stats extern Histogram genTreeNcntHist; extern Histogram genTreeNsizHist; #endif // MEASURE_NODE_SIZE /***************************************************************************** * Count fatal errors (including noway_asserts). 
*/ #if MEASURE_FATAL extern unsigned fatal_badCode; extern unsigned fatal_noWay; extern unsigned fatal_implLimitation; extern unsigned fatal_NOMEM; extern unsigned fatal_noWayAssertBody; #ifdef DEBUG extern unsigned fatal_noWayAssertBodyArgs; #endif // DEBUG extern unsigned fatal_NYI; #endif // MEASURE_FATAL /***************************************************************************** * Codegen */ #ifdef TARGET_XARCH const instruction INS_SHIFT_LEFT_LOGICAL = INS_shl; const instruction INS_SHIFT_RIGHT_LOGICAL = INS_shr; const instruction INS_SHIFT_RIGHT_ARITHM = INS_sar; const instruction INS_AND = INS_and; const instruction INS_OR = INS_or; const instruction INS_XOR = INS_xor; const instruction INS_NEG = INS_neg; const instruction INS_TEST = INS_test; const instruction INS_MUL = INS_imul; const instruction INS_SIGNED_DIVIDE = INS_idiv; const instruction INS_UNSIGNED_DIVIDE = INS_div; const instruction INS_BREAKPOINT = INS_int3; const instruction INS_ADDC = INS_adc; const instruction INS_SUBC = INS_sbb; const instruction INS_NOT = INS_not; #endif // TARGET_XARCH #ifdef TARGET_ARM const instruction INS_SHIFT_LEFT_LOGICAL = INS_lsl; const instruction INS_SHIFT_RIGHT_LOGICAL = INS_lsr; const instruction INS_SHIFT_RIGHT_ARITHM = INS_asr; const instruction INS_AND = INS_and; const instruction INS_OR = INS_orr; const instruction INS_XOR = INS_eor; const instruction INS_NEG = INS_rsb; const instruction INS_TEST = INS_tst; const instruction INS_MUL = INS_mul; const instruction INS_MULADD = INS_mla; const instruction INS_SIGNED_DIVIDE = INS_sdiv; const instruction INS_UNSIGNED_DIVIDE = INS_udiv; const instruction INS_BREAKPOINT = INS_bkpt; const instruction INS_ADDC = INS_adc; const instruction INS_SUBC = INS_sbc; const instruction INS_NOT = INS_mvn; const instruction INS_ABS = INS_vabs; const instruction INS_SQRT = INS_vsqrt; #endif // TARGET_ARM #ifdef TARGET_ARM64 const instruction INS_MULADD = INS_madd; inline const instruction INS_BREAKPOINT_osHelper() { // GDB needs the encoding of brk #0 // Windbg needs the encoding of brk #F000 return TargetOS::IsUnix ? INS_brk_unix : INS_brk_windows; } #define INS_BREAKPOINT INS_BREAKPOINT_osHelper() const instruction INS_ABS = INS_fabs; const instruction INS_SQRT = INS_fsqrt; #endif // TARGET_ARM64 /*****************************************************************************/ extern const BYTE genTypeSizes[]; extern const BYTE genTypeAlignments[]; extern const BYTE genTypeStSzs[]; extern const BYTE genActualTypes[]; /*****************************************************************************/ #ifdef DEBUG void dumpConvertedVarSet(Compiler* comp, VARSET_VALARG_TP vars); #endif // DEBUG #include "compiler.hpp" // All the shared inline functions /*****************************************************************************/ #endif //_COMPILER_H_ /*****************************************************************************/
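// Note: the INS_* aliases defined above (INS_SHIFT_LEFT_LOGICAL, INS_BREAKPOINT, etc.) exist so that
// target-independent codegen can name common operations without switching on the target. A minimal
// sketch (hypothetical call site; the emitter call and operands are illustrative only, not a
// prescribed API):
//
//   // emit "left shift by 3" for whichever target this JIT was built for:
//   // GetEmitter()->emitIns_R_I(INS_SHIFT_LEFT_LOGICAL, EA_4BYTE, targetReg, 3);
//
// On xarch the alias resolves to INS_shl and on arm to INS_lsl; the call site itself stays
// target-neutral.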
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Compiler XX XX XX XX Represents the method data we are currently JIT-compiling. XX XX An instance of this class is created for every method we JIT. XX XX This contains all the info needed for the method. So allocating a XX XX a new instance per method makes it thread-safe. XX XX It should be used to do all the memory management for the compiler run. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ #ifndef _COMPILER_H_ #define _COMPILER_H_ /*****************************************************************************/ #include "jit.h" #include "opcode.h" #include "varset.h" #include "jitstd.h" #include "jithashtable.h" #include "gentree.h" #include "debuginfo.h" #include "lir.h" #include "block.h" #include "inline.h" #include "jiteh.h" #include "instr.h" #include "regalloc.h" #include "sm.h" #include "cycletimer.h" #include "blockset.h" #include "arraystack.h" #include "hashbv.h" #include "jitexpandarray.h" #include "tinyarray.h" #include "valuenum.h" #include "jittelemetry.h" #include "namedintrinsiclist.h" #ifdef LATE_DISASM #include "disasm.h" #endif #include "codegeninterface.h" #include "regset.h" #include "jitgcinfo.h" #if DUMP_GC_TABLES && defined(JIT32_GCENCODER) #include "gcdump.h" #endif #include "emit.h" #include "hwintrinsic.h" #include "simd.h" #include "simdashwintrinsic.h" // This is only used locally in the JIT to indicate that // a verification block should be inserted #define SEH_VERIFICATION_EXCEPTION 0xe0564552 // VER /***************************************************************************** * Forward declarations */ struct InfoHdr; // defined in GCInfo.h struct escapeMapping_t; // defined in fgdiagnostic.cpp class emitter; // defined in emit.h struct ShadowParamVarInfo; // defined in GSChecks.cpp struct InitVarDscInfo; // defined in register_arg_convention.h class FgStack; // defined in fgbasic.cpp class Instrumentor; // defined in fgprofile.cpp class SpanningTreeVisitor; // defined in fgprofile.cpp class CSE_DataFlow; // defined in OptCSE.cpp class OptBoolsDsc; // defined in optimizer.cpp #ifdef DEBUG struct IndentStack; #endif class Lowering; // defined in lower.h // The following are defined in this file, Compiler.h class Compiler; /***************************************************************************** * Unwind info */ #include "unwind.h" /*****************************************************************************/ // // Declare global operator new overloads that use the compiler's arena allocator // // I wanted to make the second argument optional, with default = CMK_Unknown, but that // caused these to be ambiguous with the global placement new operators. void* __cdecl operator new(size_t n, Compiler* context, CompMemKind cmk); void* __cdecl operator new[](size_t n, Compiler* context, CompMemKind cmk); void* __cdecl operator new(size_t n, void* p, const jitstd::placement_t& syntax_difference); // Requires the definitions of "operator new" so including "LoopCloning.h" after the definitions. 
#include "loopcloning.h" /*****************************************************************************/ /* This is included here and not earlier as it needs the definition of "CSE" * which is defined in the section above */ /*****************************************************************************/ unsigned genLog2(unsigned value); unsigned genLog2(unsigned __int64 value); unsigned ReinterpretHexAsDecimal(unsigned in); /*****************************************************************************/ const unsigned FLG_CCTOR = (CORINFO_FLG_CONSTRUCTOR | CORINFO_FLG_STATIC); #ifdef DEBUG const int BAD_STK_OFFS = 0xBAADF00D; // for LclVarDsc::lvStkOffs #endif //------------------------------------------------------------------------ // HFA info shared by LclVarDsc and fgArgTabEntry //------------------------------------------------------------------------ inline bool IsHfa(CorInfoHFAElemType kind) { return kind != CORINFO_HFA_ELEM_NONE; } inline var_types HfaTypeFromElemKind(CorInfoHFAElemType kind) { switch (kind) { case CORINFO_HFA_ELEM_FLOAT: return TYP_FLOAT; case CORINFO_HFA_ELEM_DOUBLE: return TYP_DOUBLE; #ifdef FEATURE_SIMD case CORINFO_HFA_ELEM_VECTOR64: return TYP_SIMD8; case CORINFO_HFA_ELEM_VECTOR128: return TYP_SIMD16; #endif case CORINFO_HFA_ELEM_NONE: return TYP_UNDEF; default: assert(!"Invalid HfaElemKind"); return TYP_UNDEF; } } inline CorInfoHFAElemType HfaElemKindFromType(var_types type) { switch (type) { case TYP_FLOAT: return CORINFO_HFA_ELEM_FLOAT; case TYP_DOUBLE: return CORINFO_HFA_ELEM_DOUBLE; #ifdef FEATURE_SIMD case TYP_SIMD8: return CORINFO_HFA_ELEM_VECTOR64; case TYP_SIMD16: return CORINFO_HFA_ELEM_VECTOR128; #endif case TYP_UNDEF: return CORINFO_HFA_ELEM_NONE; default: assert(!"Invalid HFA Type"); return CORINFO_HFA_ELEM_NONE; } } // The following holds the Local var info (scope information) typedef const char* VarName; // Actual ASCII string struct VarScopeDsc { unsigned vsdVarNum; // (remapped) LclVarDsc number unsigned vsdLVnum; // 'which' in eeGetLVinfo(). // Also, it is the index of this entry in the info.compVarScopes array, // which is useful since the array is also accessed via the // compEnterScopeList and compExitScopeList sorted arrays. IL_OFFSET vsdLifeBeg; // instr offset of beg of life IL_OFFSET vsdLifeEnd; // instr offset of end of life #ifdef DEBUG VarName vsdName; // name of the var #endif }; // This class stores information associated with a LclVar SSA definition. class LclSsaVarDsc { // The basic block where the definition occurs. Definitions of uninitialized variables // are considered to occur at the start of the first basic block (fgFirstBB). // // TODO-Cleanup: In the case of uninitialized variables the block is set to nullptr by // SsaBuilder and changed to fgFirstBB during value numbering. It would be useful to // investigate and perhaps eliminate this rather unexpected behavior. BasicBlock* m_block; // The GT_ASG node that generates the definition, or nullptr for definitions // of uninitialized variables. 
GenTreeOp* m_asg; public: LclSsaVarDsc() : m_block(nullptr), m_asg(nullptr) { } LclSsaVarDsc(BasicBlock* block, GenTreeOp* asg) : m_block(block), m_asg(asg) { assert((asg == nullptr) || asg->OperIs(GT_ASG)); } BasicBlock* GetBlock() const { return m_block; } void SetBlock(BasicBlock* block) { m_block = block; } GenTreeOp* GetAssignment() const { return m_asg; } void SetAssignment(GenTreeOp* asg) { assert((asg == nullptr) || asg->OperIs(GT_ASG)); m_asg = asg; } ValueNumPair m_vnPair; }; // This class stores information associated with a memory SSA definition. class SsaMemDef { public: ValueNumPair m_vnPair; }; //------------------------------------------------------------------------ // SsaDefArray: A resizable array of SSA definitions. // // Unlike an ordinary resizable array implementation, this allows only element // addition (by calling AllocSsaNum) and has special handling for RESERVED_SSA_NUM // (basically it's a 1-based array). The array doesn't impose any particular // requirements on the elements it stores and AllocSsaNum forwards its arguments // to the array element constructor, this way the array supports both LclSsaVarDsc // and SsaMemDef elements. // template <typename T> class SsaDefArray { T* m_array; unsigned m_arraySize; unsigned m_count; static_assert_no_msg(SsaConfig::RESERVED_SSA_NUM == 0); static_assert_no_msg(SsaConfig::FIRST_SSA_NUM == 1); // Get the minimum valid SSA number. unsigned GetMinSsaNum() const { return SsaConfig::FIRST_SSA_NUM; } // Increase (double) the size of the array. void GrowArray(CompAllocator alloc) { unsigned oldSize = m_arraySize; unsigned newSize = max(2, oldSize * 2); T* newArray = alloc.allocate<T>(newSize); for (unsigned i = 0; i < oldSize; i++) { newArray[i] = m_array[i]; } m_array = newArray; m_arraySize = newSize; } public: // Construct an empty SsaDefArray. SsaDefArray() : m_array(nullptr), m_arraySize(0), m_count(0) { } // Reset the array (used only if the SSA form is reconstructed). void Reset() { m_count = 0; } // Allocate a new SSA number (starting with SsaConfig::FIRST_SSA_NUM). template <class... Args> unsigned AllocSsaNum(CompAllocator alloc, Args&&... args) { if (m_count == m_arraySize) { GrowArray(alloc); } unsigned ssaNum = GetMinSsaNum() + m_count; m_array[m_count++] = T(std::forward<Args>(args)...); // Ensure that the first SSA number we allocate is SsaConfig::FIRST_SSA_NUM assert((ssaNum == SsaConfig::FIRST_SSA_NUM) || (m_count > 1)); return ssaNum; } // Get the number of SSA definitions in the array. unsigned GetCount() const { return m_count; } // Get a pointer to the SSA definition at the specified index. T* GetSsaDefByIndex(unsigned index) { assert(index < m_count); return &m_array[index]; } // Check if the specified SSA number is valid. bool IsValidSsaNum(unsigned ssaNum) const { return (GetMinSsaNum() <= ssaNum) && (ssaNum < (GetMinSsaNum() + m_count)); } // Get a pointer to the SSA definition associated with the specified SSA number. T* GetSsaDef(unsigned ssaNum) { assert(ssaNum != SsaConfig::RESERVED_SSA_NUM); return GetSsaDefByIndex(ssaNum - GetMinSsaNum()); } // Get an SSA number associated with the specified SSA def (that must be in this array). 
unsigned GetSsaNum(T* ssaDef) { assert((m_array <= ssaDef) && (ssaDef < &m_array[m_count])); return GetMinSsaNum() + static_cast<unsigned>(ssaDef - &m_array[0]); } }; enum RefCountState { RCS_INVALID, // not valid to get/set ref counts RCS_EARLY, // early counts for struct promotion and struct passing RCS_NORMAL, // normal ref counts (from lvaMarkRefs onward) }; #ifdef DEBUG // Reasons why we can't enregister a local. enum class DoNotEnregisterReason { None, AddrExposed, // the address of this local is exposed. DontEnregStructs, // struct enregistration is disabled. NotRegSizeStruct, // the struct size does not much any register size, usually the struct size is too big. LocalField, // the local is accessed with LCL_FLD, note we can do it not only for struct locals. VMNeedsStackAddr, LiveInOutOfHandler, // the local is alive in and out of exception handler and not signle def. BlockOp, // Is read or written via a block operation. IsStructArg, // Is a struct passed as an argument in a way that requires a stack location. DepField, // It is a field of a dependently promoted struct NoRegVars, // opts.compFlags & CLFLG_REGVAR is not set MinOptsGC, // It is a GC Ref and we are compiling MinOpts #if !defined(TARGET_64BIT) LongParamField, // It is a decomposed field of a long parameter. #endif #ifdef JIT32_GCENCODER PinningRef, #endif LclAddrNode, // the local is accessed with LCL_ADDR_VAR/FLD. CastTakesAddr, StoreBlkSrc, // the local is used as STORE_BLK source. OneAsgRetyping, // fgMorphOneAsgBlockOp prevents this local from being enregister. SwizzleArg, // the local is passed using LCL_FLD as another type. BlockOpRet, // the struct is returned and it promoted or there is a cast. ReturnSpCheck, // the local is used to do SP check SimdUserForcesDep // a promoted struct was used by a SIMD/HWI node; it must be dependently promoted }; enum class AddressExposedReason { NONE, PARENT_EXPOSED, // This is a promoted field but the parent is exposed. TOO_CONSERVATIVE, // Were marked as exposed to be conservative, fix these places. ESCAPE_ADDRESS, // The address is escaping, for example, passed as call argument. WIDE_INDIR, // We access via indirection with wider type. OSR_EXPOSED, // It was exposed in the original method, osr has to repeat it. STRESS_LCL_FLD, // Stress mode replaces localVar with localFld and makes them addrExposed. COPY_FLD_BY_FLD, // Field by field copy takes the address of the local, can be fixed. DISPATCH_RET_BUF // Caller return buffer dispatch. }; #endif // DEBUG class LclVarDsc { public: // The constructor. Most things can just be zero'ed. // // Initialize the ArgRegs to REG_STK. // Morph will update if this local is passed in a register. LclVarDsc() : _lvArgReg(REG_STK) , #if FEATURE_MULTIREG_ARGS _lvOtherArgReg(REG_STK) , #endif // FEATURE_MULTIREG_ARGS lvClassHnd(NO_CLASS_HANDLE) , lvRefBlks(BlockSetOps::UninitVal()) , lvPerSsaData() { } // note this only packs because var_types is a typedef of unsigned char var_types lvType : 5; // TYP_INT/LONG/FLOAT/DOUBLE/REF unsigned char lvIsParam : 1; // is this a parameter? unsigned char lvIsRegArg : 1; // is this an argument that was passed by register? unsigned char lvFramePointerBased : 1; // 0 = off of REG_SPBASE (e.g., ESP), 1 = off of REG_FPBASE (e.g., EBP) unsigned char lvOnFrame : 1; // (part of) the variable lives on the frame unsigned char lvRegister : 1; // assigned to live in a register? For RyuJIT backend, this is only set if the // variable is in the same register for the entire function. 
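    // Note: LclVarDsc keeps its SSA definitions in an SsaDefArray (lvPerSsaData, declared further
    // below). SSA numbers handed out by that array are 1-based so that SsaConfig::RESERVED_SSA_NUM (0)
    // can mean "no SSA number". A minimal round-trip sketch (illustrative only; 'defs' is a
    // hypothetical SsaDefArray<LclSsaVarDsc> and 'alloc' a hypothetical CompAllocator):
    //
    //   // unsigned ssaNum = defs.AllocSsaNum(alloc);  // first allocation returns FIRST_SSA_NUM == 1
    //   // LclSsaVarDsc* def = defs.GetSsaDef(ssaNum);  // maps back to internal index 0
    //   // assert(defs.GetSsaNum(def) == ssaNum);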
unsigned char lvTracked : 1; // is this a tracked variable? bool lvTrackedNonStruct() { return lvTracked && lvType != TYP_STRUCT; } unsigned char lvPinned : 1; // is this a pinned variable? unsigned char lvMustInit : 1; // must be initialized private: bool m_addrExposed : 1; // The address of this variable is "exposed" -- passed as an argument, stored in a // global location, etc. // We cannot reason reliably about the value of the variable. public: unsigned char lvDoNotEnregister : 1; // Do not enregister this variable. unsigned char lvFieldAccessed : 1; // The var is a struct local, and a field of the variable is accessed. Affects // struct promotion. unsigned char lvLiveInOutOfHndlr : 1; // The variable is live in or out of an exception handler, and therefore must // be on the stack (at least at those boundaries.) unsigned char lvInSsa : 1; // The variable is in SSA form (set by SsaBuilder) unsigned char lvIsCSE : 1; // Indicates if this LclVar is a CSE variable. unsigned char lvHasLdAddrOp : 1; // has ldloca or ldarga opcode on this local. unsigned char lvStackByref : 1; // This is a compiler temporary of TYP_BYREF that is known to point into our local // stack frame. unsigned char lvHasILStoreOp : 1; // there is at least one STLOC or STARG on this local unsigned char lvHasMultipleILStoreOp : 1; // there is more than one STLOC on this local unsigned char lvIsTemp : 1; // Short-lifetime compiler temp #if defined(TARGET_AMD64) || defined(TARGET_ARM64) unsigned char lvIsImplicitByRef : 1; // Set if the argument is an implicit byref. #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) unsigned char lvIsBoolean : 1; // set if variable is boolean unsigned char lvSingleDef : 1; // variable has a single def // before lvaMarkLocalVars: identifies ref type locals that can get type updates // after lvaMarkLocalVars: identifies locals that are suitable for optAddCopies unsigned char lvSingleDefRegCandidate : 1; // variable has a single def and hence is a register candidate // Currently, this is only used to decide if an EH variable can be // a register candiate or not. unsigned char lvDisqualifySingleDefRegCandidate : 1; // tracks variable that are disqualified from register // candidancy unsigned char lvSpillAtSingleDef : 1; // variable has a single def (as determined by LSRA interval scan) // and is spilled making it candidate to spill right after the // first (and only) definition. // Note: We cannot reuse lvSingleDefRegCandidate because it is set // in earlier phase and the information might not be appropriate // in LSRA. unsigned char lvDisqualify : 1; // variable is no longer OK for add copy optimization unsigned char lvVolatileHint : 1; // hint for AssertionProp #ifndef TARGET_64BIT unsigned char lvStructDoubleAlign : 1; // Must we double align this struct? #endif // !TARGET_64BIT #ifdef TARGET_64BIT unsigned char lvQuirkToLong : 1; // Quirk to allocate this LclVar as a 64-bit long #endif #ifdef DEBUG unsigned char lvKeepType : 1; // Don't change the type of this variable unsigned char lvNoLclFldStress : 1; // Can't apply local field stress on this one #endif unsigned char lvIsPtr : 1; // Might this be used in an address computation? (used by buffer overflow security // checks) unsigned char lvIsUnsafeBuffer : 1; // Does this contain an unsafe buffer requiring buffer overflow security checks? unsigned char lvPromoted : 1; // True when this local is a promoted struct, a normed struct, or a "split" long on a // 32-bit target. 
For implicit byref parameters, this gets hijacked between // fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to indicate whether // references to the arg are being rewritten as references to a promoted shadow local. unsigned char lvIsStructField : 1; // Is this local var a field of a promoted struct local? unsigned char lvOverlappingFields : 1; // True when we have a struct with possibly overlapping fields unsigned char lvContainsHoles : 1; // True when we have a promoted struct that contains holes unsigned char lvCustomLayout : 1; // True when this struct has "CustomLayout" unsigned char lvIsMultiRegArg : 1; // true if this is a multireg LclVar struct used in an argument context unsigned char lvIsMultiRegRet : 1; // true if this is a multireg LclVar struct assigned from a multireg call #ifdef FEATURE_HFA_FIELDS_PRESENT CorInfoHFAElemType _lvHfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA). #endif // FEATURE_HFA_FIELDS_PRESENT #ifdef DEBUG // TODO-Cleanup: See the note on lvSize() - this flag is only in use by asserts that are checking for struct // types, and is needed because of cases where TYP_STRUCT is bashed to an integral type. // Consider cleaning this up so this workaround is not required. unsigned char lvUnusedStruct : 1; // All references to this promoted struct are through its field locals. // I.e. there is no longer any reference to the struct directly. // In this case we can simply remove this struct local. unsigned char lvUndoneStructPromotion : 1; // The struct promotion was undone and hence there should be no // reference to the fields of this struct. #endif unsigned char lvLRACandidate : 1; // Tracked for linear scan register allocation purposes #ifdef FEATURE_SIMD // Note that both SIMD vector args and locals are marked as lvSIMDType = true, but the // type of an arg node is TYP_BYREF and a local node is TYP_SIMD*. unsigned char lvSIMDType : 1; // This is a SIMD struct unsigned char lvUsedInSIMDIntrinsic : 1; // This tells lclvar is used for simd intrinsic unsigned char lvSimdBaseJitType : 5; // Note: this only packs because CorInfoType has less than 32 entries CorInfoType GetSimdBaseJitType() const { return (CorInfoType)lvSimdBaseJitType; } void SetSimdBaseJitType(CorInfoType simdBaseJitType) { assert(simdBaseJitType < (1 << 5)); lvSimdBaseJitType = (unsigned char)simdBaseJitType; } var_types GetSimdBaseType() const; #endif // FEATURE_SIMD unsigned char lvRegStruct : 1; // This is a reg-sized non-field-addressed struct. unsigned char lvClassIsExact : 1; // lvClassHandle is the exact type #ifdef DEBUG unsigned char lvClassInfoUpdated : 1; // true if this var has updated class handle or exactness #endif unsigned char lvImplicitlyReferenced : 1; // true if there are non-IR references to this local (prolog, epilog, gc, // eh) unsigned char lvSuppressedZeroInit : 1; // local needs zero init if we transform tail call to loop unsigned char lvHasExplicitInit : 1; // The local is explicitly initialized and doesn't need zero initialization in // the prolog. If the local has gc pointers, there are no gc-safe points // between the prolog and the explicit initialization. union { unsigned lvFieldLclStart; // The index of the local var representing the first field in the promoted struct // local. For implicit byref parameters, this gets hijacked between // fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to point to the // struct local created to model the parameter's struct promotion, if any. 
unsigned lvParentLcl; // The index of the local var representing the parent (i.e. the promoted struct local). // Valid on promoted struct local fields. }; unsigned char lvFieldCnt; // Number of fields in the promoted VarDsc. unsigned char lvFldOffset; unsigned char lvFldOrdinal; #ifdef DEBUG unsigned char lvSingleDefDisqualifyReason = 'H'; #endif #if FEATURE_MULTIREG_ARGS regNumber lvRegNumForSlot(unsigned slotNum) { if (slotNum == 0) { return (regNumber)_lvArgReg; } else if (slotNum == 1) { return GetOtherArgReg(); } else { assert(false && "Invalid slotNum!"); } unreached(); } #endif // FEATURE_MULTIREG_ARGS CorInfoHFAElemType GetLvHfaElemKind() const { #ifdef FEATURE_HFA_FIELDS_PRESENT return _lvHfaElemKind; #else NOWAY_MSG("GetLvHfaElemKind"); return CORINFO_HFA_ELEM_NONE; #endif // FEATURE_HFA_FIELDS_PRESENT } void SetLvHfaElemKind(CorInfoHFAElemType elemKind) { #ifdef FEATURE_HFA_FIELDS_PRESENT _lvHfaElemKind = elemKind; #else NOWAY_MSG("SetLvHfaElemKind"); #endif // FEATURE_HFA_FIELDS_PRESENT } bool lvIsHfa() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetLvHfaElemKind()); } else { return false; } } bool lvIsHfaRegArg() const { if (GlobalJitOptions::compFeatureHfa) { return lvIsRegArg && lvIsHfa(); } else { return false; } } //------------------------------------------------------------------------------ // lvHfaSlots: Get the number of slots used by an HFA local // // Return Value: // On Arm64 - Returns 1-4 indicating the number of register slots used by the HFA // On Arm32 - Returns the total number of single FP register slots used by the HFA, max is 8 // unsigned lvHfaSlots() const { assert(lvIsHfa()); assert(varTypeIsStruct(lvType)); unsigned slots = 0; #ifdef TARGET_ARM slots = lvExactSize / sizeof(float); assert(slots <= 8); #elif defined(TARGET_ARM64) switch (GetLvHfaElemKind()) { case CORINFO_HFA_ELEM_NONE: assert(!"lvHfaSlots called for non-HFA"); break; case CORINFO_HFA_ELEM_FLOAT: assert((lvExactSize % 4) == 0); slots = lvExactSize >> 2; break; case CORINFO_HFA_ELEM_DOUBLE: case CORINFO_HFA_ELEM_VECTOR64: assert((lvExactSize % 8) == 0); slots = lvExactSize >> 3; break; case CORINFO_HFA_ELEM_VECTOR128: assert((lvExactSize % 16) == 0); slots = lvExactSize >> 4; break; default: unreached(); } assert(slots <= 4); #endif // TARGET_ARM64 return slots; } // lvIsMultiRegArgOrRet() // returns true if this is a multireg LclVar struct used in an argument context // or if this is a multireg LclVar struct assigned from a multireg call bool lvIsMultiRegArgOrRet() { return lvIsMultiRegArg || lvIsMultiRegRet; } #if defined(DEBUG) private: DoNotEnregisterReason m_doNotEnregReason; AddressExposedReason m_addrExposedReason; public: void SetDoNotEnregReason(DoNotEnregisterReason reason) { m_doNotEnregReason = reason; } DoNotEnregisterReason GetDoNotEnregReason() const { return m_doNotEnregReason; } AddressExposedReason GetAddrExposedReason() const { return m_addrExposedReason; } #endif // DEBUG public: void SetAddressExposed(bool value DEBUGARG(AddressExposedReason reason)) { m_addrExposed = value; INDEBUG(m_addrExposedReason = reason); } void CleanAddressExposed() { m_addrExposed = false; } bool IsAddressExposed() const { return m_addrExposed; } private: regNumberSmall _lvRegNum; // Used to store the register this variable is in (or, the low register of a // register pair). It is set during codegen any time the // variable is enregistered (lvRegister is only set // to non-zero if the variable gets the same register assignment for its entire // lifetime). 
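    // Note: a minimal sketch of how the address-exposure accessors above are intended to be used
    // (illustrative call site only; real code reaches this through the Compiler's lva* helpers rather
    // than poking the LclVarDsc directly):
    //
    //   // mark a local whose address escapes into a call argument:
    //   // varDsc->SetAddressExposed(true DEBUGARG(AddressExposedReason::ESCAPE_ADDRESS));
    //   // assert(varDsc->IsAddressExposed());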
#if !defined(TARGET_64BIT) regNumberSmall _lvOtherReg; // Used for "upper half" of long var. #endif // !defined(TARGET_64BIT) regNumberSmall _lvArgReg; // The (first) register in which this argument is passed. #if FEATURE_MULTIREG_ARGS regNumberSmall _lvOtherArgReg; // Used for the second part of the struct passed in a register. // Note this is defined but not used by ARM32 #endif // FEATURE_MULTIREG_ARGS regNumberSmall _lvArgInitReg; // the register into which the argument is moved at entry public: // The register number is stored in a small format (8 bits), but the getters return and the setters take // a full-size (unsigned) format, to localize the casts here. ///////////////////// regNumber GetRegNum() const { return (regNumber)_lvRegNum; } void SetRegNum(regNumber reg) { _lvRegNum = (regNumberSmall)reg; assert(_lvRegNum == reg); } ///////////////////// #if defined(TARGET_64BIT) regNumber GetOtherReg() const { assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072 // "unreachable code" warnings return REG_NA; } void SetOtherReg(regNumber reg) { assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072 // "unreachable code" warnings } #else // !TARGET_64BIT regNumber GetOtherReg() const { return (regNumber)_lvOtherReg; } void SetOtherReg(regNumber reg) { _lvOtherReg = (regNumberSmall)reg; assert(_lvOtherReg == reg); } #endif // !TARGET_64BIT ///////////////////// regNumber GetArgReg() const { return (regNumber)_lvArgReg; } void SetArgReg(regNumber reg) { _lvArgReg = (regNumberSmall)reg; assert(_lvArgReg == reg); } #if FEATURE_MULTIREG_ARGS regNumber GetOtherArgReg() const { return (regNumber)_lvOtherArgReg; } void SetOtherArgReg(regNumber reg) { _lvOtherArgReg = (regNumberSmall)reg; assert(_lvOtherArgReg == reg); } #endif // FEATURE_MULTIREG_ARGS #ifdef FEATURE_SIMD // Is this is a SIMD struct? bool lvIsSIMDType() const { return lvSIMDType; } // Is this is a SIMD struct which is used for SIMD intrinsic? bool lvIsUsedInSIMDIntrinsic() const { return lvUsedInSIMDIntrinsic; } #else // If feature_simd not enabled, return false bool lvIsSIMDType() const { return false; } bool lvIsUsedInSIMDIntrinsic() const { return false; } #endif ///////////////////// regNumber GetArgInitReg() const { return (regNumber)_lvArgInitReg; } void SetArgInitReg(regNumber reg) { _lvArgInitReg = (regNumberSmall)reg; assert(_lvArgInitReg == reg); } ///////////////////// bool lvIsRegCandidate() const { return lvLRACandidate != 0; } bool lvIsInReg() const { return lvIsRegCandidate() && (GetRegNum() != REG_STK); } regMaskTP lvRegMask() const { regMaskTP regMask = RBM_NONE; if (varTypeUsesFloatReg(TypeGet())) { if (GetRegNum() != REG_STK) { regMask = genRegMaskFloat(GetRegNum(), TypeGet()); } } else { if (GetRegNum() != REG_STK) { regMask = genRegMask(GetRegNum()); } } return regMask; } unsigned short lvVarIndex; // variable tracking index private: unsigned short m_lvRefCnt; // unweighted (real) reference count. For implicit by reference // parameters, this gets hijacked from fgResetImplicitByRefRefCount // through fgMarkDemotedImplicitByRefArgs, to provide a static // appearance count (computed during address-exposed analysis) // that fgMakeOutgoingStructArgCopy consults during global morph // to determine if eliding its copy is legal. 
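    // Note: the unweighted count above and the weighted count below are read/written through the
    // accessors declared just after them, qualified by a RefCountState. A minimal sketch (illustrative
    // only; 'varDsc' is a hypothetical LclVarDsc*):
    //
    //   // varDsc->incLvRefCnt(1, RCS_EARLY);            // early count, used for struct promotion/passing
    //   // unsigned cnt = varDsc->lvRefCnt(RCS_NORMAL);  // normal count, valid from lvaMarkRefs onward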
weight_t m_lvRefCntWtd; // weighted reference count public: unsigned short lvRefCnt(RefCountState state = RCS_NORMAL) const; void incLvRefCnt(unsigned short delta, RefCountState state = RCS_NORMAL); void setLvRefCnt(unsigned short newValue, RefCountState state = RCS_NORMAL); weight_t lvRefCntWtd(RefCountState state = RCS_NORMAL) const; void incLvRefCntWtd(weight_t delta, RefCountState state = RCS_NORMAL); void setLvRefCntWtd(weight_t newValue, RefCountState state = RCS_NORMAL); private: int lvStkOffs; // stack offset of home in bytes. public: int GetStackOffset() const { return lvStkOffs; } void SetStackOffset(int offset) { lvStkOffs = offset; } unsigned lvExactSize; // (exact) size of the type in bytes // Is this a promoted struct? // This method returns true only for structs (including SIMD structs), not for // locals that are split on a 32-bit target. // It is only necessary to use this: // 1) if only structs are wanted, and // 2) if Lowering has already been done. // Otherwise lvPromoted is valid. bool lvPromotedStruct() { #if !defined(TARGET_64BIT) return (lvPromoted && !varTypeIsLong(lvType)); #else // defined(TARGET_64BIT) return lvPromoted; #endif // defined(TARGET_64BIT) } unsigned lvSize() const; size_t lvArgStackSize() const; unsigned lvSlotNum; // original slot # (if remapped) typeInfo lvVerTypeInfo; // type info needed for verification // class handle for the local or null if not known or not a class, // for a struct handle use `GetStructHnd()`. CORINFO_CLASS_HANDLE lvClassHnd; // Get class handle for a struct local or implicitByRef struct local. CORINFO_CLASS_HANDLE GetStructHnd() const { #ifdef FEATURE_SIMD if (lvSIMDType && (m_layout == nullptr)) { return NO_CLASS_HANDLE; } #endif assert(m_layout != nullptr); #if defined(TARGET_AMD64) || defined(TARGET_ARM64) assert(varTypeIsStruct(TypeGet()) || (lvIsImplicitByRef && (TypeGet() == TYP_BYREF))); #else assert(varTypeIsStruct(TypeGet())); #endif CORINFO_CLASS_HANDLE structHnd = m_layout->GetClassHandle(); assert(structHnd != NO_CLASS_HANDLE); return structHnd; } CORINFO_FIELD_HANDLE lvFieldHnd; // field handle for promoted struct fields private: ClassLayout* m_layout; // layout info for structs public: BlockSet lvRefBlks; // Set of blocks that contain refs Statement* lvDefStmt; // Pointer to the statement with the single definition void lvaDisqualifyVar(); // Call to disqualify a local variable from use in optAddCopies var_types TypeGet() const { return (var_types)lvType; } bool lvStackAligned() const { assert(lvIsStructField); return ((lvFldOffset % TARGET_POINTER_SIZE) == 0); } bool lvNormalizeOnLoad() const { return varTypeIsSmall(TypeGet()) && // lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore. (lvIsParam || m_addrExposed || lvIsStructField); } bool lvNormalizeOnStore() const { return varTypeIsSmall(TypeGet()) && // lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore. !(lvIsParam || m_addrExposed || lvIsStructField); } void incRefCnts(weight_t weight, Compiler* pComp, RefCountState state = RCS_NORMAL, bool propagate = true); var_types GetHfaType() const { if (GlobalJitOptions::compFeatureHfa) { assert(lvIsHfa()); return HfaTypeFromElemKind(GetLvHfaElemKind()); } else { return TYP_UNDEF; } } void SetHfaType(var_types type) { if (GlobalJitOptions::compFeatureHfa) { CorInfoHFAElemType elemKind = HfaElemKindFromType(type); SetLvHfaElemKind(elemKind); // Ensure we've allocated enough bits. 
assert(GetLvHfaElemKind() == elemKind); } } // Returns true if this variable contains GC pointers (including being a GC pointer itself). bool HasGCPtr() const { return varTypeIsGC(lvType) || ((lvType == TYP_STRUCT) && m_layout->HasGCPtr()); } // Returns the layout of a struct variable. ClassLayout* GetLayout() const { assert(varTypeIsStruct(lvType)); return m_layout; } // Sets the layout of a struct variable. void SetLayout(ClassLayout* layout) { assert(varTypeIsStruct(lvType)); assert((m_layout == nullptr) || ClassLayout::AreCompatible(m_layout, layout)); m_layout = layout; } SsaDefArray<LclSsaVarDsc> lvPerSsaData; // Returns the address of the per-Ssa data for the given ssaNum (which is required // not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is // not an SSA variable). LclSsaVarDsc* GetPerSsaData(unsigned ssaNum) { return lvPerSsaData.GetSsaDef(ssaNum); } // Returns the SSA number for "ssaDef". Requires "ssaDef" to be a valid definition // of this variable. unsigned GetSsaNumForSsaDef(LclSsaVarDsc* ssaDef) { return lvPerSsaData.GetSsaNum(ssaDef); } var_types GetRegisterType(const GenTreeLclVarCommon* tree) const; var_types GetRegisterType() const; var_types GetActualRegisterType() const; bool IsEnregisterableType() const { return GetRegisterType() != TYP_UNDEF; } bool IsEnregisterableLcl() const { if (lvDoNotEnregister) { return false; } return IsEnregisterableType(); } //----------------------------------------------------------------------------- // IsAlwaysAliveInMemory: Determines if this variable's value is always // up-to-date on stack. This is possible if this is an EH-var or // we decided to spill after single-def. // bool IsAlwaysAliveInMemory() const { return lvLiveInOutOfHndlr || lvSpillAtSingleDef; } bool CanBeReplacedWithItsField(Compiler* comp) const; #ifdef DEBUG public: const char* lvReason; void PrintVarReg() const { printf("%s", getRegName(GetRegNum())); } #endif // DEBUG }; // class LclVarDsc enum class SymbolicIntegerValue : int32_t { LongMin, IntMin, ShortMin, ByteMin, Zero, One, ByteMax, UByteMax, ShortMax, UShortMax, IntMax, UIntMax, LongMax, }; inline constexpr bool operator>(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) > static_cast<int32_t>(right); } inline constexpr bool operator>=(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) >= static_cast<int32_t>(right); } inline constexpr bool operator<(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) < static_cast<int32_t>(right); } inline constexpr bool operator<=(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) <= static_cast<int32_t>(right); } // Represents an integral range useful for reasoning about integral casts. // It uses a symbolic representation for lower and upper bounds so // that it can efficiently handle integers of all sizes on all hosts. // // Note that the ranges represented by this class are **always** in the // "signed" domain. This is so that if we know the range a node produces, it // can be trivially used to determine if a cast above the node does or does not // overflow, which requires that the interpretation of integers be the same both // for the "input" and "output". We choose signed interpretation here because it // produces nice continuous ranges and because IR uses sign-extension for constants. // // Some examples of how ranges are computed for casts: // 1. 
CAST_OVF(ubyte <- uint): does not overflow for [0..UBYTE_MAX], produces the // same range - all casts that do not change the representation, i. e. have the same // "actual" input and output type, have the same "input" and "output" range. // 2. CAST_OVF(ulong <- uint): never oveflows => the "input" range is [INT_MIN..INT_MAX] // (aka all possible 32 bit integers). Produces [0..UINT_MAX] (aka all possible 32 // bit integers zero-extended to 64 bits). // 3. CAST_OVF(int <- uint): overflows for inputs larger than INT_MAX <=> less than 0 // when interpreting as signed => the "input" range is [0..INT_MAX], the same range // being the produced one as the node does not change the width of the integer. // class IntegralRange { private: SymbolicIntegerValue m_lowerBound; SymbolicIntegerValue m_upperBound; public: IntegralRange() = default; IntegralRange(SymbolicIntegerValue lowerBound, SymbolicIntegerValue upperBound) : m_lowerBound(lowerBound), m_upperBound(upperBound) { assert(lowerBound <= upperBound); } bool Contains(int64_t value) const; bool Contains(IntegralRange other) const { return (m_lowerBound <= other.m_lowerBound) && (other.m_upperBound <= m_upperBound); } bool IsPositive() { return m_lowerBound >= SymbolicIntegerValue::Zero; } bool Equals(IntegralRange other) const { return (m_lowerBound == other.m_lowerBound) && (m_upperBound == other.m_upperBound); } static int64_t SymbolicToRealValue(SymbolicIntegerValue value); static SymbolicIntegerValue LowerBoundForType(var_types type); static SymbolicIntegerValue UpperBoundForType(var_types type); static IntegralRange ForType(var_types type) { return {LowerBoundForType(type), UpperBoundForType(type)}; } static IntegralRange ForNode(GenTree* node, Compiler* compiler); static IntegralRange ForCastInput(GenTreeCast* cast); static IntegralRange ForCastOutput(GenTreeCast* cast); #ifdef DEBUG static void Print(IntegralRange range); #endif // DEBUG }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX TempsInfo XX XX XX XX The temporary lclVars allocated by the compiler for code generation XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /***************************************************************************** * * The following keeps track of temporaries allocated in the stack frame * during code-generation (after register allocation). These spill-temps are * only used if we run out of registers while evaluating a tree. * * These are different from the more common temps allocated by lvaGrabTemp(). 
*/ class TempDsc { public: TempDsc* tdNext; private: int tdOffs; #ifdef DEBUG static const int BAD_TEMP_OFFSET = 0xDDDDDDDD; // used as a sentinel "bad value" for tdOffs in DEBUG #endif // DEBUG int tdNum; BYTE tdSize; var_types tdType; public: TempDsc(int _tdNum, unsigned _tdSize, var_types _tdType) : tdNum(_tdNum), tdSize((BYTE)_tdSize), tdType(_tdType) { #ifdef DEBUG // temps must have a negative number (so they have a different number from all local variables) assert(tdNum < 0); tdOffs = BAD_TEMP_OFFSET; #endif // DEBUG if (tdNum != _tdNum) { IMPL_LIMITATION("too many spill temps"); } } #ifdef DEBUG bool tdLegalOffset() const { return tdOffs != BAD_TEMP_OFFSET; } #endif // DEBUG int tdTempOffs() const { assert(tdLegalOffset()); return tdOffs; } void tdSetTempOffs(int offs) { tdOffs = offs; assert(tdLegalOffset()); } void tdAdjustTempOffs(int offs) { tdOffs += offs; assert(tdLegalOffset()); } int tdTempNum() const { assert(tdNum < 0); return tdNum; } unsigned tdTempSize() const { return tdSize; } var_types tdTempType() const { return tdType; } }; // interface to hide linearscan implementation from rest of compiler class LinearScanInterface { public: virtual void doLinearScan() = 0; virtual void recordVarLocationsAtStartOfBB(BasicBlock* bb) = 0; virtual bool willEnregisterLocalVars() const = 0; #if TRACK_LSRA_STATS virtual void dumpLsraStatsCsv(FILE* file) = 0; virtual void dumpLsraStatsSummary(FILE* file) = 0; #endif // TRACK_LSRA_STATS }; LinearScanInterface* getLinearScanAllocator(Compiler* comp); // Information about arrays: their element type and size, and the offset of the first element. // We label GT_IND's that are array indices with GTF_IND_ARR_INDEX, and, for such nodes, // associate an array info via the map retrieved by GetArrayInfoMap(). This information is used, // for example, in value numbering of array index expressions. struct ArrayInfo { var_types m_elemType; CORINFO_CLASS_HANDLE m_elemStructType; unsigned m_elemSize; unsigned m_elemOffset; ArrayInfo() : m_elemType(TYP_UNDEF), m_elemStructType(nullptr), m_elemSize(0), m_elemOffset(0) { } ArrayInfo(var_types elemType, unsigned elemSize, unsigned elemOffset, CORINFO_CLASS_HANDLE elemStructType) : m_elemType(elemType), m_elemStructType(elemStructType), m_elemSize(elemSize), m_elemOffset(elemOffset) { } }; // This enumeration names the phases into which we divide compilation. The phases should completely // partition a compilation. enum Phases { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) enum_nm, #include "compphases.h" PHASE_NUMBER_OF }; extern const char* PhaseNames[]; extern const char* PhaseEnums[]; extern const LPCWSTR PhaseShortNames[]; // Specify which checks should be run after each phase // enum class PhaseChecks { CHECK_NONE, CHECK_ALL }; // Specify compiler data that a phase might modify enum class PhaseStatus : unsigned { MODIFIED_NOTHING, MODIFIED_EVERYTHING }; // The following enum provides a simple 1:1 mapping to CLR API's enum API_ICorJitInfo_Names { #define DEF_CLR_API(name) API_##name, #include "ICorJitInfo_API_names.h" API_COUNT }; //--------------------------------------------------------------- // Compilation time. // // A "CompTimeInfo" is a structure for tracking the compilation time of one or more methods. // We divide a compilation into a sequence of contiguous phases, and track the total (per-thread) cycles // of the compilation, as well as the cycles for each phase. We also track the number of bytecodes. 
// If there is a failure in reading a timer at any point, the "CompTimeInfo" becomes invalid, as indicated // by "m_timerFailure" being true. // If FEATURE_JIT_METHOD_PERF is not set, we define a minimal form of this, enough to let other code compile. struct CompTimeInfo { #ifdef FEATURE_JIT_METHOD_PERF // The string names of the phases. static const char* PhaseNames[]; static bool PhaseHasChildren[]; static int PhaseParent[]; static bool PhaseReportsIRSize[]; unsigned m_byteCodeBytes; unsigned __int64 m_totalCycles; unsigned __int64 m_invokesByPhase[PHASE_NUMBER_OF]; unsigned __int64 m_cyclesByPhase[PHASE_NUMBER_OF]; #if MEASURE_CLRAPI_CALLS unsigned __int64 m_CLRinvokesByPhase[PHASE_NUMBER_OF]; unsigned __int64 m_CLRcyclesByPhase[PHASE_NUMBER_OF]; #endif unsigned m_nodeCountAfterPhase[PHASE_NUMBER_OF]; // For better documentation, we call EndPhase on // non-leaf phases. We should also call EndPhase on the // last leaf subphase; obviously, the elapsed cycles between the EndPhase // for the last leaf subphase and the EndPhase for an ancestor should be very small. // We add all such "redundant end phase" intervals to this variable below; we print // it out in a report, so we can verify that it is, indeed, very small. If it ever // isn't, this means that we're doing something significant between the end of the last // declared subphase and the end of its parent. unsigned __int64 m_parentPhaseEndSlop; bool m_timerFailure; #if MEASURE_CLRAPI_CALLS // The following measures the time spent inside each individual CLR API call. unsigned m_allClrAPIcalls; unsigned m_perClrAPIcalls[API_ICorJitInfo_Names::API_COUNT]; unsigned __int64 m_allClrAPIcycles; unsigned __int64 m_perClrAPIcycles[API_ICorJitInfo_Names::API_COUNT]; unsigned __int32 m_maxClrAPIcycles[API_ICorJitInfo_Names::API_COUNT]; #endif // MEASURE_CLRAPI_CALLS CompTimeInfo(unsigned byteCodeBytes); #endif }; #ifdef FEATURE_JIT_METHOD_PERF #if MEASURE_CLRAPI_CALLS struct WrapICorJitInfo; #endif // This class summarizes the JIT time information over the course of a run: the number of methods compiled, // and the total and maximum timings. (These are instances of the "CompTimeInfo" type described above). // The operation of adding a single method's timing to the summary may be performed concurrently by several // threads, so it is protected by a lock. // This class is intended to be used as a singleton type, with only a single instance. class CompTimeSummaryInfo { // This lock protects the fields of all CompTimeSummaryInfo(s) (of which we expect there to be one). static CritSecObject s_compTimeSummaryLock; int m_numMethods; int m_totMethods; CompTimeInfo m_total; CompTimeInfo m_maximum; int m_numFilteredMethods; CompTimeInfo m_filtered; // This can use what ever data you want to determine if the value to be added // belongs in the filtered section (it's always included in the unfiltered section) bool IncludedInFilteredData(CompTimeInfo& info); public: // This is the unique CompTimeSummaryInfo object for this instance of the runtime. static CompTimeSummaryInfo s_compTimeSummary; CompTimeSummaryInfo() : m_numMethods(0), m_totMethods(0), m_total(0), m_maximum(0), m_numFilteredMethods(0), m_filtered(0) { } // Assumes that "info" is a completed CompTimeInfo for a compilation; adds it to the summary. // This is thread safe. void AddInfo(CompTimeInfo& info, bool includePhases); // Print the summary information to "f". // This is not thread-safe; assumed to be called by only one thread. 
void Print(FILE* f); }; // A JitTimer encapsulates a CompTimeInfo for a single compilation. It also tracks the start of compilation, // and when the current phase started. This is intended to be part of a Compilation object. // class JitTimer { unsigned __int64 m_start; // Start of the compilation. unsigned __int64 m_curPhaseStart; // Start of the current phase. #if MEASURE_CLRAPI_CALLS unsigned __int64 m_CLRcallStart; // Start of the current CLR API call (if any). unsigned __int64 m_CLRcallInvokes; // CLR API invokes under current outer so far unsigned __int64 m_CLRcallCycles; // CLR API cycles under current outer so far. int m_CLRcallAPInum; // The enum/index of the current CLR API call (or -1). static double s_cyclesPerSec; // Cached for speedier measurements #endif #ifdef DEBUG Phases m_lastPhase; // The last phase that was completed (or (Phases)-1 to start). #endif CompTimeInfo m_info; // The CompTimeInfo for this compilation. static CritSecObject s_csvLock; // Lock to protect the time log file. static FILE* s_csvFile; // The time log file handle. void PrintCsvMethodStats(Compiler* comp); private: void* operator new(size_t); void* operator new[](size_t); void operator delete(void*); void operator delete[](void*); public: // Initialized the timer instance JitTimer(unsigned byteCodeSize); static JitTimer* Create(Compiler* comp, unsigned byteCodeSize) { return ::new (comp, CMK_Unknown) JitTimer(byteCodeSize); } static void PrintCsvHeader(); // Ends the current phase (argument is for a redundant check). void EndPhase(Compiler* compiler, Phases phase); #if MEASURE_CLRAPI_CALLS // Start and end a timed CLR API call. void CLRApiCallEnter(unsigned apix); void CLRApiCallLeave(unsigned apix); #endif // MEASURE_CLRAPI_CALLS // Completes the timing of the current method, which is assumed to have "byteCodeBytes" bytes of bytecode, // and adds it to "sum". void Terminate(Compiler* comp, CompTimeSummaryInfo& sum, bool includePhases); // Attempts to query the cycle counter of the current thread. If successful, returns "true" and sets // *cycles to the cycle counter value. Otherwise, returns false and sets the "m_timerFailure" flag of // "m_info" to true. bool GetThreadCycles(unsigned __int64* cycles) { bool res = CycleTimer::GetThreadCyclesS(cycles); if (!res) { m_info.m_timerFailure = true; } return res; } static void Shutdown(); }; #endif // FEATURE_JIT_METHOD_PERF //------------------- Function/Funclet info ------------------------------- enum FuncKind : BYTE { FUNC_ROOT, // The main/root function (always id==0) FUNC_HANDLER, // a funclet associated with an EH handler (finally, fault, catch, filter handler) FUNC_FILTER, // a funclet associated with an EH filter FUNC_COUNT }; class emitLocation; struct FuncInfoDsc { FuncKind funKind; BYTE funFlags; // Currently unused, just here for padding unsigned short funEHIndex; // index, into the ebd table, of innermost EH clause corresponding to this // funclet. It is only valid if funKind field indicates this is a // EH-related funclet: FUNC_HANDLER or FUNC_FILTER #if defined(TARGET_AMD64) // TODO-AMD64-Throughput: make the AMD64 info more like the ARM info to avoid having this large static array. emitLocation* startLoc; emitLocation* endLoc; emitLocation* coldStartLoc; // locations for the cold section, if there is one. emitLocation* coldEndLoc; UNWIND_INFO unwindHeader; // Maximum of 255 UNWIND_CODE 'nodes' and then the unwind header. If there are an odd // number of codes, the VM or Zapper will 4-byte align the whole thing. 
BYTE unwindCodes[offsetof(UNWIND_INFO, UnwindCode) + (0xFF * sizeof(UNWIND_CODE))]; unsigned unwindCodeSlot; #elif defined(TARGET_X86) emitLocation* startLoc; emitLocation* endLoc; emitLocation* coldStartLoc; // locations for the cold section, if there is one. emitLocation* coldEndLoc; #elif defined(TARGET_ARMARCH) UnwindInfo uwi; // Unwind information for this function/funclet's hot section UnwindInfo* uwiCold; // Unwind information for this function/funclet's cold section // Note: we only have a pointer here instead of the actual object, // to save memory in the JIT case (compared to the NGEN case), // where we don't have any cold section. // Note 2: we currently don't support hot/cold splitting in functions // with EH, so uwiCold will be NULL for all funclets. emitLocation* startLoc; emitLocation* endLoc; emitLocation* coldStartLoc; // locations for the cold section, if there is one. emitLocation* coldEndLoc; #endif // TARGET_ARMARCH #if defined(FEATURE_CFI_SUPPORT) jitstd::vector<CFI_CODE>* cfiCodes; #endif // FEATURE_CFI_SUPPORT // Eventually we may want to move rsModifiedRegsMask, lvaOutgoingArgSize, and anything else // that isn't shared between the main function body and funclets. }; struct fgArgTabEntry { GenTreeCall::Use* use; // Points to the argument's GenTreeCall::Use in gtCallArgs or gtCallThisArg. GenTreeCall::Use* lateUse; // Points to the argument's GenTreeCall::Use in gtCallLateArgs, if any. // Get the node that coresponds to this argument entry. // This is the "real" node and not a placeholder or setup node. GenTree* GetNode() const { return lateUse == nullptr ? use->GetNode() : lateUse->GetNode(); } unsigned argNum; // The original argument number, also specifies the required argument evaluation order from the IL private: regNumberSmall regNums[MAX_ARG_REG_COUNT]; // The registers to use when passing this argument, set to REG_STK for // arguments passed on the stack public: unsigned numRegs; // Count of number of registers that this argument uses. // Note that on ARM, if we have a double hfa, this reflects the number // of DOUBLE registers. #if defined(UNIX_AMD64_ABI) // Unix amd64 will split floating point types and integer types in structs // between floating point and general purpose registers. Keep track of that // information so we do not need to recompute it later. unsigned structIntRegs; unsigned structFloatRegs; #endif // UNIX_AMD64_ABI #if defined(DEBUG_ARG_SLOTS) // These fields were used to calculate stack size in stack slots for arguments // but now they are replaced by precise `m_byteOffset/m_byteSize` because of // arm64 apple abi requirements. // A slot is a pointer sized region in the OutArg area. unsigned slotNum; // When an argument is passed in the OutArg area this is the slot number in the OutArg area unsigned numSlots; // Count of number of slots that this argument uses #endif // DEBUG_ARG_SLOTS // Return number of stack slots that this argument is taking. // TODO-Cleanup: this function does not align with arm64 apple model, // delete it. In most cases we just want to know if we it is using stack or not // but in some cases we are checking if it is a multireg arg, like: // `numRegs + GetStackSlotsNumber() > 1` that is harder to replace. // unsigned GetStackSlotsNumber() const { return roundUp(GetStackByteSize(), TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE; } private: unsigned _lateArgInx; // index into gtCallLateArgs list; UINT_MAX if this is not a late arg. 
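    // Note: a minimal sketch of how an entry's placement is typically queried (illustrative only;
    // 'argEntry' is a hypothetical fgArgTabEntry*):
    //
    //   // if (argEntry->isPassedInRegisters())
    //   // {
    //   //     regNumber firstReg = argEntry->GetRegNum();     // first register used for the arg
    //   // }
    //   // else
    //   // {
    //   //     unsigned offset = argEntry->GetByteOffset();    // offset of the arg in the OutArg area
    //   //     unsigned size   = argEntry->GetStackByteSize(); // bytes the arg occupies on the stack
    //   // }
    //
    // Split arguments (FEATURE_ARG_SPLIT) can use registers and stack space at the same time, so both
    // views can be non-trivial for them.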
public: unsigned tmpNum; // the LclVar number if we had to force evaluation of this arg var_types argType; // The type used to pass this argument. This is generally the original argument type, but when a // struct is passed as a scalar type, this is that type. // Note that if a struct is passed by reference, this will still be the struct type. bool needTmp : 1; // True when we force this argument's evaluation into a temp LclVar bool needPlace : 1; // True when we must replace this argument with a placeholder node bool isTmp : 1; // True when we setup a temp LclVar for this argument due to size issues with the struct bool processed : 1; // True when we have decided the evaluation order for this argument in the gtCallLateArgs bool isBackFilled : 1; // True when the argument fills a register slot skipped due to alignment requirements of // previous arguments. NonStandardArgKind nonStandardArgKind : 4; // The non-standard arg kind. Non-standard args are args that are forced // to be in certain registers or on the stack, regardless of where they // appear in the arg list. bool isStruct : 1; // True if this is a struct arg bool _isVararg : 1; // True if the argument is in a vararg context. bool passedByRef : 1; // True iff the argument is passed by reference. #if FEATURE_ARG_SPLIT bool _isSplit : 1; // True when this argument is split between the registers and OutArg area #endif // FEATURE_ARG_SPLIT #ifdef FEATURE_HFA_FIELDS_PRESENT CorInfoHFAElemType _hfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA). #endif CorInfoHFAElemType GetHfaElemKind() const { #ifdef FEATURE_HFA_FIELDS_PRESENT return _hfaElemKind; #else NOWAY_MSG("GetHfaElemKind"); return CORINFO_HFA_ELEM_NONE; #endif } void SetHfaElemKind(CorInfoHFAElemType elemKind) { #ifdef FEATURE_HFA_FIELDS_PRESENT _hfaElemKind = elemKind; #else NOWAY_MSG("SetHfaElemKind"); #endif } bool isNonStandard() const { return nonStandardArgKind != NonStandardArgKind::None; } // Returns true if the IR node for this non-standarg arg is added by fgInitArgInfo. // In this case, it must be removed by GenTreeCall::ResetArgInfo. 
bool isNonStandardArgAddedLate() const { switch (static_cast<NonStandardArgKind>(nonStandardArgKind)) { case NonStandardArgKind::None: case NonStandardArgKind::PInvokeFrame: case NonStandardArgKind::ShiftLow: case NonStandardArgKind::ShiftHigh: case NonStandardArgKind::FixedRetBuffer: case NonStandardArgKind::ValidateIndirectCallTarget: return false; case NonStandardArgKind::WrapperDelegateCell: case NonStandardArgKind::VirtualStubCell: case NonStandardArgKind::PInvokeCookie: case NonStandardArgKind::PInvokeTarget: case NonStandardArgKind::R2RIndirectionCell: return true; default: unreached(); } } bool isLateArg() const { bool isLate = (_lateArgInx != UINT_MAX); return isLate; } unsigned GetLateArgInx() const { assert(isLateArg()); return _lateArgInx; } void SetLateArgInx(unsigned inx) { _lateArgInx = inx; } regNumber GetRegNum() const { return (regNumber)regNums[0]; } regNumber GetOtherRegNum() const { return (regNumber)regNums[1]; } #if defined(UNIX_AMD64_ABI) SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; #endif void setRegNum(unsigned int i, regNumber regNum) { assert(i < MAX_ARG_REG_COUNT); regNums[i] = (regNumberSmall)regNum; } regNumber GetRegNum(unsigned int i) { assert(i < MAX_ARG_REG_COUNT); return (regNumber)regNums[i]; } bool IsSplit() const { #if FEATURE_ARG_SPLIT return compFeatureArgSplit() && _isSplit; #else // FEATURE_ARG_SPLIT return false; #endif } void SetSplit(bool value) { #if FEATURE_ARG_SPLIT _isSplit = value; #endif } bool IsVararg() const { return compFeatureVarArg() && _isVararg; } void SetIsVararg(bool value) { if (compFeatureVarArg()) { _isVararg = value; } } bool IsHfaArg() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetHfaElemKind()); } else { return false; } } bool IsHfaRegArg() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetHfaElemKind()) && isPassedInRegisters(); } else { return false; } } unsigned intRegCount() const { #if defined(UNIX_AMD64_ABI) if (this->isStruct) { return this->structIntRegs; } #endif // defined(UNIX_AMD64_ABI) if (!this->isPassedInFloatRegisters()) { return this->numRegs; } return 0; } unsigned floatRegCount() const { #if defined(UNIX_AMD64_ABI) if (this->isStruct) { return this->structFloatRegs; } #endif // defined(UNIX_AMD64_ABI) if (this->isPassedInFloatRegisters()) { return this->numRegs; } return 0; } // Get the number of bytes that this argument is occupying on the stack, // including padding up to the target pointer size for platforms // where a stack argument can't take less. unsigned GetStackByteSize() const { if (!IsSplit() && numRegs > 0) { return 0; } assert(!IsHfaArg() || !IsSplit()); assert(GetByteSize() > TARGET_POINTER_SIZE * numRegs); const unsigned stackByteSize = GetByteSize() - TARGET_POINTER_SIZE * numRegs; return stackByteSize; } var_types GetHfaType() const { if (GlobalJitOptions::compFeatureHfa) { return HfaTypeFromElemKind(GetHfaElemKind()); } else { return TYP_UNDEF; } } void SetHfaType(var_types type, unsigned hfaSlots) { if (GlobalJitOptions::compFeatureHfa) { if (type != TYP_UNDEF) { // We must already have set the passing mode. assert(numRegs != 0 || GetStackByteSize() != 0); // We originally set numRegs according to the size of the struct, but if the size of the // hfaType is not the same as the pointer size, we need to correct it. // Note that hfaSlots is the number of registers we will use. For ARM, that is twice // the number of "double registers". 
unsigned numHfaRegs = hfaSlots; #ifdef TARGET_ARM if (type == TYP_DOUBLE) { // Must be an even number of registers. assert((numRegs & 1) == 0); numHfaRegs = hfaSlots / 2; } #endif // TARGET_ARM if (!IsHfaArg()) { // We haven't previously set this; do so now. CorInfoHFAElemType elemKind = HfaElemKindFromType(type); SetHfaElemKind(elemKind); // Ensure we've allocated enough bits. assert(GetHfaElemKind() == elemKind); if (isPassedInRegisters()) { numRegs = numHfaRegs; } } else { // We've already set this; ensure that it's consistent. if (isPassedInRegisters()) { assert(numRegs == numHfaRegs); } assert(type == HfaTypeFromElemKind(GetHfaElemKind())); } } } } #ifdef TARGET_ARM void SetIsBackFilled(bool backFilled) { isBackFilled = backFilled; } bool IsBackFilled() const { return isBackFilled; } #else // !TARGET_ARM void SetIsBackFilled(bool backFilled) { } bool IsBackFilled() const { return false; } #endif // !TARGET_ARM bool isPassedInRegisters() const { return !IsSplit() && (numRegs != 0); } bool isPassedInFloatRegisters() const { #ifdef TARGET_X86 return false; #else return isValidFloatArgReg(GetRegNum()); #endif } // Can we replace the struct type of this node with a primitive type for argument passing? bool TryPassAsPrimitive() const { return !IsSplit() && ((numRegs == 1) || (m_byteSize <= TARGET_POINTER_SIZE)); } #if defined(DEBUG_ARG_SLOTS) // Returns the number of "slots" used, where for this purpose a // register counts as a slot. unsigned getSlotCount() const { if (isBackFilled) { assert(isPassedInRegisters()); assert(numRegs == 1); } else if (GetRegNum() == REG_STK) { assert(!isPassedInRegisters()); assert(numRegs == 0); } else { assert(numRegs > 0); } return numSlots + numRegs; } #endif #if defined(DEBUG_ARG_SLOTS) // Returns the size as a multiple of pointer-size. // For targets without HFAs, this is the same as getSlotCount(). unsigned getSize() const { unsigned size = getSlotCount(); if (GlobalJitOptions::compFeatureHfa) { if (IsHfaRegArg()) { #ifdef TARGET_ARM // We counted the number of regs, but if they are DOUBLE hfa regs we have to double the size. if (GetHfaType() == TYP_DOUBLE) { assert(!IsSplit()); size <<= 1; } #elif defined(TARGET_ARM64) // We counted the number of regs, but if they are FLOAT hfa regs we have to halve the size, // or if they are SIMD16 vector hfa regs we have to double the size. if (GetHfaType() == TYP_FLOAT) { // Round up in case of odd HFA count. size = (size + 1) >> 1; } #ifdef FEATURE_SIMD else if (GetHfaType() == TYP_SIMD16) { size <<= 1; } #endif // FEATURE_SIMD #endif // TARGET_ARM64 } } return size; } #endif // DEBUG_ARG_SLOTS private: unsigned m_byteOffset; // byte size that this argument takes including the padding after. // For example, 1-byte arg on x64 with 8-byte alignment // will have `m_byteSize == 8`, the same arg on apple arm64 will have `m_byteSize == 1`. unsigned m_byteSize; unsigned m_byteAlignment; // usually 4 or 8 bytes (slots/registers). public: void SetByteOffset(unsigned byteOffset) { DEBUG_ARG_SLOTS_ASSERT(byteOffset / TARGET_POINTER_SIZE == slotNum); m_byteOffset = byteOffset; } unsigned GetByteOffset() const { DEBUG_ARG_SLOTS_ASSERT(m_byteOffset / TARGET_POINTER_SIZE == slotNum); return m_byteOffset; } void SetByteSize(unsigned byteSize, bool isStruct, bool isFloatHfa) { unsigned roundedByteSize; if (compMacOsArm64Abi()) { // Only struct types need extension or rounding to pointer size, but HFA<float> does not. 
if (isStruct && !isFloatHfa) { roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE); } else { roundedByteSize = byteSize; } } else { roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE); } #if !defined(TARGET_ARM) // Arm32 could have a struct with 8 byte alignment // which rounded size % 8 is not 0. assert(m_byteAlignment != 0); assert(roundedByteSize % m_byteAlignment == 0); #endif // TARGET_ARM #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi() && !isStruct) { assert(roundedByteSize == getSlotCount() * TARGET_POINTER_SIZE); } #endif m_byteSize = roundedByteSize; } unsigned GetByteSize() const { return m_byteSize; } void SetByteAlignment(unsigned byteAlignment) { m_byteAlignment = byteAlignment; } unsigned GetByteAlignment() const { return m_byteAlignment; } // Set the register numbers for a multireg argument. // There's nothing to do on x64/Ux because the structDesc has already been used to set the // register numbers. void SetMultiRegNums() { #if FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI) if (numRegs == 1) { return; } regNumber argReg = GetRegNum(0); #ifdef TARGET_ARM unsigned int regSize = (GetHfaType() == TYP_DOUBLE) ? 2 : 1; #else unsigned int regSize = 1; #endif if (numRegs > MAX_ARG_REG_COUNT) NO_WAY("Multireg argument exceeds the maximum length"); for (unsigned int regIndex = 1; regIndex < numRegs; regIndex++) { argReg = (regNumber)(argReg + regSize); setRegNum(regIndex, argReg); } #endif // FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI) } #ifdef DEBUG // Check that the value of 'isStruct' is consistent. // A struct arg must be one of the following: // - A node of struct type, // - A GT_FIELD_LIST, or // - A node of a scalar type, passed in a single register or slot // (or two slots in the case of a struct pass on the stack as TYP_DOUBLE). // void checkIsStruct() const { GenTree* node = GetNode(); if (isStruct) { if (!varTypeIsStruct(node) && !node->OperIs(GT_FIELD_LIST)) { // This is the case where we are passing a struct as a primitive type. // On most targets, this is always a single register or slot. // However, on ARM this could be two slots if it is TYP_DOUBLE. bool isPassedAsPrimitiveType = ((numRegs == 1) || ((numRegs == 0) && (GetByteSize() <= TARGET_POINTER_SIZE))); #ifdef TARGET_ARM if (!isPassedAsPrimitiveType) { if (node->TypeGet() == TYP_DOUBLE && numRegs == 0 && (numSlots == 2)) { isPassedAsPrimitiveType = true; } } #endif // TARGET_ARM assert(isPassedAsPrimitiveType); } } else { assert(!varTypeIsStruct(node)); } } void Dump() const; #endif }; //------------------------------------------------------------------------- // // The class fgArgInfo is used to handle the arguments // when morphing a GT_CALL node. // class fgArgInfo { Compiler* compiler; // Back pointer to the compiler instance so that we can allocate memory GenTreeCall* callTree; // Back pointer to the GT_CALL node for this fgArgInfo unsigned argCount; // Updatable arg count value #if defined(DEBUG_ARG_SLOTS) unsigned nextSlotNum; // Updatable slot count value #endif unsigned nextStackByteOffset; unsigned stkLevel; // Stack depth when we make this call (for x86) #if defined(UNIX_X86_ABI) bool alignmentDone; // Updateable flag, set to 'true' after we've done any required alignment. unsigned stkSizeBytes; // Size of stack used by this call, in bytes. Calculated during fgMorphArgs(). unsigned padStkAlign; // Stack alignment in bytes required before arguments are pushed for this call. 
    // Computed dynamically during codegen, based on stkSizeBytes and the current
    // stack level (genStackLevel) when the first stack adjustment is made for
    // this call.
#endif

#if FEATURE_FIXED_OUT_ARGS
    unsigned outArgSize; // Size of the out arg area for the call, will be at least MIN_ARG_AREA_FOR_CALL
#endif

    unsigned argTableSize; // size of argTable array (equal to the argCount when done with fgMorphArgs)
    bool     hasRegArgs;   // true if we have one or more register arguments
    bool     hasStackArgs; // true if we have one or more stack arguments
    bool     argsComplete; // marker for state
    bool     argsSorted;   // marker for state
    bool     needsTemps;   // one or more arguments must be copied to a temp by EvalArgsToTemps

    fgArgTabEntry** argTable; // variable-sized array of per-argument descriptions (i.e. argTable[argTableSize])

private:
    void AddArg(fgArgTabEntry* curArgTabEntry);

public:
    fgArgInfo(Compiler* comp, GenTreeCall* call, unsigned argCount);
    fgArgInfo(GenTreeCall* newCall, GenTreeCall* oldCall);

    fgArgTabEntry* AddRegArg(unsigned argNum,
                             GenTree* node,
                             GenTreeCall::Use* use,
                             regNumber regNum,
                             unsigned numRegs,
                             unsigned byteSize,
                             unsigned byteAlignment,
                             bool isStruct,
                             bool isFloatHfa,
                             bool isVararg = false);

#ifdef UNIX_AMD64_ABI
    fgArgTabEntry* AddRegArg(unsigned argNum,
                             GenTree* node,
                             GenTreeCall::Use* use,
                             regNumber regNum,
                             unsigned numRegs,
                             unsigned byteSize,
                             unsigned byteAlignment,
                             const bool isStruct,
                             const bool isFloatHfa,
                             const bool isVararg,
                             const regNumber otherRegNum,
                             const unsigned structIntRegs,
                             const unsigned structFloatRegs,
                             const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr = nullptr);
#endif // UNIX_AMD64_ABI

    fgArgTabEntry* AddStkArg(unsigned argNum,
                             GenTree* node,
                             GenTreeCall::Use* use,
                             unsigned numSlots,
                             unsigned byteSize,
                             unsigned byteAlignment,
                             bool isStruct,
                             bool isFloatHfa,
                             bool isVararg = false);

    void RemorphReset();
    void UpdateRegArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing);
    void UpdateStkArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing);

    void SplitArg(unsigned argNum, unsigned numRegs, unsigned numSlots);

    void EvalToTmp(fgArgTabEntry* curArgTabEntry, unsigned tmpNum, GenTree* newNode);

    void ArgsComplete();
    void SortArgs();
    void EvalArgsToTemps();

    unsigned ArgCount() const { return argCount; }
    fgArgTabEntry** ArgTable() const { return argTable; }

#if defined(DEBUG_ARG_SLOTS)
    unsigned GetNextSlotNum() const { return nextSlotNum; }
#endif

    unsigned GetNextSlotByteOffset() const { return nextStackByteOffset; }
    bool HasRegArgs() const { return hasRegArgs; }
    bool NeedsTemps() const { return needsTemps; }
    bool HasStackArgs() const { return hasStackArgs; }
    bool AreArgsComplete() const { return argsComplete; }

#if FEATURE_FIXED_OUT_ARGS
    unsigned GetOutArgSize() const { return outArgSize; }
    void SetOutArgSize(unsigned newVal) { outArgSize = newVal; }
#endif // FEATURE_FIXED_OUT_ARGS

#if defined(UNIX_X86_ABI)
    void ComputeStackAlignment(unsigned curStackLevelInBytes)
    {
        padStkAlign = AlignmentPad(curStackLevelInBytes, STACK_ALIGN);
    }
    unsigned GetStkAlign() const { return padStkAlign; }
    void SetStkSizeBytes(unsigned newStkSizeBytes) { stkSizeBytes = newStkSizeBytes; }
    unsigned GetStkSizeBytes() const { return stkSizeBytes; }
    bool IsStkAlignmentDone() const { return alignmentDone; }
    void SetStkAlignmentDone() { alignmentDone = true; }
#endif // defined(UNIX_X86_ABI)

    // Get the fgArgTabEntry for the arg at position argNum.
fgArgTabEntry* GetArgEntry(unsigned argNum, bool reMorphing = true) const { fgArgTabEntry* curArgTabEntry = nullptr; if (!reMorphing) { // The arg table has not yet been sorted. curArgTabEntry = argTable[argNum]; assert(curArgTabEntry->argNum == argNum); return curArgTabEntry; } for (unsigned i = 0; i < argCount; i++) { curArgTabEntry = argTable[i]; if (curArgTabEntry->argNum == argNum) { return curArgTabEntry; } } noway_assert(!"GetArgEntry: argNum not found"); return nullptr; } void SetNeedsTemps() { needsTemps = true; } // Get the node for the arg at position argIndex. // Caller must ensure that this index is a valid arg index. GenTree* GetArgNode(unsigned argIndex) const { return GetArgEntry(argIndex)->GetNode(); } void Dump(Compiler* compiler) const; }; #ifdef DEBUG // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // We have the ability to mark source expressions with "Test Labels." // These drive assertions within the JIT, or internal JIT testing. For example, we could label expressions // that should be CSE defs, and other expressions that should uses of those defs, with a shared label. enum TestLabel // This must be kept identical to System.Runtime.CompilerServices.JitTestLabel.TestLabel. { TL_SsaName, TL_VN, // Defines a "VN equivalence class". (For full VN, including exceptions thrown). TL_VNNorm, // Like above, but uses the non-exceptional value of the expression. TL_CSE_Def, // This must be identified in the JIT as a CSE def TL_CSE_Use, // This must be identified in the JIT as a CSE use TL_LoopHoist, // Expression must (or must not) be hoisted out of the loop. }; struct TestLabelAndNum { TestLabel m_tl; ssize_t m_num; TestLabelAndNum() : m_tl(TestLabel(0)), m_num(0) { } }; typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, TestLabelAndNum> NodeToTestDataMap; // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #endif // DEBUG //------------------------------------------------------------------------- // LoopFlags: flags for the loop table. // enum LoopFlags : unsigned short { LPFLG_EMPTY = 0, // LPFLG_UNUSED = 0x0001, // LPFLG_UNUSED = 0x0002, LPFLG_ITER = 0x0004, // loop of form: for (i = icon or lclVar; test_condition(); i++) // LPFLG_UNUSED = 0x0008, LPFLG_CONTAINS_CALL = 0x0010, // If executing the loop body *may* execute a call LPFLG_VAR_INIT = 0x0020, // iterator is initialized with a local var (var # found in lpVarInit) LPFLG_CONST_INIT = 0x0040, // iterator is initialized with a constant (found in lpConstInit) LPFLG_SIMD_LIMIT = 0x0080, // iterator is compared with vector element count (found in lpConstLimit) LPFLG_VAR_LIMIT = 0x0100, // iterator is compared with a local var (var # found in lpVarLimit) LPFLG_CONST_LIMIT = 0x0200, // iterator is compared with a constant (found in lpConstLimit) LPFLG_ARRLEN_LIMIT = 0x0400, // iterator is compared with a.len or a[i].len (found in lpArrLenLimit) LPFLG_HAS_PREHEAD = 0x0800, // lpHead is known to be a preHead for this loop LPFLG_REMOVED = 0x1000, // has been removed from the loop table (unrolled or optimized away) LPFLG_DONT_UNROLL = 0x2000, // do not unroll this loop LPFLG_ASGVARS_YES = 0x4000, // "lpAsgVars" has been computed LPFLG_ASGVARS_INC = 0x8000, // "lpAsgVars" is incomplete -- vars beyond those representable in an AllVarSet // type are assigned to. 
}; inline constexpr LoopFlags operator~(LoopFlags a) { return (LoopFlags)(~(unsigned short)a); } inline constexpr LoopFlags operator|(LoopFlags a, LoopFlags b) { return (LoopFlags)((unsigned short)a | (unsigned short)b); } inline constexpr LoopFlags operator&(LoopFlags a, LoopFlags b) { return (LoopFlags)((unsigned short)a & (unsigned short)b); } inline LoopFlags& operator|=(LoopFlags& a, LoopFlags b) { return a = (LoopFlags)((unsigned short)a | (unsigned short)b); } inline LoopFlags& operator&=(LoopFlags& a, LoopFlags b) { return a = (LoopFlags)((unsigned short)a & (unsigned short)b); } // The following holds information about instr offsets in terms of generated code. enum class IPmappingDscKind { Prolog, // The mapping represents the start of a prolog. Epilog, // The mapping represents the start of an epilog. NoMapping, // This does not map to any IL offset. Normal, // The mapping maps to an IL offset. }; struct IPmappingDsc { emitLocation ipmdNativeLoc; // the emitter location of the native code corresponding to the IL offset IPmappingDscKind ipmdKind; // The kind of mapping ILLocation ipmdLoc; // The location for normal mappings bool ipmdIsLabel; // Can this code be a branch label? }; struct PreciseIPMapping { emitLocation nativeLoc; DebugInfo debugInfo; }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX The big guy. The sections are currently organized as : XX XX XX XX o GenTree and BasicBlock XX XX o LclVarsInfo XX XX o Importer XX XX o FlowGraph XX XX o Optimizer XX XX o RegAlloc XX XX o EEInterface XX XX o TempsInfo XX XX o RegSet XX XX o GCInfo XX XX o Instruction XX XX o ScopeInfo XX XX o PrologScopeInfo XX XX o CodeGenerator XX XX o UnwindInfo XX XX o Compiler XX XX o typeInfo XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ struct HWIntrinsicInfo; class Compiler { friend class emitter; friend class UnwindInfo; friend class UnwindFragmentInfo; friend class UnwindEpilogInfo; friend class JitTimer; friend class LinearScan; friend class fgArgInfo; friend class Rationalizer; friend class Phase; friend class Lowering; friend class CSE_DataFlow; friend class CSE_Heuristic; friend class CodeGenInterface; friend class CodeGen; friend class LclVarDsc; friend class TempDsc; friend class LIR; friend class ObjectAllocator; friend class LocalAddressVisitor; friend struct GenTree; friend class MorphInitBlockHelper; friend class MorphCopyBlockHelper; #ifdef FEATURE_HW_INTRINSICS friend struct HWIntrinsicInfo; #endif // FEATURE_HW_INTRINSICS #ifndef TARGET_64BIT friend class DecomposeLongs; #endif // !TARGET_64BIT /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Misc structs definitions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: hashBvGlobalData hbvGlobalData; // Used by the hashBv bitvector package. #ifdef DEBUG bool verbose; bool verboseTrees; bool shouldUseVerboseTrees(); bool asciiTrees; // If true, dump trees using only ASCII characters bool shouldDumpASCIITrees(); bool verboseSsa; // If true, produce especially verbose dump output in SSA construction. 
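    // The LoopFlags operator overloads defined earlier in this file exist so that results of
    // bitwise combinations keep the LoopFlags type without explicit casts. A minimal usage
    // sketch, kept in a comment; the local 'flags' below is hypothetical and only for
    // illustration, not a member of this class:
    //
    //   LoopFlags flags = LPFLG_ITER | LPFLG_CONST_INIT; // combine flags
    //   flags |= LPFLG_CONST_LIMIT;                      // add another flag in place
    //   if ((flags & LPFLG_REMOVED) == LPFLG_EMPTY)
    //   {
    //       // loop has not been removed from the loop table
    //   }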
    bool shouldUseVerboseSsa();
    bool treesBeforeAfterMorph; // If true, print trees before/after morphing (paired by an intra-compilation id:
    int  morphNum;              // This counts the trees that have been morphed, allowing us to label each uniquely.
    bool doExtraSuperPmiQueries;
    void makeExtraStructQueries(CORINFO_CLASS_HANDLE structHandle, int level); // Make queries recursively 'level' deep.

    const char* VarNameToStr(VarName name)
    {
        return name;
    }

    DWORD expensiveDebugCheckLevel;
#endif

#if FEATURE_MULTIREG_RET
    GenTree* impAssignMultiRegTypeToVar(GenTree* op,
                                        CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv));
#endif // FEATURE_MULTIREG_RET

#ifdef TARGET_X86
    bool isTrivialPointerSizedStruct(CORINFO_CLASS_HANDLE clsHnd) const;
#endif // TARGET_X86

    //-------------------------------------------------------------------------
    // Functions to handle homogeneous floating-point aggregates (HFAs) in ARM/ARM64.
    // HFAs are one to four element structs where each element is the same
    // type, either all float or all double. We handle HVAs (one to four elements of
    // vector types) uniformly with HFAs. HFAs are treated specially
    // in the ARM/ARM64 Procedure Call Standards, specifically, they are passed in
    // floating-point registers instead of the general purpose registers.
    //
    bool IsHfa(CORINFO_CLASS_HANDLE hClass);
    bool IsHfa(GenTree* tree);

    var_types GetHfaType(GenTree* tree);
    unsigned  GetHfaCount(GenTree* tree);

    var_types GetHfaType(CORINFO_CLASS_HANDLE hClass);
    unsigned  GetHfaCount(CORINFO_CLASS_HANDLE hClass);

    bool IsMultiRegReturnedType(CORINFO_CLASS_HANDLE hClass, CorInfoCallConvExtension callConv);

    //-------------------------------------------------------------------------
    // The following is used for validating the format of the EH table.
    //
    struct EHNodeDsc;
    typedef struct EHNodeDsc* pEHNodeDsc;

    EHNodeDsc* ehnTree; // root of the tree comprising the EHnodes.
    EHNodeDsc* ehnNext; // root of the tree comprising the EHnodes.

    struct EHNodeDsc
    {
        enum EHBlockType
        {
            TryNode,
            FilterNode,
            HandlerNode,
            FinallyNode,
            FaultNode
        };

        EHBlockType ehnBlockType;   // kind of EH block
        IL_OFFSET   ehnStartOffset; // IL offset of start of the EH block
        IL_OFFSET   ehnEndOffset;   // IL offset past end of the EH block. (TODO: looks like verInsertEhNode() sets this to
                                    // the last IL offset, not "one past the last one", i.e., the range Start to End is
                                    // inclusive).
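        // Illustration of the inclusive-range note above (a hypothetical example, not taken
        // from any particular method): if the TODO is accurate, a try body covering IL
        // offsets 0x10..0x1F would be recorded as
        //
        //   ehnStartOffset == 0x10
        //   ehnEndOffset   == 0x1F   // last covered IL offset, rather than 0x20
        //
        // which is consistent with the containment/overlap helpers below (ehnIsNested,
        // ehnIsOverlap) comparing both endpoints with <= / >=.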
pEHNodeDsc ehnNext; // next (non-nested) block in sequential order pEHNodeDsc ehnChild; // leftmost nested block union { pEHNodeDsc ehnTryNode; // for filters and handlers, the corresponding try node pEHNodeDsc ehnHandlerNode; // for a try node, the corresponding handler node }; pEHNodeDsc ehnFilterNode; // if this is a try node and has a filter, otherwise 0 pEHNodeDsc ehnEquivalent; // if blockType=tryNode, start offset and end offset is same, void ehnSetTryNodeType() { ehnBlockType = TryNode; } void ehnSetFilterNodeType() { ehnBlockType = FilterNode; } void ehnSetHandlerNodeType() { ehnBlockType = HandlerNode; } void ehnSetFinallyNodeType() { ehnBlockType = FinallyNode; } void ehnSetFaultNodeType() { ehnBlockType = FaultNode; } bool ehnIsTryBlock() { return ehnBlockType == TryNode; } bool ehnIsFilterBlock() { return ehnBlockType == FilterNode; } bool ehnIsHandlerBlock() { return ehnBlockType == HandlerNode; } bool ehnIsFinallyBlock() { return ehnBlockType == FinallyNode; } bool ehnIsFaultBlock() { return ehnBlockType == FaultNode; } // returns true if there is any overlap between the two nodes static bool ehnIsOverlap(pEHNodeDsc node1, pEHNodeDsc node2) { if (node1->ehnStartOffset < node2->ehnStartOffset) { return (node1->ehnEndOffset >= node2->ehnStartOffset); } else { return (node1->ehnStartOffset <= node2->ehnEndOffset); } } // fails with BADCODE if inner is not completely nested inside outer static bool ehnIsNested(pEHNodeDsc inner, pEHNodeDsc outer) { return ((inner->ehnStartOffset >= outer->ehnStartOffset) && (inner->ehnEndOffset <= outer->ehnEndOffset)); } }; //------------------------------------------------------------------------- // Exception handling functions // #if !defined(FEATURE_EH_FUNCLETS) bool ehNeedsShadowSPslots() { return (info.compXcptnsCount || opts.compDbgEnC); } // 0 for methods with no EH // 1 for methods with non-nested EH, or where only the try blocks are nested // 2 for a method with a catch within a catch // etc. unsigned ehMaxHndNestingCount; #endif // !FEATURE_EH_FUNCLETS static bool jitIsBetween(unsigned value, unsigned start, unsigned end); static bool jitIsBetweenInclusive(unsigned value, unsigned start, unsigned end); bool bbInCatchHandlerILRange(BasicBlock* blk); bool bbInFilterILRange(BasicBlock* blk); bool bbInTryRegions(unsigned regionIndex, BasicBlock* blk); bool bbInExnFlowRegions(unsigned regionIndex, BasicBlock* blk); bool bbInHandlerRegions(unsigned regionIndex, BasicBlock* blk); bool bbInCatchHandlerRegions(BasicBlock* tryBlk, BasicBlock* hndBlk); unsigned short bbFindInnermostCommonTryRegion(BasicBlock* bbOne, BasicBlock* bbTwo); unsigned short bbFindInnermostTryRegionContainingHandlerRegion(unsigned handlerIndex); unsigned short bbFindInnermostHandlerRegionContainingTryRegion(unsigned tryIndex); // Returns true if "block" is the start of a try region. bool bbIsTryBeg(BasicBlock* block); // Returns true if "block" is the start of a handler or filter region. bool bbIsHandlerBeg(BasicBlock* block); // Returns true iff "block" is where control flows if an exception is raised in the // try region, and sets "*regionIndex" to the index of the try for the handler. // Differs from "IsHandlerBeg" in the case of filters, where this is true for the first // block of the filter, but not for the filter's handler. bool bbIsExFlowBlock(BasicBlock* block, unsigned* regionIndex); bool ehHasCallableHandlers(); // Return the EH descriptor for the given region index. EHblkDsc* ehGetDsc(unsigned regionIndex); // Return the EH index given a region descriptor. 
unsigned ehGetIndex(EHblkDsc* ehDsc); // Return the EH descriptor index of the enclosing try, for the given region index. unsigned ehGetEnclosingTryIndex(unsigned regionIndex); // Return the EH descriptor index of the enclosing handler, for the given region index. unsigned ehGetEnclosingHndIndex(unsigned regionIndex); // Return the EH descriptor for the most nested 'try' region this BasicBlock is a member of (or nullptr if this // block is not in a 'try' region). EHblkDsc* ehGetBlockTryDsc(BasicBlock* block); // Return the EH descriptor for the most nested filter or handler region this BasicBlock is a member of (or nullptr // if this block is not in a filter or handler region). EHblkDsc* ehGetBlockHndDsc(BasicBlock* block); // Return the EH descriptor for the most nested region that may handle exceptions raised in this BasicBlock (or // nullptr if this block's exceptions propagate to caller). EHblkDsc* ehGetBlockExnFlowDsc(BasicBlock* block); EHblkDsc* ehIsBlockTryLast(BasicBlock* block); EHblkDsc* ehIsBlockHndLast(BasicBlock* block); bool ehIsBlockEHLast(BasicBlock* block); bool ehBlockHasExnFlowDsc(BasicBlock* block); // Return the region index of the most nested EH region this block is in. unsigned ehGetMostNestedRegionIndex(BasicBlock* block, bool* inTryRegion); // Find the true enclosing try index, ignoring 'mutual protect' try. Uses IL ranges to check. unsigned ehTrueEnclosingTryIndexIL(unsigned regionIndex); // Return the index of the most nested enclosing region for a particular EH region. Returns NO_ENCLOSING_INDEX // if there is no enclosing region. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion' // is set to 'true' if the enclosing region is a 'try', or 'false' if the enclosing region is a handler. // (It can never be a filter.) unsigned ehGetEnclosingRegionIndex(unsigned regionIndex, bool* inTryRegion); // A block has been deleted. Update the EH table appropriately. void ehUpdateForDeletedBlock(BasicBlock* block); // Determine whether a block can be deleted while preserving the EH normalization rules. bool ehCanDeleteEmptyBlock(BasicBlock* block); // Update the 'last' pointers in the EH table to reflect new or deleted blocks in an EH region. void ehUpdateLastBlocks(BasicBlock* oldLast, BasicBlock* newLast); // For a finally handler, find the region index that the BBJ_CALLFINALLY lives in that calls the handler, // or NO_ENCLOSING_INDEX if the BBJ_CALLFINALLY lives in the main function body. Normally, the index // is the same index as the handler (and the BBJ_CALLFINALLY lives in the 'try' region), but for AMD64 the // BBJ_CALLFINALLY lives in the enclosing try or handler region, whichever is more nested, or the main function // body. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion' is set to 'true' if the // BBJ_CALLFINALLY lives in the returned index's 'try' region, or 'false' if lives in the handler region. (It never // lives in a filter.) unsigned ehGetCallFinallyRegionIndex(unsigned finallyIndex, bool* inTryRegion); // Find the range of basic blocks in which all BBJ_CALLFINALLY will be found that target the 'finallyIndex' region's // handler. Set begBlk to the first block, and endBlk to the block after the last block of the range // (nullptr if the last block is the last block in the program). // Precondition: 'finallyIndex' is the EH region of a try/finally clause. 
void ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** begBlk, BasicBlock** endBlk); #ifdef DEBUG // Given a BBJ_CALLFINALLY block and the EH region index of the finally it is calling, return // 'true' if the BBJ_CALLFINALLY is in the correct EH region. bool ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex); #endif // DEBUG #if defined(FEATURE_EH_FUNCLETS) // Do we need a PSPSym in the main function? For codegen purposes, we only need one // if there is a filter that protects a region with a nested EH clause (such as a // try/catch nested in the 'try' body of a try/filter/filter-handler). See // genFuncletProlog() for more details. However, the VM seems to use it for more // purposes, maybe including debugging. Until we are sure otherwise, always create // a PSPSym for functions with any EH. bool ehNeedsPSPSym() const { #ifdef TARGET_X86 return false; #else // TARGET_X86 return compHndBBtabCount > 0; #endif // TARGET_X86 } bool ehAnyFunclets(); // Are there any funclets in this function? unsigned ehFuncletCount(); // Return the count of funclets in the function unsigned bbThrowIndex(BasicBlock* blk); // Get the index to use as the cache key for sharing throw blocks #else // !FEATURE_EH_FUNCLETS bool ehAnyFunclets() { return false; } unsigned ehFuncletCount() { return 0; } unsigned bbThrowIndex(BasicBlock* blk) { return blk->bbTryIndex; } // Get the index to use as the cache key for sharing throw blocks #endif // !FEATURE_EH_FUNCLETS // Returns a flowList representing the "EH predecessors" of "blk". These are the normal predecessors of // "blk", plus one special case: if "blk" is the first block of a handler, considers the predecessor(s) of the first // first block of the corresponding try region to be "EH predecessors". (If there is a single such predecessor, // for example, we want to consider that the immediate dominator of the catch clause start block, so it's // convenient to also consider it a predecessor.) flowList* BlockPredsWithEH(BasicBlock* blk); // This table is useful for memoization of the method above. typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, flowList*> BlockToFlowListMap; BlockToFlowListMap* m_blockToEHPreds; BlockToFlowListMap* GetBlockToEHPreds() { if (m_blockToEHPreds == nullptr) { m_blockToEHPreds = new (getAllocator()) BlockToFlowListMap(getAllocator()); } return m_blockToEHPreds; } void* ehEmitCookie(BasicBlock* block); UNATIVE_OFFSET ehCodeOffset(BasicBlock* block); EHblkDsc* ehInitHndRange(BasicBlock* src, IL_OFFSET* hndBeg, IL_OFFSET* hndEnd, bool* inFilter); EHblkDsc* ehInitTryRange(BasicBlock* src, IL_OFFSET* tryBeg, IL_OFFSET* tryEnd); EHblkDsc* ehInitHndBlockRange(BasicBlock* blk, BasicBlock** hndBeg, BasicBlock** hndLast, bool* inFilter); EHblkDsc* ehInitTryBlockRange(BasicBlock* blk, BasicBlock** tryBeg, BasicBlock** tryLast); void fgSetTryBeg(EHblkDsc* handlerTab, BasicBlock* newTryBeg); void fgSetTryEnd(EHblkDsc* handlerTab, BasicBlock* newTryLast); void fgSetHndEnd(EHblkDsc* handlerTab, BasicBlock* newHndLast); void fgSkipRmvdBlocks(EHblkDsc* handlerTab); void fgAllocEHTable(); void fgRemoveEHTableEntry(unsigned XTnum); #if defined(FEATURE_EH_FUNCLETS) EHblkDsc* fgAddEHTableEntry(unsigned XTnum); #endif // FEATURE_EH_FUNCLETS #if !FEATURE_EH void fgRemoveEH(); #endif // !FEATURE_EH void fgSortEHTable(); // Causes the EH table to obey some well-formedness conditions, by inserting // empty BB's when necessary: // * No block is both the first block of a handler and the first block of a try. 
// * No block is the first block of multiple 'try' regions. // * No block is the last block of multiple EH regions. void fgNormalizeEH(); bool fgNormalizeEHCase1(); bool fgNormalizeEHCase2(); bool fgNormalizeEHCase3(); void fgCheckForLoopsInHandlers(); #ifdef DEBUG void dispIncomingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause); void dispOutgoingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause); void fgVerifyHandlerTab(); void fgDispHandlerTab(); #endif // DEBUG bool fgNeedToSortEHTable; void verInitEHTree(unsigned numEHClauses); void verInsertEhNode(CORINFO_EH_CLAUSE* clause, EHblkDsc* handlerTab); void verInsertEhNodeInTree(EHNodeDsc** ppRoot, EHNodeDsc* node); void verInsertEhNodeParent(EHNodeDsc** ppRoot, EHNodeDsc* node); void verCheckNestingLevel(EHNodeDsc* initRoot); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX GenTree and BasicBlock XX XX XX XX Functions to allocate and display the GenTrees and BasicBlocks XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // Functions to create nodes Statement* gtNewStmt(GenTree* expr = nullptr); Statement* gtNewStmt(GenTree* expr, const DebugInfo& di); // For unary opers. GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, bool doSimplifications = TRUE); // For binary opers. GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2); GenTreeColon* gtNewColonNode(var_types type, GenTree* elseNode, GenTree* thenNode); GenTreeQmark* gtNewQmarkNode(var_types type, GenTree* cond, GenTreeColon* colon); GenTree* gtNewLargeOperNode(genTreeOps oper, var_types type = TYP_I_IMPL, GenTree* op1 = nullptr, GenTree* op2 = nullptr); GenTreeIntCon* gtNewIconNode(ssize_t value, var_types type = TYP_INT); GenTreeIntCon* gtNewIconNode(unsigned fieldOffset, FieldSeqNode* fieldSeq); GenTreeIntCon* gtNewNull(); GenTreeIntCon* gtNewTrue(); GenTreeIntCon* gtNewFalse(); GenTree* gtNewPhysRegNode(regNumber reg, var_types type); GenTree* gtNewJmpTableNode(); GenTree* gtNewIndOfIconHandleNode(var_types indType, size_t value, GenTreeFlags iconFlags, bool isInvariant); GenTree* gtNewIconHandleNode(size_t value, GenTreeFlags flags, FieldSeqNode* fields = nullptr); GenTreeFlags gtTokenToIconFlags(unsigned token); GenTree* gtNewIconEmbHndNode(void* value, void* pValue, GenTreeFlags flags, void* compileTimeHandle); GenTree* gtNewIconEmbScpHndNode(CORINFO_MODULE_HANDLE scpHnd); GenTree* gtNewIconEmbClsHndNode(CORINFO_CLASS_HANDLE clsHnd); GenTree* gtNewIconEmbMethHndNode(CORINFO_METHOD_HANDLE methHnd); GenTree* gtNewIconEmbFldHndNode(CORINFO_FIELD_HANDLE fldHnd); GenTree* gtNewStringLiteralNode(InfoAccessType iat, void* pValue); GenTreeIntCon* gtNewStringLiteralLength(GenTreeStrCon* node); GenTree* gtNewLconNode(__int64 value); GenTree* gtNewDconNode(double value, var_types type = TYP_DOUBLE); GenTree* gtNewSconNode(int CPX, CORINFO_MODULE_HANDLE scpHandle); GenTree* gtNewZeroConNode(var_types type); GenTree* gtNewOneConNode(var_types type); GenTreeLclVar* gtNewStoreLclVar(unsigned dstLclNum, GenTree* src); #ifdef FEATURE_SIMD GenTree* gtNewSIMDVectorZero(var_types simdType, CorInfoType simdBaseJitType, unsigned simdSize); #endif GenTree* gtNewBlkOpNode(GenTree* dst, GenTree* srcOrFillVal, bool isVolatile, bool isCopyBlock); GenTree* gtNewPutArgReg(var_types type, GenTree* arg, 
regNumber argReg); GenTree* gtNewBitCastNode(var_types type, GenTree* arg); protected: void gtBlockOpInit(GenTree* result, GenTree* dst, GenTree* srcOrFillVal, bool isVolatile); public: GenTreeObj* gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr); void gtSetObjGcInfo(GenTreeObj* objNode); GenTree* gtNewStructVal(CORINFO_CLASS_HANDLE structHnd, GenTree* addr); GenTree* gtNewBlockVal(GenTree* addr, unsigned size); GenTree* gtNewCpObjNode(GenTree* dst, GenTree* src, CORINFO_CLASS_HANDLE structHnd, bool isVolatile); GenTreeCall::Use* gtNewCallArgs(GenTree* node); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3, GenTree* node4); GenTreeCall::Use* gtPrependNewCallArg(GenTree* node, GenTreeCall::Use* args); GenTreeCall::Use* gtInsertNewCallArgAfter(GenTree* node, GenTreeCall::Use* after); GenTreeCall* gtNewCallNode(gtCallTypes callType, CORINFO_METHOD_HANDLE handle, var_types type, GenTreeCall::Use* args, const DebugInfo& di = DebugInfo()); GenTreeCall* gtNewIndCallNode(GenTree* addr, var_types type, GenTreeCall::Use* args, const DebugInfo& di = DebugInfo()); GenTreeCall* gtNewHelperCallNode(unsigned helper, var_types type, GenTreeCall::Use* args = nullptr); GenTreeCall* gtNewRuntimeLookupHelperCallNode(CORINFO_RUNTIME_LOOKUP* pRuntimeLookup, GenTree* ctxTree, void* compileTimeHandle); GenTreeLclVar* gtNewLclvNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET)); GenTreeLclVar* gtNewLclLNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET)); GenTreeLclVar* gtNewLclVarAddrNode(unsigned lclNum, var_types type = TYP_I_IMPL); GenTreeLclFld* gtNewLclFldAddrNode(unsigned lclNum, unsigned lclOffs, FieldSeqNode* fieldSeq, var_types type = TYP_I_IMPL); #ifdef FEATURE_SIMD GenTreeSIMD* gtNewSIMDNode( var_types type, GenTree* op1, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize); GenTreeSIMD* gtNewSIMDNode(var_types type, GenTree* op1, GenTree* op2, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize); void SetOpLclRelatedToSIMDIntrinsic(GenTree* op); #endif #ifdef FEATURE_HW_INTRINSICS GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, GenTree* op4, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree** operands, size_t operandCount, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* 
gtNewSimdHWIntrinsicNode(var_types type, IntrinsicNodeBuilder&& nodeBuilder, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode( var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, op2, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTree* gtNewSimdAbsNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdBinOpNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCeilNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpAllNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpAnyNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCndSelNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCreateBroadcastNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdDotProdNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdFloorNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdGetElementNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdMaxNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdMinNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdNarrowNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdSqrtNode( 
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdSumNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdUnOpNode(genTreeOps op, var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWidenLowerNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWidenUpperNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWithElementNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdZeroNode(var_types type, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode( var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID); CORINFO_CLASS_HANDLE gtGetStructHandleForHWSIMD(var_types simdType, CorInfoType simdBaseJitType); CorInfoType getBaseJitTypeFromArgIfNeeded(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType); #endif // FEATURE_HW_INTRINSICS GenTree* gtNewMustThrowException(unsigned helper, var_types type, CORINFO_CLASS_HANDLE clsHnd); GenTreeLclFld* gtNewLclFldNode(unsigned lnum, var_types type, unsigned offset); GenTree* gtNewInlineCandidateReturnExpr(GenTree* inlineCandidate, var_types type, BasicBlockFlags bbFlags); GenTreeField* gtNewFieldRef(var_types type, CORINFO_FIELD_HANDLE fldHnd, GenTree* obj = nullptr, DWORD offset = 0); GenTree* gtNewIndexRef(var_types typ, GenTree* arrayOp, GenTree* indexOp); GenTreeArrLen* gtNewArrLen(var_types typ, GenTree* arrayOp, int lenOffset, BasicBlock* block); GenTreeIndir* gtNewIndir(var_types typ, GenTree* addr); GenTree* gtNewNullCheck(GenTree* addr, BasicBlock* basicBlock); var_types gtTypeForNullCheck(GenTree* tree); void gtChangeOperToNullCheck(GenTree* tree, BasicBlock* block); static fgArgTabEntry* gtArgEntryByArgNum(GenTreeCall* call, unsigned argNum); static fgArgTabEntry* gtArgEntryByNode(GenTreeCall* call, GenTree* node); fgArgTabEntry* gtArgEntryByLateArgIndex(GenTreeCall* call, unsigned lateArgInx); static GenTree* gtArgNodeByLateArgInx(GenTreeCall* call, unsigned lateArgInx); GenTreeOp* gtNewAssignNode(GenTree* dst, GenTree* src); GenTree* gtNewTempAssign(unsigned tmp, GenTree* val, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* gtNewRefCOMfield(GenTree* objPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp, CORINFO_CLASS_HANDLE structType, GenTree* assg); GenTree* gtNewNothingNode(); GenTree* gtNewArgPlaceHolderNode(var_types type, CORINFO_CLASS_HANDLE clsHnd); GenTree* gtUnusedValNode(GenTree* expr); GenTree* gtNewKeepAliveNode(GenTree* op); GenTreeCast* gtNewCastNode(var_types typ, GenTree* op1, bool fromUnsigned, var_types castType); GenTreeCast* gtNewCastNodeL(var_types typ, 
GenTree* op1, bool fromUnsigned, var_types castType); GenTreeAllocObj* gtNewAllocObjNode( unsigned int helper, bool helperHasSideEffects, CORINFO_CLASS_HANDLE clsHnd, var_types type, GenTree* op1); GenTreeAllocObj* gtNewAllocObjNode(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool useParent); GenTree* gtNewRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* lookupTree); GenTreeIndir* gtNewMethodTableLookup(GenTree* obj); //------------------------------------------------------------------------ // Other GenTree functions GenTree* gtClone(GenTree* tree, bool complexOK = false); // If `tree` is a lclVar with lclNum `varNum`, return an IntCns with value `varVal`; otherwise, // create a copy of `tree`, adding specified flags, replacing uses of lclVar `deepVarNum` with // IntCnses with value `deepVarVal`. GenTree* gtCloneExpr( GenTree* tree, GenTreeFlags addFlags, unsigned varNum, int varVal, unsigned deepVarNum, int deepVarVal); // Create a copy of `tree`, optionally adding specifed flags, and optionally mapping uses of local // `varNum` to int constants with value `varVal`. GenTree* gtCloneExpr(GenTree* tree, GenTreeFlags addFlags = GTF_EMPTY, unsigned varNum = BAD_VAR_NUM, int varVal = 0) { return gtCloneExpr(tree, addFlags, varNum, varVal, varNum, varVal); } Statement* gtCloneStmt(Statement* stmt) { GenTree* exprClone = gtCloneExpr(stmt->GetRootNode()); return gtNewStmt(exprClone, stmt->GetDebugInfo()); } // Internal helper for cloning a call GenTreeCall* gtCloneExprCallHelper(GenTreeCall* call, GenTreeFlags addFlags = GTF_EMPTY, unsigned deepVarNum = BAD_VAR_NUM, int deepVarVal = 0); // Create copy of an inline or guarded devirtualization candidate tree. GenTreeCall* gtCloneCandidateCall(GenTreeCall* call); void gtUpdateSideEffects(Statement* stmt, GenTree* tree); void gtUpdateTreeAncestorsSideEffects(GenTree* tree); void gtUpdateStmtSideEffects(Statement* stmt); void gtUpdateNodeSideEffects(GenTree* tree); void gtUpdateNodeOperSideEffects(GenTree* tree); void gtUpdateNodeOperSideEffectsPost(GenTree* tree); // Returns "true" iff the complexity (not formally defined, but first interpretation // is #of nodes in subtree) of "tree" is greater than "limit". // (This is somewhat redundant with the "GetCostEx()/GetCostSz()" fields, but can be used // before they have been set.) bool gtComplexityExceeds(GenTree** tree, unsigned limit); GenTree* gtReverseCond(GenTree* tree); static bool gtHasRef(GenTree* tree, ssize_t lclNum); bool gtHasLocalsWithAddrOp(GenTree* tree); unsigned gtSetCallArgsOrder(const GenTreeCall::UseList& args, bool lateArgs, int* callCostEx, int* callCostSz); unsigned gtSetMultiOpOrder(GenTreeMultiOp* multiOp); void gtWalkOp(GenTree** op1, GenTree** op2, GenTree* base, bool constOnly); #ifdef DEBUG unsigned gtHashValue(GenTree* tree); GenTree* gtWalkOpEffectiveVal(GenTree* op); #endif void gtPrepareCost(GenTree* tree); bool gtIsLikelyRegVar(GenTree* tree); // Returns true iff the secondNode can be swapped with firstNode. bool gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode); // Given an address expression, compute its costs and addressing mode opportunities, // and mark addressing mode candidates as GTF_DONT_CSE. // TODO-Throughput - Consider actually instantiating these early, to avoid // having to re-run the algorithm that looks for them (might also improve CQ). 
bool gtMarkAddrMode(GenTree* addr, int* costEx, int* costSz, var_types type); unsigned gtSetEvalOrder(GenTree* tree); void gtSetStmtInfo(Statement* stmt); // Returns "true" iff "node" has any of the side effects in "flags". bool gtNodeHasSideEffects(GenTree* node, GenTreeFlags flags); // Returns "true" iff "tree" or its (transitive) children have any of the side effects in "flags". bool gtTreeHasSideEffects(GenTree* tree, GenTreeFlags flags); // Appends 'expr' in front of 'list' // 'list' will typically start off as 'nullptr' // when 'list' is non-null a GT_COMMA node is used to insert 'expr' GenTree* gtBuildCommaList(GenTree* list, GenTree* expr); void gtExtractSideEffList(GenTree* expr, GenTree** pList, GenTreeFlags GenTreeFlags = GTF_SIDE_EFFECT, bool ignoreRoot = false); GenTree* gtGetThisArg(GenTreeCall* call); // Static fields of struct types (and sometimes the types that those are reduced to) are represented by having the // static field contain an object pointer to the boxed struct. This simplifies the GC implementation...but // complicates the JIT somewhat. This predicate returns "true" iff a node with type "fieldNodeType", representing // the given "fldHnd", is such an object pointer. bool gtIsStaticFieldPtrToBoxedStruct(var_types fieldNodeType, CORINFO_FIELD_HANDLE fldHnd); // Return true if call is a recursive call; return false otherwise. // Note when inlining, this looks for calls back to the root method. bool gtIsRecursiveCall(GenTreeCall* call) { return gtIsRecursiveCall(call->gtCallMethHnd); } bool gtIsRecursiveCall(CORINFO_METHOD_HANDLE callMethodHandle) { return (callMethodHandle == impInlineRoot()->info.compMethodHnd); } //------------------------------------------------------------------------- GenTree* gtFoldExpr(GenTree* tree); GenTree* gtFoldExprConst(GenTree* tree); GenTree* gtFoldExprSpecial(GenTree* tree); GenTree* gtFoldBoxNullable(GenTree* tree); GenTree* gtFoldExprCompare(GenTree* tree); GenTree* gtCreateHandleCompare(genTreeOps oper, GenTree* op1, GenTree* op2, CorInfoInlineTypeCheck typeCheckInliningResult); GenTree* gtFoldExprCall(GenTreeCall* call); GenTree* gtFoldTypeCompare(GenTree* tree); GenTree* gtFoldTypeEqualityCall(bool isEq, GenTree* op1, GenTree* op2); // Options to control behavior of gtTryRemoveBoxUpstreamEffects enum BoxRemovalOptions { BR_REMOVE_AND_NARROW, // remove effects, minimize remaining work, return possibly narrowed source tree BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE, // remove effects and minimize remaining work, return type handle tree BR_REMOVE_BUT_NOT_NARROW, // remove effects, return original source tree BR_DONT_REMOVE, // check if removal is possible, return copy source tree BR_DONT_REMOVE_WANT_TYPE_HANDLE, // check if removal is possible, return type handle tree BR_MAKE_LOCAL_COPY // revise box to copy to temp local and return local's address }; GenTree* gtTryRemoveBoxUpstreamEffects(GenTree* tree, BoxRemovalOptions options = BR_REMOVE_AND_NARROW); GenTree* gtOptimizeEnumHasFlag(GenTree* thisOp, GenTree* flagOp); //------------------------------------------------------------------------- // Get the handle, if any. CORINFO_CLASS_HANDLE gtGetStructHandleIfPresent(GenTree* tree); // Get the handle, and assert if not found. CORINFO_CLASS_HANDLE gtGetStructHandle(GenTree* tree); // Get the handle for a ref type. 
CORINFO_CLASS_HANDLE gtGetClassHandle(GenTree* tree, bool* pIsExact, bool* pIsNonNull); // Get the class handle for an helper call CORINFO_CLASS_HANDLE gtGetHelperCallClassHandle(GenTreeCall* call, bool* pIsExact, bool* pIsNonNull); // Get the element handle for an array of ref type. CORINFO_CLASS_HANDLE gtGetArrayElementClassHandle(GenTree* array); // Get a class handle from a helper call argument CORINFO_CLASS_HANDLE gtGetHelperArgClassHandle(GenTree* array); // Get the class handle for a field CORINFO_CLASS_HANDLE gtGetFieldClassHandle(CORINFO_FIELD_HANDLE fieldHnd, bool* pIsExact, bool* pIsNonNull); // Check if this tree is a gc static base helper call bool gtIsStaticGCBaseHelperCall(GenTree* tree); //------------------------------------------------------------------------- // Functions to display the trees #ifdef DEBUG void gtDispNode(GenTree* tree, IndentStack* indentStack, _In_z_ const char* msg, bool isLIR); void gtDispConst(GenTree* tree); void gtDispLeaf(GenTree* tree, IndentStack* indentStack); void gtDispNodeName(GenTree* tree); #if FEATURE_MULTIREG_RET unsigned gtDispMultiRegCount(GenTree* tree); #endif void gtDispRegVal(GenTree* tree); void gtDispZeroFieldSeq(GenTree* tree); void gtDispVN(GenTree* tree); void gtDispCommonEndLine(GenTree* tree); enum IndentInfo { IINone, IIArc, IIArcTop, IIArcBottom, IIEmbedded, IIError, IndentInfoCount }; void gtDispChild(GenTree* child, IndentStack* indentStack, IndentInfo arcType, _In_opt_ const char* msg = nullptr, bool topOnly = false); void gtDispTree(GenTree* tree, IndentStack* indentStack = nullptr, _In_opt_ const char* msg = nullptr, bool topOnly = false, bool isLIR = false); void gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, const char** ilNameOut, unsigned* ilNumOut); int gtGetLclVarName(unsigned lclNum, char* buf, unsigned buf_remaining); char* gtGetLclVarName(unsigned lclNum); void gtDispLclVar(unsigned lclNum, bool padForBiggestDisp = true); void gtDispLclVarStructType(unsigned lclNum); void gtDispClassLayout(ClassLayout* layout, var_types type); void gtDispILLocation(const ILLocation& loc); void gtDispStmt(Statement* stmt, const char* msg = nullptr); void gtDispBlockStmts(BasicBlock* block); void gtGetArgMsg(GenTreeCall* call, GenTree* arg, unsigned argNum, char* bufp, unsigned bufLength); void gtGetLateArgMsg(GenTreeCall* call, GenTree* arg, int argNum, char* bufp, unsigned bufLength); void gtDispArgList(GenTreeCall* call, GenTree* lastCallOperand, IndentStack* indentStack); void gtDispAnyFieldSeq(FieldSeqNode* fieldSeq); void gtDispFieldSeq(FieldSeqNode* pfsn); void gtDispRange(LIR::ReadOnlyRange const& range); void gtDispTreeRange(LIR::Range& containingRange, GenTree* tree); void gtDispLIRNode(GenTree* node, const char* prefixMsg = nullptr); #endif // For tree walks enum fgWalkResult { WALK_CONTINUE, WALK_SKIP_SUBTREES, WALK_ABORT }; struct fgWalkData; typedef fgWalkResult(fgWalkPreFn)(GenTree** pTree, fgWalkData* data); typedef fgWalkResult(fgWalkPostFn)(GenTree** pTree, fgWalkData* data); static fgWalkPreFn gtMarkColonCond; static fgWalkPreFn gtClearColonCond; struct FindLinkData { GenTree* nodeToFind; GenTree** result; GenTree* parent; }; FindLinkData gtFindLink(Statement* stmt, GenTree* node); bool gtHasCatchArg(GenTree* tree); typedef ArrayStack<GenTree*> GenTreeStack; static bool gtHasCallOnStack(GenTreeStack* parentStack); //========================================================================= // BasicBlock functions #ifdef DEBUG // This is a debug flag we will use to assert when creating block 
during codegen // as this interferes with procedure splitting. If you know what you're doing, set // it to true before creating the block. (DEBUG only) bool fgSafeBasicBlockCreation; #endif BasicBlock* bbNewBasicBlock(BBjumpKinds jumpKind); void placeLoopAlignInstructions(); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX LclVarsInfo XX XX XX XX The variables to be used by the code generator. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // // For both PROMOTION_TYPE_NONE and PROMOTION_TYPE_DEPENDENT the struct will // be placed in the stack frame and it's fields must be laid out sequentially. // // For PROMOTION_TYPE_INDEPENDENT each of the struct's fields is replaced by // a local variable that can be enregistered or placed in the stack frame. // The fields do not need to be laid out sequentially // enum lvaPromotionType { PROMOTION_TYPE_NONE, // The struct local is not promoted PROMOTION_TYPE_INDEPENDENT, // The struct local is promoted, // and its field locals are independent of its parent struct local. PROMOTION_TYPE_DEPENDENT // The struct local is promoted, // but its field locals depend on its parent struct local. }; /*****************************************************************************/ enum FrameLayoutState { NO_FRAME_LAYOUT, INITIAL_FRAME_LAYOUT, PRE_REGALLOC_FRAME_LAYOUT, REGALLOC_FRAME_LAYOUT, TENTATIVE_FRAME_LAYOUT, FINAL_FRAME_LAYOUT }; public: RefCountState lvaRefCountState; // Current local ref count state bool lvaLocalVarRefCounted() const { return lvaRefCountState == RCS_NORMAL; } bool lvaTrackedFixed; // true: We cannot add new 'tracked' variable unsigned lvaCount; // total number of locals, which includes function arguments, // special arguments, IL local variables, and JIT temporary variables LclVarDsc* lvaTable; // variable descriptor table unsigned lvaTableCnt; // lvaTable size (>= lvaCount) unsigned lvaTrackedCount; // actual # of locals being tracked unsigned lvaTrackedCountInSizeTUnits; // min # of size_t's sufficient to hold a bit for all the locals being tracked #ifdef DEBUG VARSET_TP lvaTrackedVars; // set of tracked variables #endif #ifndef TARGET_64BIT VARSET_TP lvaLongVars; // set of long (64-bit) variables #endif VARSET_TP lvaFloatVars; // set of floating-point (32-bit and 64-bit) variables unsigned lvaCurEpoch; // VarSets are relative to a specific set of tracked var indices. // It that changes, this changes. VarSets from different epochs // cannot be meaningfully combined. unsigned GetCurLVEpoch() { return lvaCurEpoch; } // reverse map of tracked number to var number unsigned lvaTrackedToVarNumSize; unsigned* lvaTrackedToVarNum; #if DOUBLE_ALIGN #ifdef DEBUG // # of procs compiled a with double-aligned stack static unsigned s_lvaDoubleAlignedProcsCount; #endif #endif // Getters and setters for address-exposed and do-not-enregister local var properties. 
bool lvaVarAddrExposed(unsigned varNum) const; void lvaSetVarAddrExposed(unsigned varNum DEBUGARG(AddressExposedReason reason)); void lvaSetVarLiveInOutOfHandler(unsigned varNum); bool lvaVarDoNotEnregister(unsigned varNum); void lvSetMinOptsDoNotEnreg(); bool lvaEnregEHVars; bool lvaEnregMultiRegVars; void lvaSetVarDoNotEnregister(unsigned varNum DEBUGARG(DoNotEnregisterReason reason)); unsigned lvaVarargsHandleArg; #ifdef TARGET_X86 unsigned lvaVarargsBaseOfStkArgs; // Pointer (computed based on incoming varargs handle) to the start of the stack // arguments #endif // TARGET_X86 unsigned lvaInlinedPInvokeFrameVar; // variable representing the InlinedCallFrame unsigned lvaReversePInvokeFrameVar; // variable representing the reverse PInvoke frame #if FEATURE_FIXED_OUT_ARGS unsigned lvaPInvokeFrameRegSaveVar; // variable representing the RegSave for PInvoke inlining. #endif unsigned lvaMonAcquired; // boolean variable introduced into synchronized methods // that tracks whether the lock has been taken unsigned lvaArg0Var; // The lclNum of arg0. Normally this will be info.compThisArg. // However, if there is a "ldarga 0" or "starg 0" in the IL, // we will redirect all "ldarg(a) 0" and "starg 0" to this temp. unsigned lvaInlineeReturnSpillTemp; // The temp to spill the non-VOID return expression // in case there are multiple BBJ_RETURN blocks in the inlinee // or if the inlinee has GC ref locals. #if FEATURE_FIXED_OUT_ARGS unsigned lvaOutgoingArgSpaceVar; // dummy TYP_LCLBLK var for fixed outgoing argument space PhasedVar<unsigned> lvaOutgoingArgSpaceSize; // size of fixed outgoing argument space #endif // FEATURE_FIXED_OUT_ARGS static unsigned GetOutgoingArgByteSize(unsigned sizeWithoutPadding) { return roundUp(sizeWithoutPadding, TARGET_POINTER_SIZE); } // Variable representing the return address. The helper-based tailcall // mechanism passes the address of the return address to a runtime helper // where it is used to detect tail-call chains. unsigned lvaRetAddrVar; #if defined(DEBUG) && defined(TARGET_XARCH) unsigned lvaReturnSpCheck; // Stores SP to confirm it is not corrupted on return. #endif // defined(DEBUG) && defined(TARGET_XARCH) #if defined(DEBUG) && defined(TARGET_X86) unsigned lvaCallSpCheck; // Stores SP to confirm it is not corrupted after every call. #endif // defined(DEBUG) && defined(TARGET_X86) bool lvaGenericsContextInUse; bool lvaKeepAliveAndReportThis(); // Synchronized instance method of a reference type, or // CORINFO_GENERICS_CTXT_FROM_THIS? bool lvaReportParamTypeArg(); // Exceptions and CORINFO_GENERICS_CTXT_FROM_PARAMTYPEARG? //------------------------------------------------------------------------- // All these frame offsets are inter-related and must be kept in sync #if !defined(FEATURE_EH_FUNCLETS) // This is used for the callable handlers unsigned lvaShadowSPslotsVar; // TYP_BLK variable for all the shadow SP slots #endif // FEATURE_EH_FUNCLETS int lvaCachedGenericContextArgOffs; int lvaCachedGenericContextArgOffset(); // For CORINFO_CALLCONV_PARAMTYPE and if generic context is passed as // THIS pointer #ifdef JIT32_GCENCODER unsigned lvaLocAllocSPvar; // variable which stores the value of ESP after the last alloca/localloc #endif // JIT32_GCENCODER unsigned lvaNewObjArrayArgs; // variable with arguments for new MD array helper // TODO-Review: Prior to reg predict we reserve 24 bytes for Spill temps.
// after the reg predict we will use a computed maxTmpSize // which is based upon the number of spill temps predicted by reg predict // All this is necessary because if we under-estimate the size of the spill // temps we could fail when encoding instructions that reference stack offsets for ARM. // // Pre codegen max spill temp size. static const unsigned MAX_SPILL_TEMP_SIZE = 24; //------------------------------------------------------------------------- unsigned lvaGetMaxSpillTempSize(); #ifdef TARGET_ARM bool lvaIsPreSpilled(unsigned lclNum, regMaskTP preSpillMask); #endif // TARGET_ARM void lvaAssignFrameOffsets(FrameLayoutState curState); void lvaFixVirtualFrameOffsets(); void lvaUpdateArgWithInitialReg(LclVarDsc* varDsc); void lvaUpdateArgsWithInitialReg(); void lvaAssignVirtualFrameOffsetsToArgs(); #ifdef UNIX_AMD64_ABI int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs, int* callerArgOffset); #else // !UNIX_AMD64_ABI int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs); #endif // !UNIX_AMD64_ABI void lvaAssignVirtualFrameOffsetsToLocals(); int lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs); #ifdef TARGET_AMD64 // Returns true if compCalleeRegsPushed (including RBP if used as frame pointer) is even. bool lvaIsCalleeSavedIntRegCountEven(); #endif void lvaAlignFrame(); void lvaAssignFrameOffsetsToPromotedStructs(); int lvaAllocateTemps(int stkOffs, bool mustDoubleAlign); #ifdef DEBUG void lvaDumpRegLocation(unsigned lclNum); void lvaDumpFrameLocation(unsigned lclNum); void lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t refCntWtdWidth = 6); void lvaTableDump(FrameLayoutState curState = NO_FRAME_LAYOUT); // NO_FRAME_LAYOUT means use the current frame // layout state defined by lvaDoneFrameLayout #endif // Limit frames size to 1GB. The maximum is 2GB in theory - make it intentionally smaller // to avoid bugs from borderline cases. #define MAX_FrameSize 0x3FFFFFFF void lvaIncrementFrameSize(unsigned size); unsigned lvaFrameSize(FrameLayoutState curState); // Returns the caller-SP-relative offset for the SP/FP relative offset determined by FP based. int lvaToCallerSPRelativeOffset(int offs, bool isFpBased, bool forRootFrame = true) const; // Returns the caller-SP-relative offset for the local variable "varNum." int lvaGetCallerSPRelativeOffset(unsigned varNum); // Returns the SP-relative offset for the local variable "varNum". Illegal to ask this for functions with localloc. int lvaGetSPRelativeOffset(unsigned varNum); int lvaToInitialSPRelativeOffset(unsigned offset, bool isFpBased); int lvaGetInitialSPRelativeOffset(unsigned varNum); // True if this is an OSR compilation and this local is potentially // located on the original method stack frame. 
bool lvaIsOSRLocal(unsigned varNum); //------------------------ For splitting types ---------------------------- void lvaInitTypeRef(); void lvaInitArgs(InitVarDscInfo* varDscInfo); void lvaInitThisPtr(InitVarDscInfo* varDscInfo); void lvaInitRetBuffArg(InitVarDscInfo* varDscInfo, bool useFixedRetBufReg); void lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, unsigned takeArgs); void lvaInitGenericsCtxt(InitVarDscInfo* varDscInfo); void lvaInitVarArgsHandle(InitVarDscInfo* varDscInfo); void lvaInitVarDsc(LclVarDsc* varDsc, unsigned varNum, CorInfoType corInfoType, CORINFO_CLASS_HANDLE typeHnd, CORINFO_ARG_LIST_HANDLE varList, CORINFO_SIG_INFO* varSig); static unsigned lvaTypeRefMask(var_types type); var_types lvaGetActualType(unsigned lclNum); var_types lvaGetRealType(unsigned lclNum); //------------------------------------------------------------------------- void lvaInit(); LclVarDsc* lvaGetDesc(unsigned lclNum) { assert(lclNum < lvaCount); return &lvaTable[lclNum]; } LclVarDsc* lvaGetDesc(unsigned lclNum) const { assert(lclNum < lvaCount); return &lvaTable[lclNum]; } LclVarDsc* lvaGetDesc(const GenTreeLclVarCommon* lclVar) { return lvaGetDesc(lclVar->GetLclNum()); } unsigned lvaTrackedIndexToLclNum(unsigned trackedIndex) { assert(trackedIndex < lvaTrackedCount); unsigned lclNum = lvaTrackedToVarNum[trackedIndex]; assert(lclNum < lvaCount); return lclNum; } LclVarDsc* lvaGetDescByTrackedIndex(unsigned trackedIndex) { return lvaGetDesc(lvaTrackedIndexToLclNum(trackedIndex)); } unsigned lvaGetLclNum(const LclVarDsc* varDsc) { assert((lvaTable <= varDsc) && (varDsc < lvaTable + lvaCount)); // varDsc must point within the table assert(((char*)varDsc - (char*)lvaTable) % sizeof(LclVarDsc) == 0); // varDsc better not point in the middle of a variable unsigned varNum = (unsigned)(varDsc - lvaTable); assert(varDsc == &lvaTable[varNum]); return varNum; } unsigned lvaLclSize(unsigned varNum); unsigned lvaLclExactSize(unsigned varNum); bool lvaHaveManyLocals() const; unsigned lvaGrabTemp(bool shortLifetime DEBUGARG(const char* reason)); unsigned lvaGrabTemps(unsigned cnt DEBUGARG(const char* reason)); unsigned lvaGrabTempWithImplicitUse(bool shortLifetime DEBUGARG(const char* reason)); void lvaSortByRefCount(); void lvaMarkLocalVars(); // Local variable ref-counting void lvaComputeRefCounts(bool isRecompute, bool setSlotNumbers); void lvaMarkLocalVars(BasicBlock* block, bool isRecompute); void lvaAllocOutgoingArgSpaceVar(); // Set up lvaOutgoingArgSpaceVar VARSET_VALRET_TP lvaStmtLclMask(Statement* stmt); #ifdef DEBUG struct lvaStressLclFldArgs { Compiler* m_pCompiler; bool m_bFirstPass; }; static fgWalkPreFn lvaStressLclFldCB; void lvaStressLclFld(); void lvaDispVarSet(VARSET_VALARG_TP set, VARSET_VALARG_TP allVars); void lvaDispVarSet(VARSET_VALARG_TP set); #endif #ifdef TARGET_ARM int lvaFrameAddress(int varNum, bool mustBeFPBased, regNumber* pBaseReg, int addrModeOffset, bool isFloatUsage); #else int lvaFrameAddress(int varNum, bool* pFPbased); #endif bool lvaIsParameter(unsigned varNum); bool lvaIsRegArgument(unsigned varNum); bool lvaIsOriginalThisArg(unsigned varNum); // Is this varNum the original this argument? bool lvaIsOriginalThisReadOnly(); // return true if there is no place in the code // that writes to arg0 // For x64 this is 3, 5, 6, 7, >8 byte structs that are passed by reference. // For ARM64, this is structs larger than 16 bytes that are passed by reference. 
bool lvaIsImplicitByRefLocal(unsigned varNum) { #if defined(TARGET_AMD64) || defined(TARGET_ARM64) LclVarDsc* varDsc = lvaGetDesc(varNum); if (varDsc->lvIsImplicitByRef) { assert(varDsc->lvIsParam); assert(varTypeIsStruct(varDsc) || (varDsc->lvType == TYP_BYREF)); return true; } #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) return false; } // Returns true if this local var is a multireg struct bool lvaIsMultiregStruct(LclVarDsc* varDsc, bool isVararg); // If the local is a TYP_STRUCT, get/set a class handle describing it CORINFO_CLASS_HANDLE lvaGetStruct(unsigned varNum); void lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool unsafeValueClsCheck, bool setTypeInfo = true); void lvaSetStructUsedAsVarArg(unsigned varNum); // If the local is TYP_REF, set or update the associated class information. void lvaSetClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false); void lvaSetClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr); void lvaUpdateClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false); void lvaUpdateClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr); #define MAX_NumOfFieldsInPromotableStruct 4 // Maximum number of fields in promotable struct // Info about struct type fields. struct lvaStructFieldInfo { CORINFO_FIELD_HANDLE fldHnd; unsigned char fldOffset; unsigned char fldOrdinal; var_types fldType; unsigned fldSize; CORINFO_CLASS_HANDLE fldTypeHnd; lvaStructFieldInfo() : fldHnd(nullptr), fldOffset(0), fldOrdinal(0), fldType(TYP_UNDEF), fldSize(0), fldTypeHnd(nullptr) { } }; // Info about a struct type, instances of which may be candidates for promotion. struct lvaStructPromotionInfo { CORINFO_CLASS_HANDLE typeHnd; bool canPromote; bool containsHoles; bool customLayout; bool fieldsSorted; unsigned char fieldCnt; lvaStructFieldInfo fields[MAX_NumOfFieldsInPromotableStruct]; lvaStructPromotionInfo(CORINFO_CLASS_HANDLE typeHnd = nullptr) : typeHnd(typeHnd) , canPromote(false) , containsHoles(false) , customLayout(false) , fieldsSorted(false) , fieldCnt(0) { } }; struct lvaFieldOffsetCmp { bool operator()(const lvaStructFieldInfo& field1, const lvaStructFieldInfo& field2); }; // This class is responsible for checking validity and profitability of struct promotion. // If it is both legal and profitable, then TryPromoteStructVar promotes the struct and initializes // necessary information for fgMorphStructField to use.
class StructPromotionHelper { public: StructPromotionHelper(Compiler* compiler); bool CanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd); bool TryPromoteStructVar(unsigned lclNum); void Clear() { structPromotionInfo.typeHnd = NO_CLASS_HANDLE; } #ifdef DEBUG void CheckRetypedAsScalar(CORINFO_FIELD_HANDLE fieldHnd, var_types requestedType); #endif // DEBUG private: bool CanPromoteStructVar(unsigned lclNum); bool ShouldPromoteStructVar(unsigned lclNum); void PromoteStructVar(unsigned lclNum); void SortStructFields(); bool CanConstructAndPromoteField(lvaStructPromotionInfo* structPromotionInfo); lvaStructFieldInfo GetFieldInfo(CORINFO_FIELD_HANDLE fieldHnd, BYTE ordinal); bool TryPromoteStructField(lvaStructFieldInfo& outerFieldInfo); private: Compiler* compiler; lvaStructPromotionInfo structPromotionInfo; #ifdef DEBUG typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<CORINFO_FIELD_STRUCT_>, var_types> RetypedAsScalarFieldsMap; RetypedAsScalarFieldsMap retypedFieldsMap; #endif // DEBUG }; StructPromotionHelper* structPromotionHelper; unsigned lvaGetFieldLocal(const LclVarDsc* varDsc, unsigned int fldOffset); lvaPromotionType lvaGetPromotionType(const LclVarDsc* varDsc); lvaPromotionType lvaGetPromotionType(unsigned varNum); lvaPromotionType lvaGetParentPromotionType(const LclVarDsc* varDsc); lvaPromotionType lvaGetParentPromotionType(unsigned varNum); bool lvaIsFieldOfDependentlyPromotedStruct(const LclVarDsc* varDsc); bool lvaIsGCTracked(const LclVarDsc* varDsc); #if defined(FEATURE_SIMD) bool lvaMapSimd12ToSimd16(const LclVarDsc* varDsc) { assert(varDsc->lvType == TYP_SIMD12); assert(varDsc->lvExactSize == 12); #if defined(TARGET_64BIT) assert(compMacOsArm64Abi() || varDsc->lvSize() == 16); #endif // defined(TARGET_64BIT) // We make local variable SIMD12 types 16 bytes instead of just 12. // lvSize() will return 16 bytes for SIMD12, even for fields. // However, we can't do that mapping if the var is a dependently promoted struct field. // Such a field must remain its exact size within its parent struct unless it is a single // field *and* it is the only field in a struct of 16 bytes. if (varDsc->lvSize() != 16) { return false; } if (lvaIsFieldOfDependentlyPromotedStruct(varDsc)) { LclVarDsc* parentVarDsc = lvaGetDesc(varDsc->lvParentLcl); return (parentVarDsc->lvFieldCnt == 1) && (parentVarDsc->lvSize() == 16); } return true; } #endif // defined(FEATURE_SIMD) unsigned lvaGSSecurityCookie; // LclVar number bool lvaTempsHaveLargerOffsetThanVars(); // Returns "true" iff local variable "lclNum" is in SSA form. bool lvaInSsa(unsigned lclNum) { assert(lclNum < lvaCount); return lvaTable[lclNum].lvInSsa; } unsigned lvaStubArgumentVar; // variable representing the secret stub argument coming in EAX #if defined(FEATURE_EH_FUNCLETS) unsigned lvaPSPSym; // variable representing the PSPSym #endif InlineInfo* impInlineInfo; // Only present for inlinees InlineStrategy* m_inlineStrategy; InlineContext* compInlineContext; // Always present // The Compiler* that is the root of the inlining tree of which "this" is a member. Compiler* impInlineRoot(); #if defined(DEBUG) || defined(INLINE_DATA) unsigned __int64 getInlineCycleCount() { return m_compCycles; } #endif // defined(DEBUG) || defined(INLINE_DATA) bool fgNoStructPromotion; // Set to TRUE to turn off struct promotion for this method. bool fgNoStructParamPromotion; // Set to TRUE to turn off struct promotion for parameters of this method.
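// Illustrative sketch (not part of the interface; "comp", "lclNum" and "fldOffset" are assumed
// inputs): how a phase might consult the promotion queries above before rewriting a struct
// field access. Independent promotion gives each field its own enregisterable local, while
// dependent promotion keeps the fields inside the parent struct local's stack slot.
//
//   LclVarDsc* varDsc = comp->lvaGetDesc(lclNum);
//   if (comp->lvaGetPromotionType(varDsc) == PROMOTION_TYPE_INDEPENDENT)
//   {
//       unsigned fieldLclNum = comp->lvaGetFieldLocal(varDsc, fldOffset); // field has its own local
//   }
//   else if (comp->lvaGetPromotionType(varDsc) == PROMOTION_TYPE_DEPENDENT)
//   {
//       // accesses must go through the parent struct local's frame slot
//   }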
//========================================================================= // PROTECTED //========================================================================= protected: //---------------- Local variable ref-counting ---------------------------- void lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt, bool isRecompute); bool IsDominatedByExceptionalEntry(BasicBlock* block); void SetVolatileHint(LclVarDsc* varDsc); // Keeps the mapping from SSA #'s to VN's for the implicit memory variables. SsaDefArray<SsaMemDef> lvMemoryPerSsaData; public: // Returns the address of the per-Ssa data for memory at the given ssaNum (which is required // not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is // not an SSA variable). SsaMemDef* GetMemoryPerSsaData(unsigned ssaNum) { return lvMemoryPerSsaData.GetSsaDef(ssaNum); } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Importer XX XX XX XX Imports the given method and converts it to semantic trees XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ private: // For prefixFlags enum { PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix PREFIX_TAILCALL_IMPLICIT = 0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix PREFIX_TAILCALL_STRESS = 0x00000100, // call doesn't have "tail" IL prefix but is treated as explicit because of tail call stress PREFIX_TAILCALL = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT | PREFIX_TAILCALL_STRESS), PREFIX_VOLATILE = 0x00001000, PREFIX_UNALIGNED = 0x00010000, PREFIX_CONSTRAINED = 0x00100000, PREFIX_READONLY = 0x01000000 }; static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix); static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp); static bool impOpcodeIsCallOpcode(OPCODE opcode); public: void impInit(); void impImport(); CORINFO_CLASS_HANDLE impGetRefAnyClass(); CORINFO_CLASS_HANDLE impGetRuntimeArgumentHandle(); CORINFO_CLASS_HANDLE impGetTypeHandleClass(); CORINFO_CLASS_HANDLE impGetStringClass(); CORINFO_CLASS_HANDLE impGetObjectClass(); // Returns underlying type of handles returned by ldtoken instruction var_types GetRuntimeHandleUnderlyingType() { // RuntimeTypeHandle is backed by raw pointer on CoreRT and by object reference on other runtimes return IsTargetAbi(CORINFO_CORERT_ABI) ? TYP_I_IMPL : TYP_REF; } void impDevirtualizeCall(GenTreeCall* call, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_METHOD_HANDLE* method, unsigned* methodFlags, CORINFO_CONTEXT_HANDLE* contextHandle, CORINFO_CONTEXT_HANDLE* exactContextHandle, bool isLateDevirtualization, bool isExplicitTailCall, IL_OFFSET ilOffset = BAD_IL_OFFSET); //========================================================================= // PROTECTED //========================================================================= protected: //-------------------- Stack manipulation --------------------------------- unsigned impStkSize; // Size of the full stack #define SMALL_STACK_SIZE 16 // number of elements in impSmallStack struct SavedStack // used to save/restore stack contents.
{ unsigned ssDepth; // number of values on stack StackEntry* ssTrees; // saved tree values }; bool impIsPrimitive(CorInfoType type); bool impILConsumesAddr(const BYTE* codeAddr); void impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind); void impPushOnStack(GenTree* tree, typeInfo ti); void impPushNullObjRefOnStack(); StackEntry impPopStack(); StackEntry& impStackTop(unsigned n = 0); unsigned impStackHeight(); void impSaveStackState(SavedStack* savePtr, bool copy); void impRestoreStackState(SavedStack* savePtr); GenTree* impImportLdvirtftn(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); int impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, const BYTE* codeAddr, const BYTE* codeEndp, bool makeInlineObservation = false); void impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken); void impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); bool impCanPInvokeInline(); bool impCanPInvokeInlineCallSite(BasicBlock* block); void impCheckForPInvokeCall( GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block); GenTreeCall* impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di = DebugInfo()); void impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig); void impInsertHelperCall(CORINFO_HELPER_DESC* helperCall); void impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall); void impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall); var_types impImportCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a // type parameter? 
GenTree* newobjThis, int prefixFlags, CORINFO_CALL_INFO* callInfo, IL_OFFSET rawILOffset); CORINFO_CLASS_HANDLE impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE specialIntrinsicHandle); bool impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo, CorInfoCallConvExtension callConv); GenTree* impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd); GenTree* impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension unmgdCallConv); #ifdef DEBUG var_types impImportJitTestLabelMark(int numArgs); #endif // DEBUG GenTree* impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken); GenTree* impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp); GenTree* impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp); static void impBashVarAddrsToI(GenTree* tree1, GenTree* tree2 = nullptr); GenTree* impImplicitIorI4Cast(GenTree* tree, var_types dstTyp); GenTree* impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp); void impImportLeave(BasicBlock* block); void impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr); GenTree* impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom); // Mirrors StringComparison.cs enum StringComparison { Ordinal = 4, OrdinalIgnoreCase = 5 }; enum StringComparisonJoint { Eq, // (d1 == cns1) && (s2 == cns2) Xor, // (d1 ^ cns1) | (s2 ^ cns2) }; GenTree* impStringEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags); GenTree* impSpanEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags); GenTree* impExpandHalfConstEquals(GenTreeLclVar* data, GenTree* lengthFld, bool checkForNull, bool startsWith, WCHAR* cnsData, int len, int dataOffset, StringComparison cmpMode); GenTree* impCreateCompareInd(GenTreeLclVar* obj, var_types type, ssize_t offset, ssize_t value, StringComparison ignoreCase, StringComparisonJoint joint = Eq); GenTree* impExpandHalfConstEqualsSWAR( GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset, StringComparison cmpMode); GenTree* impExpandHalfConstEqualsSIMD( GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset, StringComparison cmpMode); GenTreeStrCon* impGetStrConFromSpan(GenTree* span); GenTree* impIntrinsic(GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, unsigned methodFlags, int memberRef, bool readonlyCall, bool tailCall, CORINFO_RESOLVED_TOKEN* pContstrainedResolvedToken, CORINFO_THIS_TRANSFORM constraintCallThisTransform, NamedIntrinsic* pIntrinsicName, bool* isSpecialIntrinsic = nullptr); GenTree* impMathIntrinsic(CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, var_types callType, NamedIntrinsic intrinsicName, bool tailCall); NamedIntrinsic lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method); GenTree* impUnsupportedNamedIntrinsic(unsigned helper, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand); #ifdef FEATURE_HW_INTRINSICS GenTree* impHWIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand); GenTree* impSimdAsHWIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, GenTree* newobjThis); protected: bool compSupportsHWIntrinsic(CORINFO_InstructionSet isa); GenTree* impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, var_types retType, CorInfoType simdBaseJitType, 
unsigned simdSize, GenTree* newobjThis); GenTree* impSpecialIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType, var_types retType, unsigned simdSize); GenTree* getArgForHWIntrinsic(var_types argType, CORINFO_CLASS_HANDLE argClass, bool expectAddr = false, GenTree* newobjThis = nullptr); GenTree* impNonConstFallback(NamedIntrinsic intrinsic, var_types simdType, CorInfoType simdBaseJitType); GenTree* addRangeCheckIfNeeded( NamedIntrinsic intrinsic, GenTree* immOp, bool mustExpand, int immLowerBound, int immUpperBound); GenTree* addRangeCheckForHWIntrinsic(GenTree* immOp, int immLowerBound, int immUpperBound); #ifdef TARGET_XARCH GenTree* impBaseIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType, var_types retType, unsigned simdSize); GenTree* impSSEIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impSSE2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impAvxOrAvx2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impBMI1OrBMI2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); #endif // TARGET_XARCH #endif // FEATURE_HW_INTRINSICS GenTree* impArrayAccessIntrinsic(CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, NamedIntrinsic intrinsicName); GenTree* impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig); GenTree* impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig); GenTree* impKeepAliveIntrinsic(GenTree* objToKeepAlive); GenTree* impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); GenTree* impTransformThis(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, CORINFO_THIS_TRANSFORM transform); //----------------- Manipulating the trees and stmts ---------------------- Statement* impStmtList; // Statements for the BB being imported. Statement* impLastStmt; // The last statement for the current BB. 
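// Illustrative sketch (assumed context, not part of the interface): the per-block statement
// list above is typically built up while importing a single basic block; "block" is assumed to
// be the BasicBlock being imported and "tree" a tree created for one IL instruction.
//
//   impBeginTreeList();                                           // impStmtList / impLastStmt start out empty
//   impAppendTree(tree, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); // append a statement, spilling the stack first if needed
//   impEndTreeList(block);                                        // hand the accumulated statements to "block"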
public: enum { CHECK_SPILL_ALL = -1, CHECK_SPILL_NONE = -2 }; void impBeginTreeList(); void impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt); void impEndTreeList(BasicBlock* block); void impAppendStmtCheck(Statement* stmt, unsigned chkLevel); void impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo = true); void impAppendStmt(Statement* stmt); void impInsertStmtBefore(Statement* stmt, Statement* stmtBefore); Statement* impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo = true); void impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore); void impAssignTempGen(unsigned tmp, GenTree* val, unsigned curLevel = (unsigned)CHECK_SPILL_NONE, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); void impAssignTempGen(unsigned tmpNum, GenTree* val, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); Statement* impExtractLastStmt(); GenTree* impCloneExpr(GenTree* tree, GenTree** clone, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt DEBUGARG(const char* reason)); GenTree* impAssignStruct(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* impAssignStructPtr(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* impGetStructAddr(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool willDeref); var_types impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* simdBaseJitType = nullptr); GenTree* impNormStructVal(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool forceNormalization = false); GenTree* impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool* pRuntimeLookup = nullptr, bool mustRestoreHandle = false, bool importParent = false); GenTree* impParentClassTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool* pRuntimeLookup = nullptr, bool mustRestoreHandle = false) { return impTokenToHandle(pResolvedToken, pRuntimeLookup, mustRestoreHandle, true); } GenTree* impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags flags, void* compileTimeHandle); GenTree* getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind); GenTree* impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle); GenTree* impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup, GenTreeFlags flags, void* compileTimeHandle); GenTreeCall* impReadyToRunHelperToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoHelpFunc helper, var_types type, GenTreeCall::Use* args = nullptr, CORINFO_LOOKUP_KIND* pGenericLookupKind = nullptr); bool impIsCastHelperEligibleForClassProbe(GenTree* tree); bool impIsCastHelperMayHaveProfileData(GenTree* tree); GenTree* impCastClassOrIsInstToTree( GenTree* op1, GenTree* op2, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass, IL_OFFSET ilOffset); GenTree* impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass); bool VarTypeIsMultiByteAndCanEnreg(var_types type, CORINFO_CLASS_HANDLE typeClass, unsigned* typeSize, bool forReturn, bool isVarArg, 
CorInfoCallConvExtension callConv); bool IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName); bool IsTargetIntrinsic(NamedIntrinsic intrinsicName); bool IsMathIntrinsic(NamedIntrinsic intrinsicName); bool IsMathIntrinsic(GenTree* tree); private: //----------------- Importing the method ---------------------------------- CORINFO_CONTEXT_HANDLE impTokenLookupContextHandle; // The context used for looking up tokens. #ifdef DEBUG unsigned impCurOpcOffs; const char* impCurOpcName; bool impNestedStackSpill; // For displaying instrs with generated native code (-n:B) Statement* impLastILoffsStmt; // oldest stmt added for which we did not call SetLastILOffset(). void impNoteLastILoffs(); #endif // Debug info of current statement being imported. It gets set to contain // no IL location (!impCurStmtDI.GetLocation().IsValid) after it has been // set in the appended trees. Then it gets updated at IL instructions for // which we have to report mapping info. // It will always contain the current inline context. DebugInfo impCurStmtDI; DebugInfo impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall); void impCurStmtOffsSet(IL_OFFSET offs); void impNoteBranchOffs(); unsigned impInitBlockLineInfo(); bool impIsThis(GenTree* obj); bool impIsLDFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr); bool impIsDUP_LDVIRTFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr); bool impIsAnySTLOC(OPCODE opcode) { return ((opcode == CEE_STLOC) || (opcode == CEE_STLOC_S) || ((opcode >= CEE_STLOC_0) && (opcode <= CEE_STLOC_3))); } GenTreeCall::Use* impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig, GenTreeCall::Use* prefixArgs = nullptr); bool impCheckImplicitArgumentCoercion(var_types sigType, var_types nodeType) const; GenTreeCall::Use* impPopReverseCallArgs(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount = 0); //---------------- Spilling the importer stack ---------------------------- // The maximum number of bytes of IL processed without clean stack state. // It allows to limit the maximum tree size and depth. static const unsigned MAX_TREE_SIZE = 200; bool impCanSpillNow(OPCODE prevOpcode); struct PendingDsc { PendingDsc* pdNext; BasicBlock* pdBB; SavedStack pdSavedStack; ThisInitState pdThisPtrInit; }; PendingDsc* impPendingList; // list of BBs currently waiting to be imported. PendingDsc* impPendingFree; // Freed up dscs that can be reused // We keep a byte-per-block map (dynamically extended) in the top-level Compiler object of a compilation. JitExpandArray<BYTE> impPendingBlockMembers; // Return the byte for "b" (allocating/extending impPendingBlockMembers if necessary.) // Operates on the map in the top-level ancestor. BYTE impGetPendingBlockMember(BasicBlock* blk) { return impInlineRoot()->impPendingBlockMembers.Get(blk->bbInd()); } // Set the byte for "b" to "val" (allocating/extending impPendingBlockMembers if necessary.) // Operates on the map in the top-level ancestor. 
void impSetPendingBlockMember(BasicBlock* blk, BYTE val) { impInlineRoot()->impPendingBlockMembers.Set(blk->bbInd(), val); } bool impCanReimport; bool impSpillStackEntry(unsigned level, unsigned varNum #ifdef DEBUG , bool bAssertOnRecursion, const char* reason #endif ); void impSpillStackEnsure(bool spillLeaves = false); void impEvalSideEffects(); void impSpillSpecialSideEff(); void impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason)); void impSpillValueClasses(); void impSpillEvalStack(); static fgWalkPreFn impFindValueClasses; void impSpillLclRefs(ssize_t lclNum); BasicBlock* impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter); bool impBlockIsInALoop(BasicBlock* block); void impImportBlockCode(BasicBlock* block); void impReimportMarkBlock(BasicBlock* block); void impReimportMarkSuccessors(BasicBlock* block); void impVerifyEHBlock(BasicBlock* block, bool isTryStart); void impImportBlockPending(BasicBlock* block); // Similar to impImportBlockPending, but assumes that block has already been imported once and is being // reimported for some reason. It specifically does *not* look at verCurrentState to set the EntryState // for the block, but instead, just re-uses the block's existing EntryState. void impReimportBlockPending(BasicBlock* block); var_types impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2); void impImportBlock(BasicBlock* block); // Assumes that "block" is a basic block that completes with a non-empty stack. We will assign the values // on the stack to local variables (the "spill temp" variables). The successor blocks will assume that // its incoming stack contents are in those locals. This requires "block" and its successors to agree on // the variables that will be used -- and for all the predecessors of those successors, and the // successors of those predecessors, etc. Call such a set of blocks closed under alternating // successor/predecessor edges a "spill clique." A block is a "predecessor" or "successor" member of the // clique (or, conceivably, both). Each block has a specified sequence of incoming and outgoing spill // temps. If "block" already has its outgoing spill temps assigned (they are always a contiguous series // of local variable numbers, so we represent them with the base local variable number), returns that. // Otherwise, picks a set of spill temps, and propagates this choice to all blocks in the spill clique of // which "block" is a member (asserting, in debug mode, that no block in this clique had its spill temps // chosen already. More precisely, that the incoming or outgoing spill temps are not chosen, depending // on which kind of member of the clique the block is). unsigned impGetSpillTmpBase(BasicBlock* block); // Assumes that "block" is a basic block that completes with a non-empty stack. We have previously // assigned the values on the stack to local variables (the "spill temp" variables). The successor blocks // will assume that its incoming stack contents are in those locals. This requires "block" and its // successors to agree on the variables and their types that will be used. The CLI spec allows implicit // conversions between 'int' and 'native int' or 'float' and 'double' stack types. So one predecessor can // push an int and another can push a native int. 
For 64-bit we have chosen to implement this by typing // the "spill temp" as native int, and then importing (or re-importing as needed) so that all the // predecessors in the "spill clique" push a native int (sign-extending if needed), and all the // successors receive a native int. Similarly float and double are unified to double. // This routine is called after a type-mismatch is detected, and it will walk the spill clique to mark // blocks for re-importation as appropriate (both successors, so they get the right incoming type, and // predecessors, so they insert an upcast if needed). void impReimportSpillClique(BasicBlock* block); // When we compute a "spill clique" (see above) these byte-maps are allocated to have a byte per basic // block, and represent the predecessor and successor members of the clique currently being computed. // *** Access to these will need to be locked in a parallel compiler. JitExpandArray<BYTE> impSpillCliquePredMembers; JitExpandArray<BYTE> impSpillCliqueSuccMembers; enum SpillCliqueDir { SpillCliquePred, SpillCliqueSucc }; // Abstract class for receiving a callback while walking a spill clique class SpillCliqueWalker { public: virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) = 0; }; // This class is used for setting the bbStkTempsIn and bbStkTempsOut on the blocks within a spill clique class SetSpillTempsBase : public SpillCliqueWalker { unsigned m_baseTmp; public: SetSpillTempsBase(unsigned baseTmp) : m_baseTmp(baseTmp) { } virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk); }; // This class is used for implementing impReimportSpillClique part on each block within the spill clique class ReimportSpillClique : public SpillCliqueWalker { Compiler* m_pComp; public: ReimportSpillClique(Compiler* pComp) : m_pComp(pComp) { } virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk); }; // This is the heart of the algorithm for walking spill cliques. It invokes callback->Visit for each // predecessor or successor within the spill clique void impWalkSpillCliqueFromPred(BasicBlock* pred, SpillCliqueWalker* callback); // For a BasicBlock that has already been imported, the EntryState has an array of GenTrees for the // incoming locals. This walks that list and resets the types of the GenTrees to match the types of // the VarDscs. They get out of sync when we have int/native int issues (see impReimportSpillClique). void impRetypeEntryStateTemps(BasicBlock* blk); BYTE impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk); void impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val); void impPushVar(GenTree* op, typeInfo tiRetVal); GenTreeLclVar* impCreateLocalNode(unsigned lclNum DEBUGARG(IL_OFFSET offset)); void impLoadVar(unsigned lclNum, IL_OFFSET offset, const typeInfo& tiRetVal); void impLoadVar(unsigned lclNum, IL_OFFSET offset) { impLoadVar(lclNum, offset, lvaGetDesc(lclNum)->lvVerTypeInfo); } void impLoadArg(unsigned ilArgNum, IL_OFFSET offset); void impLoadLoc(unsigned ilLclNum, IL_OFFSET offset); bool impReturnInstruction(int prefixFlags, OPCODE& opcode); #ifdef TARGET_ARM void impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* op, CORINFO_CLASS_HANDLE hClass); #endif // A free list of linked list nodes used to represent to-do stacks of basic blocks.
struct BlockListNode { BasicBlock* m_blk; BlockListNode* m_next; BlockListNode(BasicBlock* blk, BlockListNode* next = nullptr) : m_blk(blk), m_next(next) { } void* operator new(size_t sz, Compiler* comp); }; BlockListNode* impBlockListNodeFreeList; void FreeBlockListNode(BlockListNode* node); bool impIsValueType(typeInfo* pTypeInfo); var_types mangleVarArgsType(var_types type); regNumber getCallArgIntRegister(regNumber floatReg); regNumber getCallArgFloatRegister(regNumber intReg); #if defined(DEBUG) static unsigned jitTotalMethodCompiled; #endif #ifdef DEBUG static LONG jitNestingLevel; #endif // DEBUG static bool impIsAddressInLocal(const GenTree* tree, GenTree** lclVarTreeOut = nullptr); void impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult); // STATIC inlining decision based on the IL code. void impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle, CORINFO_METHOD_INFO* methInfo, bool forceInline, InlineResult* inlineResult); void impCheckCanInline(GenTreeCall* call, CORINFO_METHOD_HANDLE fncHandle, unsigned methAttr, CORINFO_CONTEXT_HANDLE exactContextHnd, InlineCandidateInfo** ppInlineCandidateInfo, InlineResult* inlineResult); void impInlineRecordArgInfo(InlineInfo* pInlineInfo, GenTree* curArgVal, unsigned argNum, InlineResult* inlineResult); void impInlineInitVars(InlineInfo* pInlineInfo); unsigned impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason)); GenTree* impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclTypeInfo); bool impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo); bool impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTree, GenTreeCall::Use* additionalCallArgs, GenTree* dereferencedAddress, InlArgInfo* inlArgInfo); void impMarkInlineCandidate(GenTree* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo); void impMarkInlineCandidateHelper(GenTreeCall* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo); bool impTailCallRetTypeCompatible(bool allowWidening, var_types callerRetType, CORINFO_CLASS_HANDLE callerRetTypeClass, CorInfoCallConvExtension callerCallConv, var_types calleeRetType, CORINFO_CLASS_HANDLE calleeRetTypeClass, CorInfoCallConvExtension calleeCallConv); bool impIsTailCallILPattern( bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive); bool impIsImplicitTailCallCandidate( OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive); bool impIsClassExact(CORINFO_CLASS_HANDLE classHnd); bool impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array); CORINFO_RESOLVED_TOKEN* impAllocateToken(const CORINFO_RESOLVED_TOKEN& token); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX FlowGraph XX XX XX XX Info about the basic-blocks, their contents and the flow analysis XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: BasicBlock* fgFirstBB; // Beginning of the basic block list BasicBlock* fgLastBB; // End of the basic block list BasicBlock* fgFirstColdBlock; // First block to be placed in the cold section BasicBlock* fgEntryBB; // For OSR, the original method's entry point BasicBlock* fgOSREntryBB; 
// For OSR, the logical entry point (~ patchpoint) #if defined(FEATURE_EH_FUNCLETS) BasicBlock* fgFirstFuncletBB; // First block of outlined funclets (to allow block insertion before the funclets) #endif BasicBlock* fgFirstBBScratch; // Block inserted for initialization stuff. Is nullptr if no such block has been // created. BasicBlockList* fgReturnBlocks; // list of BBJ_RETURN blocks unsigned fgEdgeCount; // # of control flow edges between the BBs unsigned fgBBcount; // # of BBs in the method #ifdef DEBUG unsigned fgBBcountAtCodegen; // # of BBs in the method at the start of codegen #endif unsigned fgBBNumMax; // The max bbNum that has been assigned to basic blocks unsigned fgDomBBcount; // # of BBs for which we have dominator and reachability information BasicBlock** fgBBInvPostOrder; // The flow graph stored in an array sorted in topological order, needed to compute // dominance. Indexed by block number. Size: fgBBNumMax + 1. // After the dominance tree is computed, we cache a DFS preorder number and DFS postorder number to compute // dominance queries in O(1). fgDomTreePreOrder and fgDomTreePostOrder are arrays giving the block's preorder and // postorder number, respectively. The arrays are indexed by basic block number. (Note that blocks are numbered // starting from one. Thus, we always waste element zero. This makes debugging easier and makes the code less likely // to suffer from bugs stemming from forgetting to add or subtract one from the block number to form an array // index). The arrays are of size fgBBNumMax + 1. unsigned* fgDomTreePreOrder; unsigned* fgDomTreePostOrder; // Dominator tree used by SSA construction and copy propagation (the two are expected to use the same tree // in order to avoid the need for SSA reconstruction and an "out of SSA" phase). DomTreeNode* fgSsaDomTree; bool fgBBVarSetsInited; // Allocate array like T* a = new T[fgBBNumMax + 1]; // Using helper so we don't keep forgetting +1. template <typename T> T* fgAllocateTypeForEachBlk(CompMemKind cmk = CMK_Unknown) { return getAllocator(cmk).allocate<T>(fgBBNumMax + 1); } // BlockSets are relative to a specific set of BasicBlock numbers. If that changes // (if the blocks are renumbered), this changes. BlockSets from different epochs // cannot be meaningfully combined. Note that new blocks can be created with higher // block numbers without changing the basic block epoch. These blocks *cannot* // participate in a block set until the blocks are all renumbered, causing the epoch // to change. This is useful if continuing to use previous block sets is valuable. // If the epoch is zero, then it is uninitialized, and block sets can't be used. unsigned fgCurBBEpoch; unsigned GetCurBasicBlockEpoch() { return fgCurBBEpoch; } // The number of basic blocks in the current epoch. When the blocks are renumbered, // this is fgBBcount. As blocks are added, fgBBcount increases, fgCurBBEpochSize remains // the same, until a new BasicBlock epoch is created, such as when the blocks are all renumbered. unsigned fgCurBBEpochSize; // The number of "size_t" elements required to hold a bitset large enough for fgCurBBEpochSize // bits. This is precomputed to avoid doing math every time BasicBlockBitSetTraits::GetArrSize() is called. unsigned fgBBSetCountInSizeTUnits; void NewBasicBlockEpoch() { INDEBUG(unsigned oldEpochArrSize = fgBBSetCountInSizeTUnits); // We have a new epoch. Compute and cache the size needed for new BlockSets. 
fgCurBBEpoch++; fgCurBBEpochSize = fgBBNumMax + 1; fgBBSetCountInSizeTUnits = roundUp(fgCurBBEpochSize, (unsigned)(sizeof(size_t) * 8)) / unsigned(sizeof(size_t) * 8); #ifdef DEBUG // All BlockSet objects are now invalid! fgReachabilitySetsValid = false; // the bbReach sets are now invalid! fgEnterBlksSetValid = false; // the fgEnterBlks set is now invalid! if (verbose) { unsigned epochArrSize = BasicBlockBitSetTraits::GetArrSize(this, sizeof(size_t)); printf("\nNew BlockSet epoch %d, # of blocks (including unused BB00): %u, bitset array size: %u (%s)", fgCurBBEpoch, fgCurBBEpochSize, epochArrSize, (epochArrSize <= 1) ? "short" : "long"); if ((fgCurBBEpoch != 1) && ((oldEpochArrSize <= 1) != (epochArrSize <= 1))) { // If we're not just establishing the first epoch, and the epoch array size has changed such that we're // going to change our bitset representation from short (just a size_t bitset) to long (a pointer to an // array of size_t bitsets), then print that out. printf("; NOTE: BlockSet size was previously %s!", (oldEpochArrSize <= 1) ? "short" : "long"); } printf("\n"); } #endif // DEBUG } void EnsureBasicBlockEpoch() { if (fgCurBBEpochSize != fgBBNumMax + 1) { NewBasicBlockEpoch(); } } BasicBlock* fgNewBasicBlock(BBjumpKinds jumpKind); void fgEnsureFirstBBisScratch(); bool fgFirstBBisScratch(); bool fgBBisScratch(BasicBlock* block); void fgExtendEHRegionBefore(BasicBlock* block); void fgExtendEHRegionAfter(BasicBlock* block); BasicBlock* fgNewBBbefore(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion); BasicBlock* fgNewBBafter(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind, unsigned tryIndex, unsigned hndIndex, BasicBlock* nearBlk, bool putInFilter = false, bool runRarely = false, bool insertAtEnd = false); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind, BasicBlock* srcBlk, bool runRarely = false, bool insertAtEnd = false); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind); BasicBlock* fgNewBBinRegionWorker(BBjumpKinds jumpKind, BasicBlock* afterBlk, unsigned xcptnIndex, bool putInTryRegion); void fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk); void fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk); void fgUnlinkBlock(BasicBlock* block); #ifdef FEATURE_JIT_METHOD_PERF unsigned fgMeasureIR(); #endif // FEATURE_JIT_METHOD_PERF bool fgModified; // True if the flow graph has been modified recently bool fgComputePredsDone; // Have we computed the bbPreds list bool fgCheapPredsValid; // Is the bbCheapPreds list valid? bool fgDomsComputed; // Have we computed the dominator sets? bool fgReturnBlocksComputed; // Have we computed the return blocks list? bool fgOptimizedFinally; // Did we optimize any try-finallys? bool fgHasSwitch; // any BBJ_SWITCH jumps? BlockSet fgEnterBlks; // Set of blocks which have a special transfer of control; the "entry" blocks plus EH handler // begin blocks. #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) BlockSet fgAlwaysBlks; // Set of blocks which are BBJ_ALWAYS part of BBJ_CALLFINALLY/BBJ_ALWAYS pair that should // never be removed due to a requirement to use the BBJ_ALWAYS for generating code and // not have "retless" blocks. #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #ifdef DEBUG bool fgReachabilitySetsValid; // Are the bbReach sets valid? bool fgEnterBlksSetValid; // Is the fgEnterBlks set valid? 
#endif // DEBUG bool fgRemoveRestOfBlock; // true if we know that we will throw bool fgStmtRemoved; // true if we remove statements -> need new DFA // There are two modes for ordering of the trees. // - In FGOrderTree, the dominant ordering is the tree order, and the nodes contained in // each tree and sub-tree are contiguous, and can be traversed (in gtNext/gtPrev order) // by traversing the tree according to the order of the operands. // - In FGOrderLinear, the dominant ordering is the linear order. enum FlowGraphOrder { FGOrderTree, FGOrderLinear }; FlowGraphOrder fgOrder; // The following are boolean flags that keep track of the state of internal data structures bool fgStmtListThreaded; // true if the node list is now threaded bool fgCanRelocateEHRegions; // true if we are allowed to relocate the EH regions bool fgEdgeWeightsComputed; // true after we have called fgComputeEdgeWeights bool fgHaveValidEdgeWeights; // true if we were successful in computing all of the edge weights bool fgSlopUsedInEdgeWeights; // true if there was some slop used when computing the edge weights bool fgRangeUsedInEdgeWeights; // true if some of the edgeWeight are expressed in Min..Max form bool fgNeedsUpdateFlowGraph; // true if we need to run fgUpdateFlowGraph weight_t fgCalledCount; // count of the number of times this method was called // This is derived from the profile data // or is BB_UNITY_WEIGHT when we don't have profile data #if defined(FEATURE_EH_FUNCLETS) bool fgFuncletsCreated; // true if the funclet creation phase has been run #endif // FEATURE_EH_FUNCLETS bool fgGlobalMorph; // indicates if we are in the global morphing phase // since fgMorphTree can be called from several places bool impBoxTempInUse; // the temp below is valid and available unsigned impBoxTemp; // a temporary that is used for boxing #ifdef DEBUG bool jitFallbackCompile; // Are we doing a fallback compile? That is, have we executed a NO_WAY assert, // and we are trying to compile again in a "safer", minopts mode? #endif #if defined(DEBUG) unsigned impInlinedCodeSize; bool fgPrintInlinedMethods; #endif jitstd::vector<flowList*>* fgPredListSortVector; //------------------------------------------------------------------------- void fgInit(); PhaseStatus fgImport(); PhaseStatus fgTransformIndirectCalls(); PhaseStatus fgTransformPatchpoints(); PhaseStatus fgInline(); PhaseStatus fgRemoveEmptyTry(); PhaseStatus fgRemoveEmptyFinally(); PhaseStatus fgMergeFinallyChains(); PhaseStatus fgCloneFinally(); void fgCleanupContinuation(BasicBlock* continuation); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) PhaseStatus fgUpdateFinallyTargetFlags(); void fgClearAllFinallyTargetBits(); void fgAddFinallyTargetFlags(); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) PhaseStatus fgTailMergeThrows(); void fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock, BasicBlock* nonCanonicalBlock, BasicBlock* canonicalBlock, flowList* predEdge); void fgTailMergeThrowsJumpToHelper(BasicBlock* predBlock, BasicBlock* nonCanonicalBlock, BasicBlock* canonicalBlock, flowList* predEdge); GenTree* fgCheckCallArgUpdate(GenTree* parent, GenTree* child, var_types origType); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Sometimes we need to defer updating the BBF_FINALLY_TARGET bit. fgNeedToAddFinallyTargetBits signals // when this is necessary.
bool fgNeedToAddFinallyTargetBits; #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) bool fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block, BasicBlock* handler, BlockToBlockMap& continuationMap); GenTree* fgGetCritSectOfStaticMethod(); #if defined(FEATURE_EH_FUNCLETS) void fgAddSyncMethodEnterExit(); GenTree* fgCreateMonitorTree(unsigned lvaMonitorBool, unsigned lvaThisVar, BasicBlock* block, bool enter); void fgConvertSyncReturnToLeave(BasicBlock* block); #endif // FEATURE_EH_FUNCLETS void fgAddReversePInvokeEnterExit(); bool fgMoreThanOneReturnBlock(); // The number of separate return points in the method. unsigned fgReturnCount; void fgAddInternal(); enum class FoldResult { FOLD_DID_NOTHING, FOLD_CHANGED_CONTROL_FLOW, FOLD_REMOVED_LAST_STMT, FOLD_ALTERED_LAST_STMT, }; FoldResult fgFoldConditional(BasicBlock* block); void fgMorphStmts(BasicBlock* block); void fgMorphBlocks(); void fgMergeBlockReturn(BasicBlock* block); bool fgMorphBlockStmt(BasicBlock* block, Statement* stmt DEBUGARG(const char* msg)); void fgSetOptions(); #ifdef DEBUG static fgWalkPreFn fgAssertNoQmark; void fgPreExpandQmarkChecks(GenTree* expr); void fgPostExpandQmarkChecks(); static void fgCheckQmarkAllowedForm(GenTree* tree); #endif IL_OFFSET fgFindBlockILOffset(BasicBlock* block); void fgFixEntryFlowForOSR(); BasicBlock* fgSplitBlockAtBeginning(BasicBlock* curr); BasicBlock* fgSplitBlockAtEnd(BasicBlock* curr); BasicBlock* fgSplitBlockAfterStatement(BasicBlock* curr, Statement* stmt); BasicBlock* fgSplitBlockAfterNode(BasicBlock* curr, GenTree* node); // for LIR BasicBlock* fgSplitEdge(BasicBlock* curr, BasicBlock* succ); Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block, const DebugInfo& di); Statement* fgNewStmtFromTree(GenTree* tree); Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block); Statement* fgNewStmtFromTree(GenTree* tree, const DebugInfo& di); GenTree* fgGetTopLevelQmark(GenTree* expr, GenTree** ppDst = nullptr); void fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt); void fgExpandQmarkStmt(BasicBlock* block, Statement* stmt); void fgExpandQmarkNodes(); // Do "simple lowering." This functionality is (conceptually) part of "general" // lowering that is distributed between fgMorph and the lowering phase of LSRA. 
void fgSimpleLowering(); GenTree* fgInitThisClass(); GenTreeCall* fgGetStaticsCCtorHelper(CORINFO_CLASS_HANDLE cls, CorInfoHelpFunc helper); GenTreeCall* fgGetSharedCCtor(CORINFO_CLASS_HANDLE cls); bool backendRequiresLocalVarLifetimes() { return !opts.MinOpts() || m_pLinearScan->willEnregisterLocalVars(); } void fgLocalVarLiveness(); void fgLocalVarLivenessInit(); void fgPerNodeLocalVarLiveness(GenTree* node); void fgPerBlockLocalVarLiveness(); VARSET_VALRET_TP fgGetHandlerLiveVars(BasicBlock* block); void fgLiveVarAnalysis(bool updateInternalOnly = false); void fgComputeLifeCall(VARSET_TP& life, GenTreeCall* call); void fgComputeLifeTrackedLocalUse(VARSET_TP& life, LclVarDsc& varDsc, GenTreeLclVarCommon* node); bool fgComputeLifeTrackedLocalDef(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, LclVarDsc& varDsc, GenTreeLclVarCommon* node); bool fgComputeLifeUntrackedLocal(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, LclVarDsc& varDsc, GenTreeLclVarCommon* lclVarNode); bool fgComputeLifeLocal(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, GenTree* lclVarNode); void fgComputeLife(VARSET_TP& life, GenTree* startNode, GenTree* endNode, VARSET_VALARG_TP volatileVars, bool* pStmtInfoDirty DEBUGARG(bool* treeModf)); void fgComputeLifeLIR(VARSET_TP& life, BasicBlock* block, VARSET_VALARG_TP volatileVars); bool fgTryRemoveNonLocal(GenTree* node, LIR::Range* blockRange); void fgRemoveDeadStoreLIR(GenTree* store, BasicBlock* block); bool fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_VALARG_TP life, bool* doAgain, bool* pStmtInfoDirty, bool* pStoreRemoved DEBUGARG(bool* treeModf)); void fgInterBlockLocalVarLiveness(); // Blocks: convenience methods for enabling range-based `for` iteration over the function's blocks, e.g.: // 1. for (BasicBlock* const block : compiler->Blocks()) ... // 2. for (BasicBlock* const block : compiler->Blocks(startBlock)) ... // 3. for (BasicBlock* const block : compiler->Blocks(startBlock, endBlock)) ... // In case (1), the block list can be empty. In case (2), `startBlock` can be nullptr. In case (3), // both `startBlock` and `endBlock` must be non-null. // BasicBlockSimpleList Blocks() const { return BasicBlockSimpleList(fgFirstBB); } BasicBlockSimpleList Blocks(BasicBlock* startBlock) const { return BasicBlockSimpleList(startBlock); } BasicBlockRangeList Blocks(BasicBlock* startBlock, BasicBlock* endBlock) const { return BasicBlockRangeList(startBlock, endBlock); } // The presence of a partial definition presents some difficulties for SSA: this is both a use of some SSA name // of "x", and a def of a new SSA name for "x". The tree only has one local variable for "x", so it has to choose // whether to treat that as the use or def. It chooses the "use", and thus the old SSA name. This map allows us // to record/recover the "def" SSA number, given the lcl var node for "x" in such a tree. typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, unsigned> NodeToUnsignedMap; NodeToUnsignedMap* m_opAsgnVarDefSsaNums; NodeToUnsignedMap* GetOpAsgnVarDefSsaNums() { if (m_opAsgnVarDefSsaNums == nullptr) { m_opAsgnVarDefSsaNums = new (getAllocator()) NodeToUnsignedMap(getAllocator()); } return m_opAsgnVarDefSsaNums; } // This map tracks nodes whose value numbers explicitly or implicitly depend on memory states. // The map provides the entry block of the most closely enclosing loop that // defines the memory region accessed when defining the nodes's VN. 
// // This information should be consulted when considering hoisting node out of a loop, as the VN // for the node will only be valid within the indicated loop. // // It is not fine-grained enough to track memory dependence within loops, so cannot be used // for more general code motion. // // If a node does not have an entry in the map we currently assume the VN is not memory dependent // and so memory does not constrain hoisting. // typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, BasicBlock*> NodeToLoopMemoryBlockMap; NodeToLoopMemoryBlockMap* m_nodeToLoopMemoryBlockMap; NodeToLoopMemoryBlockMap* GetNodeToLoopMemoryBlockMap() { if (m_nodeToLoopMemoryBlockMap == nullptr) { m_nodeToLoopMemoryBlockMap = new (getAllocator()) NodeToLoopMemoryBlockMap(getAllocator()); } return m_nodeToLoopMemoryBlockMap; } void optRecordLoopMemoryDependence(GenTree* tree, BasicBlock* block, ValueNum memoryVN); void optCopyLoopMemoryDependence(GenTree* fromTree, GenTree* toTree); // Requires value numbering phase to have completed. Returns the value number ("gtVN") of the // "tree," EXCEPT in the case of GTF_VAR_USEASG, because the tree node's gtVN member is the // "use" VN. Performs a lookup into the map of (use asg tree -> def VN.) to return the "def's" // VN. inline ValueNum GetUseAsgDefVNOrTreeVN(GenTree* tree); // Requires that "lcl" has the GTF_VAR_DEF flag set. Returns the SSA number of "lcl". // Except: assumes that lcl is a def, and if it is // a partial def (GTF_VAR_USEASG), looks up and returns the SSA number for the "def", // rather than the "use" SSA number recorded in the tree "lcl". inline unsigned GetSsaNumForLocalVarDef(GenTree* lcl); inline bool PreciseRefCountsRequired(); // Performs SSA conversion. void fgSsaBuild(); // Reset any data structures to the state expected by "fgSsaBuild", so it can be run again. void fgResetForSsa(); unsigned fgSsaPassesCompleted; // Number of times fgSsaBuild has been run. // Returns "true" if this is a special variable that is never zero initialized in the prolog. inline bool fgVarIsNeverZeroInitializedInProlog(unsigned varNum); // Returns "true" if the variable needs explicit zero initialization. inline bool fgVarNeedsExplicitZeroInit(unsigned varNum, bool bbInALoop, bool bbIsReturn); // The value numbers for this compilation. ValueNumStore* vnStore; public: ValueNumStore* GetValueNumStore() { return vnStore; } // Do value numbering (assign a value number to each // tree node). void fgValueNumber(); // Computes new GcHeap VN via the assignment H[elemTypeEq][arrVN][inx][fldSeq] = rhsVN. // Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type. // The 'indType' is the indirection type of the lhs of the assignment and will typically // match the element type of the array or fldSeq. When this type doesn't match // or if the fldSeq is 'NotAField' we invalidate the array contents H[elemTypeEq][arrVN] // ValueNum fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq, ValueNum arrVN, ValueNum inxVN, FieldSeqNode* fldSeq, ValueNum rhsVN, var_types indType); // Requires that "tree" is a GT_IND marked as an array index, and that its address argument // has been parsed to yield the other input arguments. If evaluation of the address // can raise exceptions, those should be captured in the exception set "addrXvnp". // Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type. // Marks "tree" with the VN for H[elemTypeEq][arrVN][inx][fldSeq] (for the liberal VN; a new unique // VN for the conservative VN.) 
Also marks the tree's argument as the address of an array element. // The type tree->TypeGet() will typically match the element type of the array or fldSeq. // When this type doesn't match or if the fldSeq is 'NotAField' we return a new unique VN // ValueNum fgValueNumberArrIndexVal(GenTree* tree, CORINFO_CLASS_HANDLE elemTypeEq, ValueNum arrVN, ValueNum inxVN, ValueNumPair addrXvnp, FieldSeqNode* fldSeq); // Requires "funcApp" to be a VNF_PtrToArrElem, and "addrXvnp" to represent the exception set thrown // by evaluating the array index expression "tree". Returns the value number resulting from // dereferencing the array in the current GcHeap state. If "tree" is non-null, it must be the // "GT_IND" that does the dereference, and it is given the returned value number. ValueNum fgValueNumberArrIndexVal(GenTree* tree, VNFuncApp* funcApp, ValueNumPair addrXvnp); // Compute the value number for a byref-exposed load of the given type via the given pointerVN. ValueNum fgValueNumberByrefExposedLoad(var_types type, ValueNum pointerVN); unsigned fgVNPassesCompleted; // Number of times fgValueNumber has been run. // Utility functions for fgValueNumber. // Perform value-numbering for the trees in "blk". void fgValueNumberBlock(BasicBlock* blk); // Requires that "entryBlock" is the entry block of loop "loopNum", and that "loopNum" is the // innermost loop of which "entryBlock" is the entry. Returns the value number that should be // assumed for the memoryKind at the start "entryBlk". ValueNum fgMemoryVNForLoopSideEffects(MemoryKind memoryKind, BasicBlock* entryBlock, unsigned loopNum); // Called when an operation (performed by "tree", described by "msg") may cause the GcHeap to be mutated. // As GcHeap is a subset of ByrefExposed, this will also annotate the ByrefExposed mutation. void fgMutateGcHeap(GenTree* tree DEBUGARG(const char* msg)); // Called when an operation (performed by "tree", described by "msg") may cause an address-exposed local to be // mutated. void fgMutateAddressExposedLocal(GenTree* tree DEBUGARG(const char* msg)); // For a GC heap store at curTree, record the new curMemoryVN's and update curTree's MemorySsaMap. // As GcHeap is a subset of ByrefExposed, this will also record the ByrefExposed store. void recordGcHeapStore(GenTree* curTree, ValueNum gcHeapVN DEBUGARG(const char* msg)); // For a store to an address-exposed local at curTree, record the new curMemoryVN and update curTree's MemorySsaMap. void recordAddressExposedLocalStore(GenTree* curTree, ValueNum memoryVN DEBUGARG(const char* msg)); void fgSetCurrentMemoryVN(MemoryKind memoryKind, ValueNum newMemoryVN); // Tree caused an update in the current memory VN. If "tree" has an associated heap SSA #, record that // value in that SSA #. void fgValueNumberRecordMemorySsa(MemoryKind memoryKind, GenTree* tree); // The input 'tree' is a leaf node that is a constant // Assign the proper value number to the tree void fgValueNumberTreeConst(GenTree* tree); // If the VN store has been initialized, reassign the // proper value number to the constant tree. void fgUpdateConstTreeValueNumber(GenTree* tree); // Assumes that all inputs to "tree" have had value numbers assigned; assigns a VN to tree. // (With some exceptions: the VN of the lhs of an assignment is assigned as part of the // assignment.) void fgValueNumberTree(GenTree* tree); void fgValueNumberAssignment(GenTreeOp* tree); // Does value-numbering for a block assignment. 
void fgValueNumberBlockAssignment(GenTree* tree); bool fgValueNumberBlockAssignmentTypeCheck(LclVarDsc* dstVarDsc, FieldSeqNode* dstFldSeq, GenTree* src); // Does value-numbering for a cast tree. void fgValueNumberCastTree(GenTree* tree); // Does value-numbering for an intrinsic tree. void fgValueNumberIntrinsic(GenTree* tree); #ifdef FEATURE_SIMD // Does value-numbering for a GT_SIMD tree void fgValueNumberSimd(GenTreeSIMD* tree); #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS // Does value-numbering for a GT_HWINTRINSIC tree void fgValueNumberHWIntrinsic(GenTreeHWIntrinsic* tree); #endif // FEATURE_HW_INTRINSICS // Does value-numbering for a call. We interpret some helper calls. void fgValueNumberCall(GenTreeCall* call); // Does value-numbering for a helper representing a cast operation. void fgValueNumberCastHelper(GenTreeCall* call); // Does value-numbering for a helper "call" that has a VN function symbol "vnf". void fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueNumPair vnpExc); // Requires "helpCall" to be a helper call. Assigns it a value number; // we understand the semantics of some of the calls. Returns "true" if // the call may modify the heap (we assume arbitrary memory side effects if so). bool fgValueNumberHelperCall(GenTreeCall* helpCall); // Requires that "helpFunc" is one of the pure Jit Helper methods. // Returns the corresponding VNFunc to use for value numbering VNFunc fgValueNumberJitHelperMethodVNFunc(CorInfoHelpFunc helpFunc); // Adds the exception set for the current tree node which has a memory indirection operation void fgValueNumberAddExceptionSetForIndirection(GenTree* tree, GenTree* baseAddr); // Adds the exception sets for the current tree node which is performing a division or modulus operation void fgValueNumberAddExceptionSetForDivision(GenTree* tree); // Adds the exception set for the current tree node which is performing a overflow checking operation void fgValueNumberAddExceptionSetForOverflow(GenTree* tree); // Adds the exception set for the current tree node which is performing a bounds check operation void fgValueNumberAddExceptionSetForBoundsCheck(GenTree* tree); // Adds the exception set for the current tree node which is performing a ckfinite operation void fgValueNumberAddExceptionSetForCkFinite(GenTree* tree); // Adds the exception sets for the current tree node void fgValueNumberAddExceptionSet(GenTree* tree); #ifdef DEBUG void fgDebugCheckExceptionSets(); void fgDebugCheckValueNumberedTree(GenTree* tree); #endif // These are the current value number for the memory implicit variables while // doing value numbering. These are the value numbers under the "liberal" interpretation // of memory values; the "conservative" interpretation needs no VN, since every access of // memory yields an unknown value. ValueNum fgCurMemoryVN[MemoryKindCount]; // Return a "pseudo"-class handle for an array element type. If "elemType" is TYP_STRUCT, // requires "elemStructType" to be non-null (and to have a low-order zero). Otherwise, low order bit // is 1, and the rest is an encoding of "elemTyp". static CORINFO_CLASS_HANDLE EncodeElemType(var_types elemTyp, CORINFO_CLASS_HANDLE elemStructType) { if (elemStructType != nullptr) { assert(varTypeIsStruct(elemTyp) || elemTyp == TYP_REF || elemTyp == TYP_BYREF || varTypeIsIntegral(elemTyp)); assert((size_t(elemStructType) & 0x1) == 0x0); // Make sure the encoding below is valid. 
return elemStructType; } else { assert(elemTyp != TYP_STRUCT); elemTyp = varTypeToSigned(elemTyp); return CORINFO_CLASS_HANDLE(size_t(elemTyp) << 1 | 0x1); } } // If "clsHnd" encodes a primitive element type (as produced by "EncodeElemType" above), returns the // var_types it represents. Otherwise, returns TYP_STRUCT (on the assumption that "clsHnd" is // the struct type of the element). static var_types DecodeElemType(CORINFO_CLASS_HANDLE clsHnd) { size_t clsHndVal = size_t(clsHnd); if (clsHndVal & 0x1) { return var_types(clsHndVal >> 1); } else { return TYP_STRUCT; } } // Convert a BYTE which represents the VM's CorInfoGCtype to the JIT's var_types var_types getJitGCType(BYTE gcType); // Returns true if the provided type should be treated as a primitive type // for the unmanaged calling conventions. bool isNativePrimitiveStructType(CORINFO_CLASS_HANDLE clsHnd); enum structPassingKind { SPK_Unknown, // Invalid value, never returned SPK_PrimitiveType, // The struct is passed/returned using a primitive type. SPK_EnclosingType, // Like SPK_PrimitiveType, but used for return types that // require a primitive type temp that is larger than the struct size. // Currently used for structs of size 3, 5, 6, or 7 bytes. SPK_ByValue, // The struct is passed/returned by value (using the ABI rules) // for ARM64 and UNIX_X64 in multiple registers. (when all of the // parameter registers are used, then the stack will be used) // for X86 passed on the stack, for ARM32 passed in registers // or the stack or split between registers and the stack. SPK_ByValueAsHfa, // The struct is passed/returned as an HFA in multiple registers. SPK_ByReference // The struct is passed/returned by reference to a copy/buffer. }; // Get the "primitive" type that is used when we are given a struct of size 'structSize'. // For pointer-sized structs the 'clsHnd' is used to determine if the struct contains a GC ref. // A "primitive" type is one of the scalar types: byte, short, int, long, ref, float, double. // If we can't or shouldn't use a "primitive" type then TYP_UNKNOWN is returned. // // isVarArg is passed for use on Windows Arm64 to change the decision returned regarding // hfa types. // var_types getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS_HANDLE clsHnd, bool isVarArg); // Get the type that is used to pass values of the given struct type. // isVarArg is passed for use on Windows Arm64 to change the decision returned regarding // hfa types. // var_types getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, structPassingKind* wbPassStruct, bool isVarArg, unsigned structSize); // Get the type that is used to return values of the given struct type. // If the size is unknown, pass 0 and it will be determined from 'clsHnd'. var_types getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, CorInfoCallConvExtension callConv, structPassingKind* wbPassStruct = nullptr, unsigned structSize = 0); #ifdef DEBUG // Print a representation of "vnp" or "vn" on standard output. // If "level" is non-zero, we also print out a partial expansion of the value. void vnpPrint(ValueNumPair vnp, unsigned level); void vnPrint(ValueNum vn, unsigned level); #endif bool fgDominate(BasicBlock* b1, BasicBlock* b2); // Return true if b1 dominates b2 // Dominator computation member functions // Not exposed outside Compiler protected: bool fgReachable(BasicBlock* b1, BasicBlock* b2); // Returns true if block b1 can reach block b2 // Compute immediate dominators, the dominator tree, and its pre/post-order traversal numbers.
void fgComputeDoms(); void fgCompDominatedByExceptionalEntryBlocks(); BlockSet_ValRet_T fgGetDominatorSet(BasicBlock* block); // Returns a set of blocks that dominate the given block. // Note: this is relatively slow compared to calling fgDominate(), // especially when only a single block-versus-block check is needed. void fgComputeReachabilitySets(); // Compute bbReach sets. (Also sets BBF_GC_SAFE_POINT flag on blocks.) void fgComputeReturnBlocks(); // Initialize fgReturnBlocks to a list of BBJ_RETURN blocks. void fgComputeEnterBlocksSet(); // Compute the set of entry blocks, 'fgEnterBlks'. bool fgRemoveUnreachableBlocks(); // Remove blocks determined to be unreachable by the bbReach sets. void fgComputeReachability(); // Perform flow graph node reachability analysis. BasicBlock* fgIntersectDom(BasicBlock* a, BasicBlock* b); // Intersect two immediate dominator sets. void fgDfsInvPostOrder(); // In order to compute dominance using fgIntersectDom, the flow graph nodes must be // processed in topological sort order; this function takes care of that. void fgDfsInvPostOrderHelper(BasicBlock* block, BlockSet& visited, unsigned* count); BlockSet_ValRet_T fgDomFindStartNodes(); // Computes which basic blocks don't have incoming edges in the flow graph. // Returns this as a set. INDEBUG(void fgDispDomTree(DomTreeNode* domTree);) // Helper that prints out the Dominator Tree in debug builds. DomTreeNode* fgBuildDomTree(); // Once we compute all the immediate dominator sets for each node in the flow graph // (performed by fgComputeDoms), this procedure builds the dominance tree, represented // as adjacency lists. // In order to speed up queries of the form 'Does A dominate B', we can perform a DFS preorder and postorder // traversal of the dominance tree, and the dominance query becomes: A dominates B iff preOrder(A) <= preOrder(B) // && postOrder(A) >= postOrder(B), making the computation O(1). void fgNumberDomTree(DomTreeNode* domTree); // When the flow graph changes, we need to update the block numbers, predecessor lists, reachability sets, // dominators, and possibly loops. void fgUpdateChangedFlowGraph(const bool computePreds = true, const bool computeDoms = true, const bool computeReturnBlocks = false, const bool computeLoops = false); public: // Compute the predecessors of the blocks in the control flow graph. void fgComputePreds(); // Remove all predecessor information. void fgRemovePreds(); // Compute the cheap flow graph predecessor lists. This is used in some early phases // before the full predecessor lists are computed. void fgComputeCheapPreds(); private: void fgAddCheapPred(BasicBlock* block, BasicBlock* blockPred); void fgRemoveCheapPred(BasicBlock* block, BasicBlock* blockPred); public: enum GCPollType { GCPOLL_NONE, GCPOLL_CALL, GCPOLL_INLINE }; // Initialize the per-block variable sets (used for liveness analysis). void fgInitBlockVarSets(); PhaseStatus fgInsertGCPolls(); BasicBlock* fgCreateGCPoll(GCPollType pollType, BasicBlock* block); // Requires that "block" is a block that returns from // a finally. Returns the number of successors (jump targets // of blocks in the covered "try" that did a "LEAVE".) unsigned fgNSuccsOfFinallyRet(BasicBlock* block); // Requires that "block" is a block that returns (in the sense of BBJ_EHFINALLYRET) from // a finally. Returns its "i"th successor (jump targets // of blocks in the covered "try" that did a "LEAVE".) // Requires that "i" < fgNSuccsOfFinallyRet(block).
BasicBlock* fgSuccOfFinallyRet(BasicBlock* block, unsigned i); private: // Factor out common portions of the impls of the methods above. void fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock** bres, unsigned* nres); public: // For many purposes, it is desirable to be able to enumerate the *distinct* targets of a switch statement, // skipping duplicate targets. (E.g., in flow analyses that are only interested in the set of possible targets.) // SwitchUniqueSuccSet contains the non-duplicated switch targets. // (Code that modifies the jump table of a switch has an obligation to call Compiler::UpdateSwitchTableTarget, // which in turn will call the "UpdateTarget" method of this type if a SwitchUniqueSuccSet has already // been computed for the switch block. If a switch block is deleted or is transformed into a non-switch, // we leave the entry associated with the block, but it will no longer be accessed.) struct SwitchUniqueSuccSet { unsigned numDistinctSuccs; // Number of distinct targets of the switch. BasicBlock** nonDuplicates; // Array of "numDistinctSuccs", containing all the distinct switch target // successors. // The switch block "switchBlk" just had an entry with value "from" modified to the value "to". // Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk", // remove it from "this", and ensure that "to" is a member. Use "alloc" to do any required allocation. void UpdateTarget(CompAllocator alloc, BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to); }; typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, SwitchUniqueSuccSet> BlockToSwitchDescMap; private: // Maps BasicBlock*'s that end in switch statements to SwitchUniqueSuccSets that allow // iteration over only the distinct successors. BlockToSwitchDescMap* m_switchDescMap; public: BlockToSwitchDescMap* GetSwitchDescMap(bool createIfNull = true) { if ((m_switchDescMap == nullptr) && createIfNull) { m_switchDescMap = new (getAllocator()) BlockToSwitchDescMap(getAllocator()); } return m_switchDescMap; } // Invalidate the map of unique switch block successors. For example, since the hash key of the map // depends on block numbers, we must invalidate the map when the blocks are renumbered, to ensure that // we don't accidentally look up and return the wrong switch data. void InvalidateUniqueSwitchSuccMap() { m_switchDescMap = nullptr; } // Requires "switchBlock" to be a block that ends in a switch. Returns // the corresponding SwitchUniqueSuccSet. SwitchUniqueSuccSet GetDescriptorForSwitch(BasicBlock* switchBlk); // The switch block "switchBlk" just had an entry with value "from" modified to the value "to". // Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk", // remove it from "this", and ensure that "to" is a member. void UpdateSwitchTableTarget(BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to); // Remove the "SwitchUniqueSuccSet" of "switchBlk" in the BlockToSwitchDescMap. 
void fgInvalidateSwitchDescMapEntry(BasicBlock* switchBlk); BasicBlock* fgFirstBlockOfHandler(BasicBlock* block); bool fgIsFirstBlockOfFilterOrHandler(BasicBlock* block); flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred); flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred, flowList*** ptrToPred); flowList* fgRemoveRefPred(BasicBlock* block, BasicBlock* blockPred); flowList* fgRemoveAllRefPreds(BasicBlock* block, BasicBlock* blockPred); void fgRemoveBlockAsPred(BasicBlock* block); void fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSwitchBlock); void fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* newTarget, BasicBlock* oldTarget); void fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, BasicBlock* oldTarget); void fgReplacePred(BasicBlock* block, BasicBlock* oldPred, BasicBlock* newPred); flowList* fgAddRefPred(BasicBlock* block, BasicBlock* blockPred, flowList* oldEdge = nullptr, bool initializingPreds = false); // Only set to 'true' when we are computing preds in // fgComputePreds() void fgFindBasicBlocks(); bool fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt); bool fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionIndex, bool putInTryRegion); BasicBlock* fgFindInsertPoint(unsigned regionIndex, bool putInTryRegion, BasicBlock* startBlk, BasicBlock* endBlk, BasicBlock* nearBlk, BasicBlock* jumpBlk, bool runRarely); unsigned fgGetNestingLevel(BasicBlock* block, unsigned* pFinallyNesting = nullptr); void fgPostImportationCleanup(); void fgRemoveStmt(BasicBlock* block, Statement* stmt DEBUGARG(bool isUnlink = false)); void fgUnlinkStmt(BasicBlock* block, Statement* stmt); bool fgCheckRemoveStmt(BasicBlock* block, Statement* stmt); void fgCreateLoopPreHeader(unsigned lnum); void fgUnreachableBlock(BasicBlock* block); void fgRemoveConditionalJump(BasicBlock* block); BasicBlock* fgLastBBInMainFunction(); BasicBlock* fgEndBBAfterMainFunction(); void fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd); void fgRemoveBlock(BasicBlock* block, bool unreachable); bool fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext); void fgCompactBlocks(BasicBlock* block, BasicBlock* bNext); void fgUpdateLoopsAfterCompacting(BasicBlock* block, BasicBlock* bNext); BasicBlock* fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst); bool fgRenumberBlocks(); bool fgExpandRarelyRunBlocks(); bool fgEhAllowsMoveBlock(BasicBlock* bBefore, BasicBlock* bAfter); void fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBlock* insertAfterBlk); enum FG_RELOCATE_TYPE { FG_RELOCATE_TRY, // relocate the 'try' region FG_RELOCATE_HANDLER // relocate the handler region (including the filter if necessary) }; BasicBlock* fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE relocateType); #if defined(FEATURE_EH_FUNCLETS) #if defined(TARGET_ARM) void fgClearFinallyTargetBit(BasicBlock* block); #endif // defined(TARGET_ARM) bool fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block); bool fgAnyIntraHandlerPreds(BasicBlock* block); void fgInsertFuncletPrologBlock(BasicBlock* block); void fgCreateFuncletPrologBlocks(); void fgCreateFunclets(); #else // !FEATURE_EH_FUNCLETS bool fgRelocateEHRegions(); #endif // !FEATURE_EH_FUNCLETS bool fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* target); bool fgBlockEndFavorsTailDuplication(BasicBlock* block, unsigned lclNum); bool fgBlockIsGoodTailDuplicationCandidate(BasicBlock* block, unsigned* lclNum); bool fgOptimizeEmptyBlock(BasicBlock* block); bool 
fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBlock* bDest); bool fgOptimizeBranch(BasicBlock* bJump); bool fgOptimizeSwitchBranches(BasicBlock* block); bool fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, BasicBlock* bPrev); bool fgOptimizeSwitchJumps(); #ifdef DEBUG void fgPrintEdgeWeights(); #endif void fgComputeBlockAndEdgeWeights(); weight_t fgComputeMissingBlockWeights(); void fgComputeCalledCount(weight_t returnWeight); void fgComputeEdgeWeights(); bool fgReorderBlocks(); PhaseStatus fgDetermineFirstColdBlock(); bool fgIsForwardBranch(BasicBlock* bJump, BasicBlock* bSrc = nullptr); bool fgUpdateFlowGraph(bool doTailDup = false); void fgFindOperOrder(); // method that returns if you should split here typedef bool(fgSplitPredicate)(GenTree* tree, GenTree* parent, fgWalkData* data); void fgSetBlockOrder(); void fgRemoveReturnBlock(BasicBlock* block); /* Helper code that has been factored out */ inline void fgConvertBBToThrowBB(BasicBlock* block); bool fgCastNeeded(GenTree* tree, var_types toType); GenTree* fgDoNormalizeOnStore(GenTree* tree); GenTree* fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry); // The following check for loops that don't execute calls bool fgLoopCallMarked; void fgLoopCallTest(BasicBlock* srcBB, BasicBlock* dstBB); void fgLoopCallMark(); void fgMarkLoopHead(BasicBlock* block); unsigned fgGetCodeEstimate(BasicBlock* block); #if DUMP_FLOWGRAPHS enum class PhasePosition { PrePhase, PostPhase }; const char* fgProcessEscapes(const char* nameIn, escapeMapping_t* map); static void fgDumpTree(FILE* fgxFile, GenTree* const tree); FILE* fgOpenFlowGraphFile(bool* wbDontClose, Phases phase, PhasePosition pos, LPCWSTR type); bool fgDumpFlowGraph(Phases phase, PhasePosition pos); #endif // DUMP_FLOWGRAPHS #ifdef DEBUG void fgDispDoms(); void fgDispReach(); void fgDispBBLiveness(BasicBlock* block); void fgDispBBLiveness(); void fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth = 0); void fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, bool dumpTrees); void fgDispBasicBlocks(bool dumpTrees = false); void fgDumpStmtTree(Statement* stmt, unsigned bbNum); void fgDumpBlock(BasicBlock* block); void fgDumpTrees(BasicBlock* firstBlock, BasicBlock* lastBlock); static fgWalkPreFn fgStress64RsltMulCB; void fgStress64RsltMul(); void fgDebugCheckUpdate(); void fgDebugCheckBBNumIncreasing(); void fgDebugCheckBBlist(bool checkBBNum = false, bool checkBBRefs = true); void fgDebugCheckBlockLinks(); void fgDebugCheckLinks(bool morphTrees = false); void fgDebugCheckStmtsList(BasicBlock* block, bool morphTrees); void fgDebugCheckNodeLinks(BasicBlock* block, Statement* stmt); void fgDebugCheckNodesUniqueness(); void fgDebugCheckLoopTable(); void fgDebugCheckFlags(GenTree* tree); void fgDebugCheckDispFlags(GenTree* tree, GenTreeFlags dispFlags, GenTreeDebugFlags debugFlags); void fgDebugCheckFlagsHelper(GenTree* tree, GenTreeFlags actualFlags, GenTreeFlags expectedFlags); void fgDebugCheckTryFinallyExits(); void fgDebugCheckProfileData(); bool fgDebugCheckIncomingProfileData(BasicBlock* block); bool fgDebugCheckOutgoingProfileData(BasicBlock* block); #endif // DEBUG static bool fgProfileWeightsEqual(weight_t weight1, weight_t weight2); static bool fgProfileWeightsConsistent(weight_t weight1, weight_t weight2); static GenTree* fgGetFirstNode(GenTree* tree); //--------------------- Walking the trees in the IR ----------------------- struct fgWalkData { Compiler* compiler; fgWalkPreFn* wtprVisitorFn; fgWalkPostFn* wtpoVisitorFn; void* pCallbackData; // 
user-provided data GenTree* parent; // parent of current node, provided to callback GenTreeStack* parentStack; // stack of parent nodes, if asked for bool wtprLclsOnly; // whether to only visit lclvar nodes #ifdef DEBUG bool printModified; // callback can use this #endif }; fgWalkResult fgWalkTreePre(GenTree** pTree, fgWalkPreFn* visitor, void* pCallBackData = nullptr, bool lclVarsOnly = false, bool computeStack = false); fgWalkResult fgWalkTree(GenTree** pTree, fgWalkPreFn* preVisitor, fgWalkPostFn* postVisitor, void* pCallBackData = nullptr); void fgWalkAllTreesPre(fgWalkPreFn* visitor, void* pCallBackData); //----- Postorder fgWalkResult fgWalkTreePost(GenTree** pTree, fgWalkPostFn* visitor, void* pCallBackData = nullptr, bool computeStack = false); // An fgWalkPreFn that looks for expressions that have inline throws in // minopts mode. Basically it looks for tress with gtOverflowEx() or // GTF_IND_RNGCHK. It returns WALK_ABORT if one is found. It // returns WALK_SKIP_SUBTREES if GTF_EXCEPT is not set (assumes flags // properly propagated to parent trees). It returns WALK_CONTINUE // otherwise. static fgWalkResult fgChkThrowCB(GenTree** pTree, Compiler::fgWalkData* data); static fgWalkResult fgChkLocAllocCB(GenTree** pTree, Compiler::fgWalkData* data); static fgWalkResult fgChkQmarkCB(GenTree** pTree, Compiler::fgWalkData* data); /************************************************************************** * PROTECTED *************************************************************************/ protected: friend class SsaBuilder; friend struct ValueNumberState; //--------------------- Detect the basic blocks --------------------------- BasicBlock** fgBBs; // Table of pointers to the BBs void fgInitBBLookup(); BasicBlock* fgLookupBB(unsigned addr); bool fgCanSwitchToOptimized(); void fgSwitchToOptimized(const char* reason); bool fgMayExplicitTailCall(); void fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget); void fgMarkBackwardJump(BasicBlock* startBlock, BasicBlock* endBlock); void fgLinkBasicBlocks(); unsigned fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget); void fgCheckBasicBlockControlFlow(); void fgControlFlowPermitted(BasicBlock* blkSrc, BasicBlock* blkDest, bool IsLeave = false /* is the src a leave block */); bool fgFlowToFirstBlockOfInnerTry(BasicBlock* blkSrc, BasicBlock* blkDest, bool sibling); void fgObserveInlineConstants(OPCODE opcode, const FgStack& stack, bool isInlining); void fgAdjustForAddressExposedOrWrittenThis(); unsigned fgStressBBProf() { #ifdef DEBUG unsigned result = JitConfig.JitStressBBProf(); if (result == 0) { if (compStressCompile(STRESS_BB_PROFILE, 15)) { result = 1; } } return result; #else return 0; #endif } bool fgHaveProfileData(); bool fgGetProfileWeightForBasicBlock(IL_OFFSET offset, weight_t* weight); Instrumentor* fgCountInstrumentor; Instrumentor* fgClassInstrumentor; PhaseStatus fgPrepareToInstrumentMethod(); PhaseStatus fgInstrumentMethod(); PhaseStatus fgIncorporateProfileData(); void fgIncorporateBlockCounts(); void fgIncorporateEdgeCounts(); CORINFO_CLASS_HANDLE getRandomClass(ICorJitInfo::PgoInstrumentationSchema* schema, UINT32 countSchemaItems, BYTE* pInstrumentationData, int32_t ilOffset, CLRRandom* random); public: const char* fgPgoFailReason; bool fgPgoDisabled; ICorJitInfo::PgoSource fgPgoSource; ICorJitInfo::PgoInstrumentationSchema* fgPgoSchema; BYTE* fgPgoData; UINT32 fgPgoSchemaCount; HRESULT fgPgoQueryResult; UINT32 fgNumProfileRuns; UINT32 fgPgoBlockCounts; 
UINT32 fgPgoEdgeCounts; UINT32 fgPgoClassProfiles; unsigned fgPgoInlineePgo; unsigned fgPgoInlineeNoPgo; unsigned fgPgoInlineeNoPgoSingleBlock; void WalkSpanningTree(SpanningTreeVisitor* visitor); void fgSetProfileWeight(BasicBlock* block, weight_t weight); void fgApplyProfileScale(); bool fgHaveSufficientProfileData(); bool fgHaveTrustedProfileData(); // fgIsUsingProfileWeights - returns true if we have real profile data for this method // or if we have some fake profile data for the stress mode bool fgIsUsingProfileWeights() { return (fgHaveProfileData() || fgStressBBProf()); } // fgProfileRunsCount - returns total number of scenario runs for the profile data // or BB_UNITY_WEIGHT_UNSIGNED when we aren't using profile data. unsigned fgProfileRunsCount() { return fgIsUsingProfileWeights() ? fgNumProfileRuns : BB_UNITY_WEIGHT_UNSIGNED; } //-------- Insert a statement at the start or end of a basic block -------- #ifdef DEBUG public: static bool fgBlockContainsStatementBounded(BasicBlock* block, Statement* stmt, bool answerOnBoundExceeded = true); #endif public: Statement* fgNewStmtAtBeg(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); void fgInsertStmtAtEnd(BasicBlock* block, Statement* stmt); Statement* fgNewStmtAtEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); Statement* fgNewStmtNearEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); private: void fgInsertStmtNearEnd(BasicBlock* block, Statement* stmt); void fgInsertStmtAtBeg(BasicBlock* block, Statement* stmt); void fgInsertStmtAfter(BasicBlock* block, Statement* insertionPoint, Statement* stmt); public: void fgInsertStmtBefore(BasicBlock* block, Statement* insertionPoint, Statement* stmt); private: Statement* fgInsertStmtListAfter(BasicBlock* block, Statement* stmtAfter, Statement* stmtList); // Create a new temporary variable to hold the result of *ppTree, // and transform the graph accordingly. GenTree* fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType = nullptr); GenTree* fgMakeMultiUse(GenTree** ppTree); private: // Recognize a bitwise rotation pattern and convert into a GT_ROL or a GT_ROR node. GenTree* fgRecognizeAndMorphBitwiseRotation(GenTree* tree); bool fgOperIsBitwiseRotationRoot(genTreeOps oper); #if !defined(TARGET_64BIT) // Recognize and morph a long multiplication with 32 bit operands. GenTreeOp* fgRecognizeAndMorphLongMul(GenTreeOp* mul); GenTreeOp* fgMorphLongMul(GenTreeOp* mul); #endif //-------- Determine the order in which the trees will be evaluated ------- unsigned fgTreeSeqNum; GenTree* fgTreeSeqLst; GenTree* fgTreeSeqBeg; GenTree* fgSetTreeSeq(GenTree* tree, GenTree* prev = nullptr, bool isLIR = false); void fgSetTreeSeqHelper(GenTree* tree, bool isLIR); void fgSetTreeSeqFinish(GenTree* tree, bool isLIR); void fgSetStmtSeq(Statement* stmt); void fgSetBlockOrder(BasicBlock* block); //------------------------- Morphing -------------------------------------- unsigned fgPtrArgCntMax; public: //------------------------------------------------------------------------ // fgGetPtrArgCntMax: Return the maximum number of pointer-sized stack arguments that calls inside this method // can push on the stack. This value is calculated during morph. // // Return Value: // Returns fgPtrArgCntMax, that is a private field. 
// unsigned fgGetPtrArgCntMax() const { return fgPtrArgCntMax; } //------------------------------------------------------------------------ // fgSetPtrArgCntMax: Set the maximum number of pointer-sized stack arguments that calls inside this method // can push on the stack. This function is used during StackLevelSetter to fix incorrect morph calculations. // void fgSetPtrArgCntMax(unsigned argCntMax) { fgPtrArgCntMax = argCntMax; } bool compCanEncodePtrArgCntMax(); private: hashBv* fgOutgoingArgTemps; hashBv* fgCurrentlyInUseArgTemps; void fgSetRngChkTarget(GenTree* tree, bool delay = true); BasicBlock* fgSetRngChkTargetInner(SpecialCodeKind kind, bool delay); #if REARRANGE_ADDS void fgMoveOpsLeft(GenTree* tree); #endif bool fgIsCommaThrow(GenTree* tree, bool forFolding = false); bool fgIsThrow(GenTree* tree); bool fgInDifferentRegions(BasicBlock* blk1, BasicBlock* blk2); bool fgIsBlockCold(BasicBlock* block); GenTree* fgMorphCastIntoHelper(GenTree* tree, int helper, GenTree* oper); GenTree* fgMorphIntoHelperCall(GenTree* tree, int helper, GenTreeCall::Use* args, bool morphArgs = true); GenTree* fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs); // A "MorphAddrContext" carries information from the surrounding context. If we are evaluating a byref address, // it is useful to know whether the address will be immediately dereferenced, or whether the address value will // be used, perhaps by passing it as an argument to a called method. This affects how null checking is done: // for sufficiently small offsets, we can rely on OS page protection to implicitly null-check addresses that we // know will be dereferenced. To know that reliance on implicit null checking is sound, we must further know that // all offsets between the top-level indirection and the bottom are constant, and that their sum is sufficiently // small; hence the other fields of MorphAddrContext. enum MorphAddrContextKind { MACK_Ind, MACK_Addr, }; struct MorphAddrContext { MorphAddrContextKind m_kind; bool m_allConstantOffsets; // Valid only for "m_kind == MACK_Ind". True iff all offsets between // top-level indirection and here have been constants. size_t m_totalOffset; // Valid only for "m_kind == MACK_Ind", and if "m_allConstantOffsets" is true. // In that case, is the sum of those constant offsets. MorphAddrContext(MorphAddrContextKind kind) : m_kind(kind), m_allConstantOffsets(true), m_totalOffset(0) { } }; // A MACK_CopyBlock context is immutable, so we can just make one of these and share it. static MorphAddrContext s_CopyBlockMAC; #ifdef FEATURE_SIMD GenTree* getSIMDStructFromField(GenTree* tree, CorInfoType* simdBaseJitTypeOut, unsigned* indexOut, unsigned* simdSizeOut, bool ignoreUsedInSIMDIntrinsic = false); GenTree* fgMorphFieldAssignToSimdSetElement(GenTree* tree); GenTree* fgMorphFieldToSimdGetElement(GenTree* tree); bool fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt); void impMarkContiguousSIMDFieldAssignments(Statement* stmt); // fgPreviousCandidateSIMDFieldAsgStmt is only used for tracking previous simd field assignment // in function: Complier::impMarkContiguousSIMDFieldAssignments. 
Statement* fgPreviousCandidateSIMDFieldAsgStmt; #endif // FEATURE_SIMD GenTree* fgMorphArrayIndex(GenTree* tree); GenTree* fgMorphExpandCast(GenTreeCast* tree); GenTreeFieldList* fgMorphLclArgToFieldlist(GenTreeLclVarCommon* lcl); void fgInitArgInfo(GenTreeCall* call); GenTreeCall* fgMorphArgs(GenTreeCall* call); void fgMakeOutgoingStructArgCopy(GenTreeCall* call, GenTreeCall::Use* args, CORINFO_CLASS_HANDLE copyBlkClass); GenTree* fgMorphLocalVar(GenTree* tree, bool forceRemorph); public: bool fgAddrCouldBeNull(GenTree* addr); private: GenTree* fgMorphField(GenTree* tree, MorphAddrContext* mac); bool fgCanFastTailCall(GenTreeCall* call, const char** failReason); #if FEATURE_FASTTAILCALL bool fgCallHasMustCopyByrefParameter(GenTreeCall* callee); #endif bool fgCheckStmtAfterTailCall(); GenTree* fgMorphTailCallViaHelpers(GenTreeCall* call, CORINFO_TAILCALL_HELPERS& help); bool fgCanTailCallViaJitHelper(); void fgMorphTailCallViaJitHelper(GenTreeCall* call); GenTree* fgCreateCallDispatcherAndGetResult(GenTreeCall* origCall, CORINFO_METHOD_HANDLE callTargetStubHnd, CORINFO_METHOD_HANDLE dispatcherHnd); GenTree* getLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle); GenTree* getRuntimeLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle); GenTree* getVirtMethodPointerTree(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); GenTree* getTokenHandleTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool parent); GenTree* fgMorphPotentialTailCall(GenTreeCall* call); GenTree* fgGetStubAddrArg(GenTreeCall* call); unsigned fgGetArgTabEntryParameterLclNum(GenTreeCall* call, fgArgTabEntry* argTabEntry); void fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall); Statement* fgAssignRecursiveCallArgToCallerParam(GenTree* arg, fgArgTabEntry* argTabEntry, unsigned lclParamNum, BasicBlock* block, const DebugInfo& callDI, Statement* tmpAssignmentInsertionPoint, Statement* paramAssignmentInsertionPoint); GenTree* fgMorphCall(GenTreeCall* call); GenTree* fgExpandVirtualVtableCallTarget(GenTreeCall* call); void fgMorphCallInline(GenTreeCall* call, InlineResult* result); void fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result, InlineContext** createdContext); #if DEBUG void fgNoteNonInlineCandidate(Statement* stmt, GenTreeCall* call); static fgWalkPreFn fgFindNonInlineCandidate; #endif GenTree* fgOptimizeDelegateConstructor(GenTreeCall* call, CORINFO_CONTEXT_HANDLE* ExactContextHnd, CORINFO_RESOLVED_TOKEN* ldftnToken); GenTree* fgMorphLeaf(GenTree* tree); void fgAssignSetVarDef(GenTree* tree); GenTree* fgMorphOneAsgBlockOp(GenTree* tree); GenTree* fgMorphInitBlock(GenTree* tree); GenTree* fgMorphPromoteLocalInitBlock(GenTreeLclVar* destLclNode, GenTree* initVal, unsigned blockSize); GenTree* fgMorphGetStructAddr(GenTree** pTree, CORINFO_CLASS_HANDLE clsHnd, bool isRValue = false); GenTree* fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigned blockWidth, bool isBlkReqd); GenTree* fgMorphCopyBlock(GenTree* tree); GenTree* fgMorphStoreDynBlock(GenTreeStoreDynBlk* tree); GenTree* fgMorphForRegisterFP(GenTree* tree); GenTree* fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac = nullptr); GenTree* fgOptimizeCast(GenTreeCast* cast); GenTree* fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp); GenTree* fgOptimizeRelationalComparisonWithConst(GenTreeOp* cmp); #ifdef FEATURE_HW_INTRINSICS GenTree* 
fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node); #endif GenTree* fgOptimizeCommutativeArithmetic(GenTreeOp* tree); GenTree* fgOptimizeRelationalComparisonWithCasts(GenTreeOp* cmp); GenTree* fgOptimizeAddition(GenTreeOp* add); GenTree* fgOptimizeMultiply(GenTreeOp* mul); GenTree* fgOptimizeBitwiseAnd(GenTreeOp* andOp); GenTree* fgPropagateCommaThrow(GenTree* parent, GenTreeOp* commaThrow, GenTreeFlags precedingSideEffects); GenTree* fgMorphRetInd(GenTreeUnOp* tree); GenTree* fgMorphModToSubMulDiv(GenTreeOp* tree); GenTree* fgMorphSmpOpOptional(GenTreeOp* tree); GenTree* fgMorphMultiOp(GenTreeMultiOp* multiOp); GenTree* fgMorphConst(GenTree* tree); bool fgMorphCanUseLclFldForCopy(unsigned lclNum1, unsigned lclNum2); GenTreeLclVar* fgMorphTryFoldObjAsLclVar(GenTreeObj* obj, bool destroyNodes = true); GenTreeOp* fgMorphCommutative(GenTreeOp* tree); GenTree* fgMorphCastedBitwiseOp(GenTreeOp* tree); GenTree* fgMorphReduceAddOps(GenTree* tree); public: GenTree* fgMorphTree(GenTree* tree, MorphAddrContext* mac = nullptr); private: void fgKillDependentAssertionsSingle(unsigned lclNum DEBUGARG(GenTree* tree)); void fgKillDependentAssertions(unsigned lclNum DEBUGARG(GenTree* tree)); void fgMorphTreeDone(GenTree* tree, GenTree* oldTree = nullptr DEBUGARG(int morphNum = 0)); Statement* fgMorphStmt; unsigned fgGetBigOffsetMorphingTemp(var_types type); // We cache one temp per type to be // used when morphing big offset. //----------------------- Liveness analysis ------------------------------- VARSET_TP fgCurUseSet; // vars used by block (before an assignment) VARSET_TP fgCurDefSet; // vars assigned by block (before a use) MemoryKindSet fgCurMemoryUse; // True iff the current basic block uses memory. MemoryKindSet fgCurMemoryDef; // True iff the current basic block modifies memory. MemoryKindSet fgCurMemoryHavoc; // True if the current basic block is known to set memory to a "havoc" value. bool byrefStatesMatchGcHeapStates; // True iff GcHeap and ByrefExposed memory have all the same def points. void fgMarkUseDef(GenTreeLclVarCommon* tree); void fgBeginScopeLife(VARSET_TP* inScope, VarScopeDsc* var); void fgEndScopeLife(VARSET_TP* inScope, VarScopeDsc* var); void fgMarkInScope(BasicBlock* block, VARSET_VALARG_TP inScope); void fgUnmarkInScope(BasicBlock* block, VARSET_VALARG_TP unmarkScope); void fgExtendDbgScopes(); void fgExtendDbgLifetimes(); #ifdef DEBUG void fgDispDebugScopes(); #endif // DEBUG //------------------------------------------------------------------------- // // The following keeps track of any code we've added for things like array // range checking or explicit calls to enable GC, and so on. // public: struct AddCodeDsc { AddCodeDsc* acdNext; BasicBlock* acdDstBlk; // block to which we jump unsigned acdData; SpecialCodeKind acdKind; // what kind of a special block is this? #if !FEATURE_FIXED_OUT_ARGS bool acdStkLvlInit; // has acdStkLvl value been already set? unsigned acdStkLvl; // stack level in stack slots. 
#endif // !FEATURE_FIXED_OUT_ARGS }; private: static unsigned acdHelper(SpecialCodeKind codeKind); AddCodeDsc* fgAddCodeList; bool fgAddCodeModf; bool fgRngChkThrowAdded; AddCodeDsc* fgExcptnTargetCache[SCK_COUNT]; BasicBlock* fgRngChkTarget(BasicBlock* block, SpecialCodeKind kind); BasicBlock* fgAddCodeRef(BasicBlock* srcBlk, unsigned refData, SpecialCodeKind kind); public: AddCodeDsc* fgFindExcptnTarget(SpecialCodeKind kind, unsigned refData); bool fgUseThrowHelperBlocks(); AddCodeDsc* fgGetAdditionalCodeDescriptors() { return fgAddCodeList; } private: bool fgIsCodeAdded(); bool fgIsThrowHlpBlk(BasicBlock* block); #if !FEATURE_FIXED_OUT_ARGS unsigned fgThrowHlpBlkStkLevel(BasicBlock* block); #endif // !FEATURE_FIXED_OUT_ARGS unsigned fgBigOffsetMorphingTemps[TYP_COUNT]; unsigned fgCheckInlineDepthAndRecursion(InlineInfo* inlineInfo); void fgInvokeInlineeCompiler(GenTreeCall* call, InlineResult* result, InlineContext** createdContext); void fgInsertInlineeBlocks(InlineInfo* pInlineInfo); Statement* fgInlinePrependStatements(InlineInfo* inlineInfo); void fgInlineAppendStatements(InlineInfo* inlineInfo, BasicBlock* block, Statement* stmt); #if FEATURE_MULTIREG_RET GenTree* fgGetStructAsStructPtr(GenTree* tree); GenTree* fgAssignStructInlineeToVar(GenTree* child, CORINFO_CLASS_HANDLE retClsHnd); void fgAttachStructInlineeToAsg(GenTree* tree, GenTree* child, CORINFO_CLASS_HANDLE retClsHnd); #endif // FEATURE_MULTIREG_RET static fgWalkPreFn fgUpdateInlineReturnExpressionPlaceHolder; static fgWalkPostFn fgLateDevirtualization; #ifdef DEBUG static fgWalkPreFn fgDebugCheckInlineCandidates; void CheckNoTransformableIndirectCallsRemain(); static fgWalkPreFn fgDebugCheckForTransformableIndirectCalls; #endif void fgPromoteStructs(); void fgMorphStructField(GenTree* tree, GenTree* parent); void fgMorphLocalField(GenTree* tree, GenTree* parent); // Reset the refCount for implicit byrefs. void fgResetImplicitByRefRefCount(); // Change implicit byrefs' types from struct to pointer, and for any that were // promoted, create new promoted struct temps. void fgRetypeImplicitByRefArgs(); // Rewrite appearances of implicit byrefs (manifest the implied additional level of indirection). bool fgMorphImplicitByRefArgs(GenTree* tree); GenTree* fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr); // Clear up annotations for any struct promotion temps created for implicit byrefs. void fgMarkDemotedImplicitByRefArgs(); void fgMarkAddressExposedLocals(); void fgMarkAddressExposedLocals(Statement* stmt); PhaseStatus fgForwardSub(); bool fgForwardSubBlock(BasicBlock* block); bool fgForwardSubStatement(Statement* statement); static fgWalkPreFn fgUpdateSideEffectsPre; static fgWalkPostFn fgUpdateSideEffectsPost; // The given local variable, required to be a struct variable, is being assigned via // a "lclField", to make it masquerade as an integral type in the ABI. Make sure that // the variable is not enregistered, and is therefore not promoted independently. 
void fgLclFldAssign(unsigned lclNum); static fgWalkPreFn gtHasLocalsWithAddrOpCB; enum TypeProducerKind { TPK_Unknown = 0, // May not be a RuntimeType TPK_Handle = 1, // RuntimeType via handle TPK_GetType = 2, // RuntimeType via Object.get_Type() TPK_Null = 3, // Tree value is null TPK_Other = 4 // RuntimeType via other means }; TypeProducerKind gtGetTypeProducerKind(GenTree* tree); bool gtIsTypeHandleToRuntimeTypeHelper(GenTreeCall* call); bool gtIsTypeHandleToRuntimeTypeHandleHelper(GenTreeCall* call, CorInfoHelpFunc* pHelper = nullptr); bool gtIsActiveCSE_Candidate(GenTree* tree); bool fgIsBigOffset(size_t offset); bool fgNeedReturnSpillTemp(); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Optimizer XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: void optInit(); GenTree* optRemoveRangeCheck(GenTreeBoundsChk* check, GenTree* comma, Statement* stmt); GenTree* optRemoveStandaloneRangeCheck(GenTreeBoundsChk* check, Statement* stmt); void optRemoveCommaBasedRangeCheck(GenTree* comma, Statement* stmt); protected: // Do hoisting for all loops. void optHoistLoopCode(); // To represent sets of VN's that have already been hoisted in outer loops. typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, bool> VNSet; struct LoopHoistContext { private: // The set of variables hoisted in the current loop (or nullptr if there are none). VNSet* m_pHoistedInCurLoop; public: // Value numbers of expressions that have been hoisted in parent loops in the loop nest. VNSet m_hoistedInParentLoops; // Value numbers of expressions that have been hoisted in the current (or most recent) loop in the nest. // Previous decisions on loop-invariance of value numbers in the current loop. VNSet m_curLoopVnInvariantCache; VNSet* GetHoistedInCurLoop(Compiler* comp) { if (m_pHoistedInCurLoop == nullptr) { m_pHoistedInCurLoop = new (comp->getAllocatorLoopHoist()) VNSet(comp->getAllocatorLoopHoist()); } return m_pHoistedInCurLoop; } VNSet* ExtractHoistedInCurLoop() { VNSet* res = m_pHoistedInCurLoop; m_pHoistedInCurLoop = nullptr; return res; } LoopHoistContext(Compiler* comp) : m_pHoistedInCurLoop(nullptr) , m_hoistedInParentLoops(comp->getAllocatorLoopHoist()) , m_curLoopVnInvariantCache(comp->getAllocatorLoopHoist()) { } }; // Do hoisting for loop "lnum" (an index into the optLoopTable), and all loops nested within it. // Tracks the expressions that have been hoisted by containing loops by temporarily recording their // value numbers in "m_hoistedInParentLoops". This set is not modified by the call. void optHoistLoopNest(unsigned lnum, LoopHoistContext* hoistCtxt); // Do hoisting for a particular loop ("lnum" is an index into the optLoopTable.) // Assumes that expressions have been hoisted in containing loops if their value numbers are in // "m_hoistedInParentLoops". // void optHoistThisLoop(unsigned lnum, LoopHoistContext* hoistCtxt); // Hoist all expressions in "blocks" that are invariant in loop "loopNum" (an index into the optLoopTable) // outside of that loop. Exempt expressions whose value number is in "m_hoistedInParentLoops"; add VN's of hoisted // expressions to "hoistInLoop". 
void optHoistLoopBlocks(unsigned loopNum, ArrayStack<BasicBlock*>* blocks, LoopHoistContext* hoistContext); // Return true if the tree looks profitable to hoist out of loop 'lnum'. bool optIsProfitableToHoistTree(GenTree* tree, unsigned lnum); // Performs the hoisting 'tree' into the PreHeader for loop 'lnum' void optHoistCandidate(GenTree* tree, BasicBlock* treeBb, unsigned lnum, LoopHoistContext* hoistCtxt); // Returns true iff the ValueNum "vn" represents a value that is loop-invariant in "lnum". // Constants and init values are always loop invariant. // VNPhi's connect VN's to the SSA definition, so we can know if the SSA def occurs in the loop. bool optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNSet* recordedVNs); // If "blk" is the entry block of a natural loop, returns true and sets "*pLnum" to the index of the loop // in the loop table. bool optBlockIsLoopEntry(BasicBlock* blk, unsigned* pLnum); // Records the set of "side effects" of all loops: fields (object instance and static) // written to, and SZ-array element type equivalence classes updated. void optComputeLoopSideEffects(); #ifdef DEBUG bool optAnyChildNotRemoved(unsigned loopNum); #endif // DEBUG // Mark a loop as removed. void optMarkLoopRemoved(unsigned loopNum); private: // Requires "lnum" to be the index of an outermost loop in the loop table. Traverses the body of that loop, // including all nested loops, and records the set of "side effects" of the loop: fields (object instance and // static) written to, and SZ-array element type equivalence classes updated. void optComputeLoopNestSideEffects(unsigned lnum); // Given a loop number 'lnum' mark it and any nested loops as having 'memoryHavoc' void optRecordLoopNestsMemoryHavoc(unsigned lnum, MemoryKindSet memoryHavoc); // Add the side effects of "blk" (which is required to be within a loop) to all loops of which it is a part. // Returns false if we encounter a block that is not marked as being inside a loop. // bool optComputeLoopSideEffectsOfBlock(BasicBlock* blk); // Hoist the expression "expr" out of loop "lnum". void optPerformHoistExpr(GenTree* expr, BasicBlock* exprBb, unsigned lnum); public: void optOptimizeBools(); public: PhaseStatus optInvertLoops(); // Invert loops so they're entered at top and tested at bottom. PhaseStatus optOptimizeLayout(); // Optimize the BasicBlock layout of the method PhaseStatus optSetBlockWeights(); PhaseStatus optFindLoopsPhase(); // Finds loops and records them in the loop table void optFindLoops(); PhaseStatus optCloneLoops(); void optCloneLoop(unsigned loopInd, LoopCloneContext* context); void optEnsureUniqueHead(unsigned loopInd, weight_t ambientWeight); PhaseStatus optUnrollLoops(); // Unrolls loops (needs to have cost info) void optRemoveRedundantZeroInits(); protected: // This enumeration describes what is killed by a call. enum callInterf { CALLINT_NONE, // no interference (most helpers) CALLINT_REF_INDIRS, // kills GC ref indirections (SETFIELD OBJ) CALLINT_SCL_INDIRS, // kills non GC ref indirections (SETFIELD non-OBJ) CALLINT_ALL_INDIRS, // kills both GC ref and non GC ref indirections (SETFIELD STRUCT) CALLINT_ALL, // kills everything (normal method call) }; enum class FieldKindForVN { SimpleStatic, WithBaseAddr }; public: // A "LoopDsc" describes a ("natural") loop. We (currently) require the body of a loop to be a contiguous (in // bbNext order) sequence of basic blocks. (At times, we may require the blocks in a loop to be "properly numbered" // in bbNext order; we use comparisons on the bbNum to decide order.) 
// The blocks that define the body are // top <= entry <= bottom // The "head" of the loop is a block outside the loop that has "entry" as a successor. We only support loops with a // single 'head' block. The meanings of these blocks are given in the definitions below. Also see the picture at // Compiler::optFindNaturalLoops(). struct LoopDsc { BasicBlock* lpHead; // HEAD of the loop (not part of the looping of the loop) -- has ENTRY as a successor. BasicBlock* lpTop; // loop TOP (the back edge from lpBottom reaches here). Lexically first block (in bbNext // order) reachable in this loop. BasicBlock* lpEntry; // the ENTRY in the loop (in most cases TOP or BOTTOM) BasicBlock* lpBottom; // loop BOTTOM (from here we have a back edge to the TOP) BasicBlock* lpExit; // if a single exit loop this is the EXIT (in most cases BOTTOM) callInterf lpAsgCall; // "callInterf" for calls in the loop ALLVARSET_TP lpAsgVars; // set of vars assigned within the loop (all vars, not just tracked) varRefKinds lpAsgInds : 8; // set of inds modified within the loop LoopFlags lpFlags; unsigned char lpExitCnt; // number of exits from the loop unsigned char lpParent; // The index of the most-nested loop that completely contains this one, // or else BasicBlock::NOT_IN_LOOP if no such loop exists. unsigned char lpChild; // The index of a nested loop, or else BasicBlock::NOT_IN_LOOP if no child exists. // (Actually, an "immediately" nested loop -- // no other child of this loop is a parent of lpChild.) unsigned char lpSibling; // The index of another loop that is an immediate child of lpParent, // or else BasicBlock::NOT_IN_LOOP. One can enumerate all the children of a loop // by following "lpChild" then "lpSibling" links. bool lpLoopHasMemoryHavoc[MemoryKindCount]; // The loop contains an operation that we assume has arbitrary // memory side effects. If this is set, the fields below // may not be accurate (since they become irrelevant.) VARSET_TP lpVarInOut; // The set of variables that are IN or OUT during the execution of this loop VARSET_TP lpVarUseDef; // The set of variables that are USE or DEF during the execution of this loop // The following counts are used for hoisting profitability checks. int lpHoistedExprCount; // The register count for the non-FP expressions from inside this loop that have been // hoisted int lpLoopVarCount; // The register count for the non-FP LclVars that are read/written inside this loop int lpVarInOutCount; // The register count for the non-FP LclVars that are alive inside or across this loop int lpHoistedFPExprCount; // The register count for the FP expressions from inside this loop that have been // hoisted int lpLoopVarFPCount; // The register count for the FP LclVars that are read/written inside this loop int lpVarInOutFPCount; // The register count for the FP LclVars that are alive inside or across this loop typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<struct CORINFO_FIELD_STRUCT_>, FieldKindForVN> FieldHandleSet; FieldHandleSet* lpFieldsModified; // This has entries for all static field and object instance fields modified // in the loop. typedef JitHashTable<CORINFO_CLASS_HANDLE, JitPtrKeyFuncs<struct CORINFO_CLASS_STRUCT_>, bool> ClassHandleSet; ClassHandleSet* lpArrayElemTypesModified; // Bits set indicate the set of sz array element types such that // arrays of that type are modified // in the loop. 
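// Illustrative sketch (not part of the JIT sources): one way a caller that has these side effect
// summaries available (after optComputeLoopSideEffects) might ask whether a static or instance
// field can be assumed unmodified by loop "lnum". The names "comp", "lnum" and "fldHnd" are
// assumptions of the sketch; "GcHeap" refers to the MemoryKind enumeration. The real invariance
// reasoning lives in optVNIsLoopInvariant and the loop hoisting code.
//
//   LoopDsc& loop               = comp->optLoopTable[lnum];
//   bool     fieldMayBeInvariant = !loop.lpLoopHasMemoryHavoc[GcHeap]; // arbitrary memory side effects?
//   FieldKindForVN fieldKind;
//   if (fieldMayBeInvariant && (loop.lpFieldsModified != nullptr) &&
//       loop.lpFieldsModified->Lookup(fldHnd, &fieldKind))
//   {
//       fieldMayBeInvariant = false; // the loop itself writes this field
//   }
//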
// Adds the variable liveness information for 'blk' to 'this' LoopDsc void AddVariableLiveness(Compiler* comp, BasicBlock* blk); inline void AddModifiedField(Compiler* comp, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind); // This doesn't *always* take a class handle -- it can also take primitive types, encoded as class handles // (shifted left, with a low-order bit set to distinguish.) // Use the {Encode/Decode}ElemType methods to construct/destruct these. inline void AddModifiedElemType(Compiler* comp, CORINFO_CLASS_HANDLE structHnd); /* The following values are set only for iterator loops, i.e. has the flag LPFLG_ITER set */ GenTree* lpIterTree; // The "i = i <op> const" tree unsigned lpIterVar() const; // iterator variable # int lpIterConst() const; // the constant with which the iterator is incremented genTreeOps lpIterOper() const; // the type of the operation on the iterator (ASG_ADD, ASG_SUB, etc.) void VERIFY_lpIterTree() const; var_types lpIterOperType() const; // For overflow instructions // Set to the block where we found the initialization for LPFLG_CONST_INIT or LPFLG_VAR_INIT loops. // Initially, this will be 'head', but 'head' might change if we insert a loop pre-header block. BasicBlock* lpInitBlock; union { int lpConstInit; // initial constant value of iterator // : Valid if LPFLG_CONST_INIT unsigned lpVarInit; // initial local var number to which we initialize the iterator // : Valid if LPFLG_VAR_INIT }; // The following is for LPFLG_ITER loops only (i.e. the loop condition is "i RELOP const or var") GenTree* lpTestTree; // pointer to the node containing the loop test genTreeOps lpTestOper() const; // the type of the comparison between the iterator and the limit (GT_LE, GT_GE, // etc.) void VERIFY_lpTestTree() const; bool lpIsReversed() const; // true if the iterator node is the second operand in the loop condition GenTree* lpIterator() const; // the iterator node in the loop test GenTree* lpLimit() const; // the limit node in the loop test // Limit constant value of iterator - loop condition is "i RELOP const" // : Valid if LPFLG_CONST_LIMIT int lpConstLimit() const; // The lclVar # in the loop condition ( "i RELOP lclVar" ) // : Valid if LPFLG_VAR_LIMIT unsigned lpVarLimit() const; // The array length in the loop condition ( "i RELOP arr.len" or "i RELOP arr[i][j].len" ) // : Valid if LPFLG_ARRLEN_LIMIT bool lpArrLenLimit(Compiler* comp, ArrIndex* index) const; // Returns "true" iff this is a "top entry" loop. bool lpIsTopEntry() const { if (lpHead->bbNext == lpEntry) { assert(lpHead->bbFallsThrough()); assert(lpTop == lpEntry); return true; } else { return false; } } // Returns "true" iff "*this" contains the blk. bool lpContains(BasicBlock* blk) const { return lpTop->bbNum <= blk->bbNum && blk->bbNum <= lpBottom->bbNum; } // Returns "true" iff "*this" (properly) contains the range [top, bottom] (allowing tops // to be equal, but requiring bottoms to be different.) bool lpContains(BasicBlock* top, BasicBlock* bottom) const { return lpTop->bbNum <= top->bbNum && bottom->bbNum < lpBottom->bbNum; } // Returns "true" iff "*this" (properly) contains "lp2" (allowing tops to be equal, but requiring // bottoms to be different.) bool lpContains(const LoopDsc& lp2) const { return lpContains(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff "*this" is (properly) contained by the range [top, bottom] // (allowing tops to be equal, but requiring bottoms to be different.) 
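        // For example (illustrative block numbers): a loop with lpTop == BB05 and lpBottom == BB10
        // is contained by the range [BB05, BB12] (5 <= 5 && 10 < 12), but not by the range
        // [BB05, BB10], because proper containment requires the bottoms to differ.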
bool lpContainedBy(BasicBlock* top, BasicBlock* bottom) const { return top->bbNum <= lpTop->bbNum && lpBottom->bbNum < bottom->bbNum; } // Returns "true" iff "*this" is (properly) contained by "lp2" // (allowing tops to be equal, but requiring bottoms to be different.) bool lpContainedBy(const LoopDsc& lp2) const { return lpContainedBy(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff "*this" is disjoint from the range [top, bottom]. bool lpDisjoint(BasicBlock* top, BasicBlock* bottom) const { return bottom->bbNum < lpTop->bbNum || lpBottom->bbNum < top->bbNum; } // Returns "true" iff "*this" is disjoint from "lp2". bool lpDisjoint(const LoopDsc& lp2) const { return lpDisjoint(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff the loop is well-formed (see code for defn). bool lpWellFormed() const { return lpTop->bbNum <= lpEntry->bbNum && lpEntry->bbNum <= lpBottom->bbNum && (lpHead->bbNum < lpTop->bbNum || lpHead->bbNum > lpBottom->bbNum); } #ifdef DEBUG void lpValidatePreHeader() const { // If this is called, we expect there to be a pre-header. assert(lpFlags & LPFLG_HAS_PREHEAD); // The pre-header must unconditionally enter the loop. assert(lpHead->GetUniqueSucc() == lpEntry); // The loop block must be marked as a pre-header. assert(lpHead->bbFlags & BBF_LOOP_PREHEADER); // The loop entry must have a single non-loop predecessor, which is the pre-header. // We can't assume here that the bbNum are properly ordered, so we can't do a simple lpContained() // check. So, we defer this check, which will be done by `fgDebugCheckLoopTable()`. } #endif // DEBUG // LoopBlocks: convenience method for enabling range-based `for` iteration over all the // blocks in a loop, e.g.: // for (BasicBlock* const block : loop->LoopBlocks()) ... // Currently, the loop blocks are expected to be in linear, lexical, `bbNext` order // from `lpTop` through `lpBottom`, inclusive. All blocks in this range are considered // to be part of the loop. // BasicBlockRangeList LoopBlocks() const { return BasicBlockRangeList(lpTop, lpBottom); } }; protected: bool fgMightHaveLoop(); // returns true if there are any back edges bool fgHasLoops; // True if this method has any loops, set in fgComputeReachability public: LoopDsc* optLoopTable; // loop descriptor table unsigned char optLoopCount; // number of tracked loops unsigned char loopAlignCandidates; // number of loops identified for alignment // Every time we rebuild the loop table, we increase the global "loop epoch". Any loop indices or // loop table pointers from the previous epoch are invalid. // TODO: validate this in some way? unsigned optCurLoopEpoch; void NewLoopEpoch() { ++optCurLoopEpoch; JITDUMP("New loop epoch %d\n", optCurLoopEpoch); } #ifdef DEBUG unsigned char loopsAligned; // number of loops actually aligned #endif // DEBUG bool optRecordLoop(BasicBlock* head, BasicBlock* top, BasicBlock* entry, BasicBlock* bottom, BasicBlock* exit, unsigned char exitCnt); void optClearLoopIterInfo(); #ifdef DEBUG void optPrintLoopInfo(unsigned lnum, bool printVerbose = false); void optPrintLoopInfo(const LoopDsc* loop, bool printVerbose = false); void optPrintLoopTable(); #endif protected: unsigned optCallCount; // number of calls made in the method unsigned optIndirectCallCount; // number of virtual, interface and indirect calls made in the method unsigned optNativeCallCount; // number of Pinvoke/Native calls made in the method unsigned optLoopsCloned; // number of loops cloned in the current method. 
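    // Illustrative note (a sketch only): passes that need to visit every recorded loop typically
    // walk the table directly, e.g.
    //     for (unsigned lnum = 0; lnum < optLoopCount; lnum++)
    //     {
    //         LoopDsc& loop = optLoopTable[lnum];
    //         if (loop.lpFlags & LPFLG_REMOVED) continue;
    //         ...
    //     }
    // where LPFLG_REMOVED marks entries whose loop has been removed from the table.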
#ifdef DEBUG void optCheckPreds(); #endif void optResetLoopInfo(); void optFindAndScaleGeneralLoopBlocks(); // Determine if there are any potential loops, and set BBF_LOOP_HEAD on potential loop heads. void optMarkLoopHeads(); void optScaleLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk); void optUnmarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk); void optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmarkLoop = false); bool optIsLoopTestEvalIntoTemp(Statement* testStmt, Statement** newTestStmt); unsigned optIsLoopIncrTree(GenTree* incr); bool optCheckIterInLoopTest(unsigned loopInd, GenTree* test, BasicBlock* from, BasicBlock* to, unsigned iterVar); bool optComputeIterInfo(GenTree* incr, BasicBlock* from, BasicBlock* to, unsigned* pIterVar); bool optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenTree* init, unsigned iterVar); bool optExtractInitTestIncr( BasicBlock* head, BasicBlock* bottom, BasicBlock* exit, GenTree** ppInit, GenTree** ppTest, GenTree** ppIncr); void optFindNaturalLoops(); void optIdentifyLoopsForAlignment(); // Ensures that all the loops in the loop nest rooted at "loopInd" (an index into the loop table) are 'canonical' -- // each loop has a unique "top." Returns "true" iff the flowgraph has been modified. bool optCanonicalizeLoopNest(unsigned char loopInd); // Ensures that the loop "loopInd" (an index into the loop table) is 'canonical' -- it has a unique "top," // unshared with any other loop. Returns "true" iff the flowgraph has been modified bool optCanonicalizeLoop(unsigned char loopInd); // Requires "l1" to be a valid loop table index, and not "BasicBlock::NOT_IN_LOOP". // Requires "l2" to be a valid loop table index, or else "BasicBlock::NOT_IN_LOOP". // Returns true iff "l2" is not NOT_IN_LOOP, and "l1" contains "l2". // A loop contains itself. bool optLoopContains(unsigned l1, unsigned l2) const; // Updates the loop table by changing loop "loopInd", whose head is required // to be "from", to be "to". Also performs this transformation for any // loop nested in "loopInd" that shares the same head as "loopInd". void optUpdateLoopHead(unsigned loopInd, BasicBlock* from, BasicBlock* to); void optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, const bool updatePreds = false); // Marks the containsCall information to "lnum" and any parent loops. void AddContainsCallAllContainingLoops(unsigned lnum); // Adds the variable liveness information from 'blk' to "lnum" and any parent loops. void AddVariableLivenessAllContainingLoops(unsigned lnum, BasicBlock* blk); // Adds "fldHnd" to the set of modified fields of "lnum" and any parent loops. void AddModifiedFieldAllContainingLoops(unsigned lnum, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind); // Adds "elemType" to the set of modified array element types of "lnum" and any parent loops. void AddModifiedElemTypeAllContainingLoops(unsigned lnum, CORINFO_CLASS_HANDLE elemType); // Requires that "from" and "to" have the same "bbJumpKind" (perhaps because "to" is a clone // of "from".) Copies the jump destination from "from" to "to". void optCopyBlkDest(BasicBlock* from, BasicBlock* to); // Returns true if 'block' is an entry block for any loop in 'optLoopTable' bool optIsLoopEntry(BasicBlock* block) const; // The depth of the loop described by "lnum" (an index into the loop table.) 
(0 == top level) unsigned optLoopDepth(unsigned lnum) { assert(lnum < optLoopCount); unsigned depth = 0; while ((lnum = optLoopTable[lnum].lpParent) != BasicBlock::NOT_IN_LOOP) { ++depth; } return depth; } // Struct used in optInvertWhileLoop to count interesting constructs to boost the profitability score. struct OptInvertCountTreeInfoType { int sharedStaticHelperCount; int arrayLengthCount; }; static fgWalkResult optInvertCountTreeInfo(GenTree** pTree, fgWalkData* data); bool optInvertWhileLoop(BasicBlock* block); private: static bool optIterSmallOverflow(int iterAtExit, var_types incrType); static bool optIterSmallUnderflow(int iterAtExit, var_types decrType); bool optComputeLoopRep(int constInit, int constLimit, int iterInc, genTreeOps iterOper, var_types iterType, genTreeOps testOper, bool unsignedTest, bool dupCond, unsigned* iterCount); static fgWalkPreFn optIsVarAssgCB; protected: bool optIsVarAssigned(BasicBlock* beg, BasicBlock* end, GenTree* skip, unsigned var); bool optIsVarAssgLoop(unsigned lnum, unsigned var); int optIsSetAssgLoop(unsigned lnum, ALLVARSET_VALARG_TP vars, varRefKinds inds = VR_NONE); bool optNarrowTree(GenTree* tree, var_types srct, var_types dstt, ValueNumPair vnpNarrow, bool doit); protected: // The following is the upper limit on how many expressions we'll keep track // of for the CSE analysis. // static const unsigned MAX_CSE_CNT = EXPSET_SZ; static const int MIN_CSE_COST = 2; // BitVec trait information only used by the optCSE_canSwap() method, for the CSE_defMask and CSE_useMask. // This BitVec uses one bit per CSE candidate BitVecTraits* cseMaskTraits; // one bit per CSE candidate // BitVec trait information for computing CSE availability using the CSE_DataFlow algorithm. // Two bits are allocated per CSE candidate to compute CSE availability // plus an extra bit to handle the initial unvisited case. // (See CSE_DataFlow::EndMerge for an explanation of why this is necessary.) // // The two bits per CSE candidate have the following meanings: // 11 - The CSE is available, and is also available when considering calls as killing availability. // 10 - The CSE is available, but is not available when considering calls as killing availability. // 00 - The CSE is not available // 01 - An illegal combination // BitVecTraits* cseLivenessTraits; //----------------------------------------------------------------------------------------------------------------- // getCSEnum2bit: Return the normalized index to use in the EXPSET_TP for the CSE with the given CSE index. // Each GenTree has a `gtCSEnum` field. Zero is reserved to mean this node is not a CSE, positive values indicate // CSE uses, and negative values indicate CSE defs. The caller must pass a non-zero positive value, as from // GET_CSE_INDEX(). // static unsigned genCSEnum2bit(unsigned CSEnum) { assert((CSEnum > 0) && (CSEnum <= MAX_CSE_CNT)); return CSEnum - 1; } //----------------------------------------------------------------------------------------------------------------- // getCSEAvailBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit for a CSE. // static unsigned getCSEAvailBit(unsigned CSEnum) { return genCSEnum2bit(CSEnum) * 2; } //----------------------------------------------------------------------------------------------------------------- // getCSEAvailCrossCallBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit // for a CSE considering calls as killing availability bit (see description above). 
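    // For example (illustrative), for the CSE candidate with index 3:
    //   genCSEnum2bit(3) == 2, getCSEAvailBit(3) == 4, and getCSEAvailCrossCallBit(3) == 5,
    // so in bbCseGen (and the other CSE dataflow sets) bit 4 means "CSE #03 is available" and
    // bit 5 means "CSE #03 is also available when calls are treated as killing availability".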
// static unsigned getCSEAvailCrossCallBit(unsigned CSEnum) { return getCSEAvailBit(CSEnum) + 1; } void optPrintCSEDataFlowSet(EXPSET_VALARG_TP cseDataFlowSet, bool includeBits = true); EXPSET_TP cseCallKillsMask; // Computed once - A mask that is used to kill available CSEs at callsites /* Generic list of nodes - used by the CSE logic */ struct treeLst { treeLst* tlNext; GenTree* tlTree; }; struct treeStmtLst { treeStmtLst* tslNext; GenTree* tslTree; // tree node Statement* tslStmt; // statement containing the tree BasicBlock* tslBlock; // block containing the statement }; // The following logic keeps track of expressions via a simple hash table. struct CSEdsc { CSEdsc* csdNextInBucket; // used by the hash table size_t csdHashKey; // the orginal hashkey ssize_t csdConstDefValue; // When we CSE similar constants, this is the value that we use as the def ValueNum csdConstDefVN; // When we CSE similar constants, this is the ValueNumber that we use for the LclVar // assignment unsigned csdIndex; // 1..optCSECandidateCount bool csdIsSharedConst; // true if this CSE is a shared const bool csdLiveAcrossCall; unsigned short csdDefCount; // definition count unsigned short csdUseCount; // use count (excluding the implicit uses at defs) weight_t csdDefWtCnt; // weighted def count weight_t csdUseWtCnt; // weighted use count (excluding the implicit uses at defs) GenTree* csdTree; // treenode containing the 1st occurrence Statement* csdStmt; // stmt containing the 1st occurrence BasicBlock* csdBlock; // block containing the 1st occurrence treeStmtLst* csdTreeList; // list of matching tree nodes: head treeStmtLst* csdTreeLast; // list of matching tree nodes: tail // ToDo: This can be removed when gtGetStructHandleIfPresent stops guessing // and GT_IND nodes always have valid struct handle. // CORINFO_CLASS_HANDLE csdStructHnd; // The class handle, currently needed to create a SIMD LclVar in PerformCSE bool csdStructHndMismatch; ValueNum defExcSetPromise; // The exception set that is now required for all defs of this CSE. // This will be set to NoVN if we decide to abandon this CSE ValueNum defExcSetCurrent; // The set of exceptions we currently can use for CSE uses. ValueNum defConservNormVN; // if all def occurrences share the same conservative normal value // number, this will reflect it; otherwise, NoVN. // not used for shared const CSE's }; static const size_t s_optCSEhashSizeInitial; static const size_t s_optCSEhashGrowthFactor; static const size_t s_optCSEhashBucketSize; size_t optCSEhashSize; // The current size of hashtable size_t optCSEhashCount; // Number of entries in hashtable size_t optCSEhashMaxCountBeforeResize; // Number of entries before resize CSEdsc** optCSEhash; CSEdsc** optCSEtab; typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, GenTree*> NodeToNodeMap; NodeToNodeMap* optCseCheckedBoundMap; // Maps bound nodes to ancestor compares that should be // re-numbered with the bound to improve range check elimination // Given a compare, look for a cse candidate checked bound feeding it and add a map entry if found. 
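    // For example (illustrative), for a compare "i < a.Length" where the VN of "a.Length" is a
    // CSE candidate checked bound, the bound's node is mapped to the compare so that, if the
    // bound is CSE'd, the compare can be re-numbered using the CSE and range check elimination
    // still recognizes the pattern.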
void optCseUpdateCheckedBoundMap(GenTree* compare); void optCSEstop(); CSEdsc* optCSEfindDsc(unsigned index); bool optUnmarkCSE(GenTree* tree); // user defined callback data for the tree walk function optCSE_MaskHelper() struct optCSE_MaskData { EXPSET_TP CSE_defMask; EXPSET_TP CSE_useMask; }; // Treewalk helper for optCSE_DefMask and optCSE_UseMask static fgWalkPreFn optCSE_MaskHelper; // This function walks all the node for an given tree // and return the mask of CSE definitions and uses for the tree // void optCSE_GetMaskData(GenTree* tree, optCSE_MaskData* pMaskData); // Given a binary tree node return true if it is safe to swap the order of evaluation for op1 and op2. bool optCSE_canSwap(GenTree* firstNode, GenTree* secondNode); struct optCSEcostCmpEx { bool operator()(const CSEdsc* op1, const CSEdsc* op2); }; struct optCSEcostCmpSz { bool operator()(const CSEdsc* op1, const CSEdsc* op2); }; void optCleanupCSEs(); #ifdef DEBUG void optEnsureClearCSEInfo(); #endif // DEBUG static bool Is_Shared_Const_CSE(size_t key) { return ((key & TARGET_SIGN_BIT) != 0); } // returns the encoded key static size_t Encode_Shared_Const_CSE_Value(size_t key) { return TARGET_SIGN_BIT | (key >> CSE_CONST_SHARED_LOW_BITS); } // returns the orginal key static size_t Decode_Shared_Const_CSE_Value(size_t enckey) { assert(Is_Shared_Const_CSE(enckey)); return (enckey & ~TARGET_SIGN_BIT) << CSE_CONST_SHARED_LOW_BITS; } /************************************************************************** * Value Number based CSEs *************************************************************************/ // String to use for formatting CSE numbers. Note that this is the positive number, e.g., from GET_CSE_INDEX(). #define FMT_CSE "CSE #%02u" public: void optOptimizeValnumCSEs(); protected: void optValnumCSE_Init(); unsigned optValnumCSE_Index(GenTree* tree, Statement* stmt); bool optValnumCSE_Locate(); void optValnumCSE_InitDataFlow(); void optValnumCSE_DataFlow(); void optValnumCSE_Availablity(); void optValnumCSE_Heuristic(); bool optDoCSE; // True when we have found a duplicate CSE tree bool optValnumCSE_phase; // True when we are executing the optOptimizeValnumCSEs() phase unsigned optCSECandidateCount; // Count of CSE's candidates unsigned optCSEstart; // The first local variable number that is a CSE unsigned optCSEcount; // The total count of CSE's introduced. weight_t optCSEweight; // The weight of the current block when we are doing PerformCSE bool optIsCSEcandidate(GenTree* tree); // lclNumIsTrueCSE returns true if the LclVar was introduced by the CSE phase of the compiler // bool lclNumIsTrueCSE(unsigned lclNum) const { return ((optCSEcount > 0) && (lclNum >= optCSEstart) && (lclNum < optCSEstart + optCSEcount)); } // lclNumIsCSE returns true if the LclVar should be treated like a CSE with regards to constant prop. // bool lclNumIsCSE(unsigned lclNum) const { return lvaGetDesc(lclNum)->lvIsCSE; } #ifdef DEBUG bool optConfigDisableCSE(); bool optConfigDisableCSE2(); #endif void optOptimizeCSEs(); struct isVarAssgDsc { GenTree* ivaSkip; ALLVARSET_TP ivaMaskVal; // Set of variables assigned to. This is a set of all vars, not tracked vars. #ifdef DEBUG void* ivaSelf; #endif unsigned ivaVar; // Variable we are interested in, or -1 varRefKinds ivaMaskInd; // What kind of indirect assignments are there? callInterf ivaMaskCall; // What kind of calls are there? bool ivaMaskIncomplete; // Variables not representable in ivaMaskVal were assigned to. 
}; static callInterf optCallInterf(GenTreeCall* call); public: // VN based copy propagation. // In DEBUG builds, we'd like to know the tree that the SSA definition was pushed for. // While for ordinary SSA defs it will be available (as an ASG) in the SSA descriptor, // for locals which will use "definitions from uses", it will not be, so we store it // in this class instead. class CopyPropSsaDef { LclSsaVarDsc* m_ssaDef; #ifdef DEBUG GenTree* m_defNode; #endif public: CopyPropSsaDef(LclSsaVarDsc* ssaDef, GenTree* defNode) : m_ssaDef(ssaDef) #ifdef DEBUG , m_defNode(defNode) #endif { } LclSsaVarDsc* GetSsaDef() const { return m_ssaDef; } #ifdef DEBUG GenTree* GetDefNode() const { return m_defNode; } #endif }; typedef ArrayStack<CopyPropSsaDef> CopyPropSsaDefStack; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, CopyPropSsaDefStack*> LclNumToLiveDefsMap; // Copy propagation functions. void optCopyProp(Statement* stmt, GenTreeLclVarCommon* tree, unsigned lclNum, LclNumToLiveDefsMap* curSsaName); void optBlockCopyPropPopStacks(BasicBlock* block, LclNumToLiveDefsMap* curSsaName); void optBlockCopyProp(BasicBlock* block, LclNumToLiveDefsMap* curSsaName); void optCopyPropPushDef(GenTreeOp* asg, GenTreeLclVarCommon* lclNode, unsigned lclNum, LclNumToLiveDefsMap* curSsaName); unsigned optIsSsaLocal(GenTreeLclVarCommon* lclNode); int optCopyProp_LclVarScore(const LclVarDsc* lclVarDsc, const LclVarDsc* copyVarDsc, bool preferOp2); void optVnCopyProp(); INDEBUG(void optDumpCopyPropStack(LclNumToLiveDefsMap* curSsaName)); /************************************************************************** * Early value propagation *************************************************************************/ struct SSAName { unsigned m_lvNum; unsigned m_ssaNum; SSAName(unsigned lvNum, unsigned ssaNum) : m_lvNum(lvNum), m_ssaNum(ssaNum) { } static unsigned GetHashCode(SSAName ssaNm) { return (ssaNm.m_lvNum << 16) | (ssaNm.m_ssaNum); } static bool Equals(SSAName ssaNm1, SSAName ssaNm2) { return (ssaNm1.m_lvNum == ssaNm2.m_lvNum) && (ssaNm1.m_ssaNum == ssaNm2.m_ssaNum); } }; #define OMF_HAS_NEWARRAY 0x00000001 // Method contains 'new' of an array #define OMF_HAS_NEWOBJ 0x00000002 // Method contains 'new' of an object type. #define OMF_HAS_ARRAYREF 0x00000004 // Method contains array element loads or stores. #define OMF_HAS_NULLCHECK 0x00000008 // Method contains null check. #define OMF_HAS_FATPOINTER 0x00000010 // Method contains call, that needs fat pointer transformation. #define OMF_HAS_OBJSTACKALLOC 0x00000020 // Method contains an object allocated on the stack. #define OMF_HAS_GUARDEDDEVIRT 0x00000040 // Method contains guarded devirtualization candidate #define OMF_HAS_EXPRUNTIMELOOKUP 0x00000080 // Method contains a runtime lookup to an expandable dictionary. #define OMF_HAS_PATCHPOINT 0x00000100 // Method contains patchpoints #define OMF_NEEDS_GCPOLLS 0x00000200 // Method needs GC polls #define OMF_HAS_FROZEN_STRING 0x00000400 // Method has a frozen string (REF constant int), currently only on CoreRT. 
#define OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT 0x00000800 // Method contains partial compilation patchpoints #define OMF_HAS_TAILCALL_SUCCESSOR 0x00001000 // Method has potential tail call in a non BBJ_RETURN block bool doesMethodHaveFatPointer() { return (optMethodFlags & OMF_HAS_FATPOINTER) != 0; } void setMethodHasFatPointer() { optMethodFlags |= OMF_HAS_FATPOINTER; } void clearMethodHasFatPointer() { optMethodFlags &= ~OMF_HAS_FATPOINTER; } void addFatPointerCandidate(GenTreeCall* call); bool doesMethodHaveFrozenString() const { return (optMethodFlags & OMF_HAS_FROZEN_STRING) != 0; } void setMethodHasFrozenString() { optMethodFlags |= OMF_HAS_FROZEN_STRING; } bool doesMethodHaveGuardedDevirtualization() const { return (optMethodFlags & OMF_HAS_GUARDEDDEVIRT) != 0; } void setMethodHasGuardedDevirtualization() { optMethodFlags |= OMF_HAS_GUARDEDDEVIRT; } void clearMethodHasGuardedDevirtualization() { optMethodFlags &= ~OMF_HAS_GUARDEDDEVIRT; } void considerGuardedDevirtualization(GenTreeCall* call, IL_OFFSET ilOffset, bool isInterface, CORINFO_METHOD_HANDLE baseMethod, CORINFO_CLASS_HANDLE baseClass, CORINFO_CONTEXT_HANDLE* pContextHandle DEBUGARG(CORINFO_CLASS_HANDLE objClass) DEBUGARG(const char* objClassName)); void addGuardedDevirtualizationCandidate(GenTreeCall* call, CORINFO_METHOD_HANDLE methodHandle, CORINFO_CLASS_HANDLE classHandle, unsigned methodAttr, unsigned classAttr, unsigned likelihood); bool doesMethodHaveExpRuntimeLookup() { return (optMethodFlags & OMF_HAS_EXPRUNTIMELOOKUP) != 0; } void setMethodHasExpRuntimeLookup() { optMethodFlags |= OMF_HAS_EXPRUNTIMELOOKUP; } void clearMethodHasExpRuntimeLookup() { optMethodFlags &= ~OMF_HAS_EXPRUNTIMELOOKUP; } void addExpRuntimeLookupCandidate(GenTreeCall* call); bool doesMethodHavePatchpoints() { return (optMethodFlags & OMF_HAS_PATCHPOINT) != 0; } void setMethodHasPatchpoint() { optMethodFlags |= OMF_HAS_PATCHPOINT; } bool doesMethodHavePartialCompilationPatchpoints() { return (optMethodFlags & OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT) != 0; } void setMethodHasPartialCompilationPatchpoint() { optMethodFlags |= OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT; } unsigned optMethodFlags; bool doesMethodHaveNoReturnCalls() { return optNoReturnCallCount > 0; } void setMethodHasNoReturnCalls() { optNoReturnCallCount++; } unsigned optNoReturnCallCount; // Recursion bound controls how far we can go backwards tracking for a SSA value. // No throughput diff was found with backward walk bound between 3-8. 
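    // For example (illustrative), the backward walk in optPropGetValueRec stops once it has
    // followed optEarlyPropRecurBound SSA definitions without reaching a value it can use
    // (such as the array allocation that determines an array length for OPK_ARRAYLEN).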
static const int optEarlyPropRecurBound = 5; enum class optPropKind { OPK_INVALID, OPK_ARRAYLEN, OPK_NULLCHECK }; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, GenTree*> LocalNumberToNullCheckTreeMap; GenTree* getArrayLengthFromAllocation(GenTree* tree DEBUGARG(BasicBlock* block)); GenTree* optPropGetValueRec(unsigned lclNum, unsigned ssaNum, optPropKind valueKind, int walkDepth); GenTree* optPropGetValue(unsigned lclNum, unsigned ssaNum, optPropKind valueKind); GenTree* optEarlyPropRewriteTree(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); bool optDoEarlyPropForBlock(BasicBlock* block); bool optDoEarlyPropForFunc(); void optEarlyProp(); void optFoldNullCheck(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); GenTree* optFindNullCheckToFold(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); bool optIsNullCheckFoldingLegal(GenTree* tree, GenTree* nullCheckTree, GenTree** nullCheckParent, Statement** nullCheckStmt); bool optCanMoveNullCheckPastTree(GenTree* tree, unsigned nullCheckLclNum, bool isInsideTry, bool checkSideEffectSummary); #if DEBUG void optCheckFlagsAreSet(unsigned methodFlag, const char* methodFlagStr, unsigned bbFlag, const char* bbFlagStr, GenTree* tree, BasicBlock* basicBlock); #endif // Redundant branch opts // PhaseStatus optRedundantBranches(); bool optRedundantRelop(BasicBlock* const block); bool optRedundantBranch(BasicBlock* const block); bool optJumpThread(BasicBlock* const block, BasicBlock* const domBlock, bool domIsSameRelop); bool optReachable(BasicBlock* const fromBlock, BasicBlock* const toBlock, BasicBlock* const excludedBlock); /************************************************************************** * Value/Assertion propagation *************************************************************************/ public: // Data structures for assertion prop BitVecTraits* apTraits; ASSERT_TP apFull; enum optAssertionKind { OAK_INVALID, OAK_EQUAL, OAK_NOT_EQUAL, OAK_SUBRANGE, OAK_NO_THROW, OAK_COUNT }; enum optOp1Kind { O1K_INVALID, O1K_LCLVAR, O1K_ARR_BND, O1K_BOUND_OPER_BND, O1K_BOUND_LOOP_BND, O1K_CONSTANT_LOOP_BND, O1K_CONSTANT_LOOP_BND_UN, O1K_EXACT_TYPE, O1K_SUBTYPE, O1K_VALUE_NUMBER, O1K_COUNT }; enum optOp2Kind { O2K_INVALID, O2K_LCLVAR_COPY, O2K_IND_CNS_INT, O2K_CONST_INT, O2K_CONST_LONG, O2K_CONST_DOUBLE, O2K_ZEROOBJ, O2K_SUBRANGE, O2K_COUNT }; struct AssertionDsc { optAssertionKind assertionKind; struct SsaVar { unsigned lclNum; // assigned to or property of this local var number unsigned ssaNum; }; struct ArrBnd { ValueNum vnIdx; ValueNum vnLen; }; struct AssertionDscOp1 { optOp1Kind kind; // a normal LclVar, or Exact-type or Subtype ValueNum vn; union { SsaVar lcl; ArrBnd bnd; }; } op1; struct AssertionDscOp2 { optOp2Kind kind; // a const or copy assignment ValueNum vn; struct IntVal { ssize_t iconVal; // integer #if !defined(HOST_64BIT) unsigned padding; // unused; ensures iconFlags does not overlap lconVal #endif GenTreeFlags iconFlags; // gtFlags }; union { struct { SsaVar lcl; FieldSeqNode* zeroOffsetFieldSeq; }; IntVal u1; __int64 lconVal; double dconVal; IntegralRange u2; }; } op2; bool IsCheckedBoundArithBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_OPER_BND); } bool IsCheckedBoundBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_LOOP_BND); } bool IsConstantBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && (op1.kind == O1K_CONSTANT_LOOP_BND)); } 
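        // Example encodings (illustrative): an assertion "V02 == 5" uses OAK_EQUAL with
        // op1.kind == O1K_LCLVAR and op2.kind == O2K_CONST_INT (see IsConstantInt32Assertion
        // below); a copy assertion "V02 == V03" uses OAK_EQUAL with op2.kind == O2K_LCLVAR_COPY
        // (see IsCopyAssertion below).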
bool IsConstantBoundUnsigned() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && (op1.kind == O1K_CONSTANT_LOOP_BND_UN)); } bool IsBoundsCheckNoThrow() { return ((assertionKind == OAK_NO_THROW) && (op1.kind == O1K_ARR_BND)); } bool IsCopyAssertion() { return ((assertionKind == OAK_EQUAL) && (op1.kind == O1K_LCLVAR) && (op2.kind == O2K_LCLVAR_COPY)); } bool IsConstantInt32Assertion() { return ((assertionKind == OAK_EQUAL) || (assertionKind == OAK_NOT_EQUAL)) && (op2.kind == O2K_CONST_INT); } static bool SameKind(AssertionDsc* a1, AssertionDsc* a2) { return a1->assertionKind == a2->assertionKind && a1->op1.kind == a2->op1.kind && a1->op2.kind == a2->op2.kind; } static bool ComplementaryKind(optAssertionKind kind, optAssertionKind kind2) { if (kind == OAK_EQUAL) { return kind2 == OAK_NOT_EQUAL; } else if (kind == OAK_NOT_EQUAL) { return kind2 == OAK_EQUAL; } return false; } bool HasSameOp1(AssertionDsc* that, bool vnBased) { if (op1.kind != that->op1.kind) { return false; } else if (op1.kind == O1K_ARR_BND) { assert(vnBased); return (op1.bnd.vnIdx == that->op1.bnd.vnIdx) && (op1.bnd.vnLen == that->op1.bnd.vnLen); } else { return ((vnBased && (op1.vn == that->op1.vn)) || (!vnBased && (op1.lcl.lclNum == that->op1.lcl.lclNum))); } } bool HasSameOp2(AssertionDsc* that, bool vnBased) { if (op2.kind != that->op2.kind) { return false; } switch (op2.kind) { case O2K_IND_CNS_INT: case O2K_CONST_INT: return ((op2.u1.iconVal == that->op2.u1.iconVal) && (op2.u1.iconFlags == that->op2.u1.iconFlags)); case O2K_CONST_LONG: return (op2.lconVal == that->op2.lconVal); case O2K_CONST_DOUBLE: // exact match because of positive and negative zero. return (memcmp(&op2.dconVal, &that->op2.dconVal, sizeof(double)) == 0); case O2K_ZEROOBJ: return true; case O2K_LCLVAR_COPY: return (op2.lcl.lclNum == that->op2.lcl.lclNum) && (!vnBased || op2.lcl.ssaNum == that->op2.lcl.ssaNum) && (op2.zeroOffsetFieldSeq == that->op2.zeroOffsetFieldSeq); case O2K_SUBRANGE: return op2.u2.Equals(that->op2.u2); case O2K_INVALID: // we will return false break; default: assert(!"Unexpected value for op2.kind in AssertionDsc."); break; } return false; } bool Complementary(AssertionDsc* that, bool vnBased) { return ComplementaryKind(assertionKind, that->assertionKind) && HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased); } bool Equals(AssertionDsc* that, bool vnBased) { if (assertionKind != that->assertionKind) { return false; } else if (assertionKind == OAK_NO_THROW) { assert(op2.kind == O2K_INVALID); return HasSameOp1(that, vnBased); } else { return HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased); } } }; protected: static fgWalkPreFn optAddCopiesCallback; static fgWalkPreFn optVNAssertionPropCurStmtVisitor; unsigned optAddCopyLclNum; GenTree* optAddCopyAsgnNode; bool optLocalAssertionProp; // indicates that we are performing local assertion prop bool optAssertionPropagated; // set to true if we modified the trees bool optAssertionPropagatedCurrentStmt; #ifdef DEBUG GenTree* optAssertionPropCurrentTree; #endif AssertionIndex* optComplementaryAssertionMap; JitExpandArray<ASSERT_TP>* optAssertionDep; // table that holds dependent assertions (assertions // using the value of a local var) for each local var AssertionDsc* optAssertionTabPrivate; // table that holds info about value assignments AssertionIndex optAssertionCount; // total number of assertions in the assertion table AssertionIndex optMaxAssertionCount; public: void optVnNonNullPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree); 
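    // For example (illustrative), when the address feeding an indirection has a value number that
    // the active assertions prove non-null, optVnNonNullPropCurStmt can mark the indirection (or
    // call) as not needing its implicit null check, so the corresponding check becomes removable.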
fgWalkResult optVNConstantPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree); GenTree* optVNConstantPropOnJTrue(BasicBlock* block, GenTree* test); GenTree* optVNConstantPropOnTree(BasicBlock* block, GenTree* tree); GenTree* optExtractSideEffListFromConst(GenTree* tree); AssertionIndex GetAssertionCount() { return optAssertionCount; } ASSERT_TP* bbJtrueAssertionOut; typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, ASSERT_TP> ValueNumToAssertsMap; ValueNumToAssertsMap* optValueNumToAsserts; // Assertion prop helpers. ASSERT_TP& GetAssertionDep(unsigned lclNum); AssertionDsc* optGetAssertion(AssertionIndex assertIndex); void optAssertionInit(bool isLocalProp); void optAssertionTraitsInit(AssertionIndex assertionCount); void optAssertionReset(AssertionIndex limit); void optAssertionRemove(AssertionIndex index); // Assertion prop data flow functions. void optAssertionPropMain(); Statement* optVNAssertionPropCurStmt(BasicBlock* block, Statement* stmt); bool optIsTreeKnownIntValue(bool vnBased, GenTree* tree, ssize_t* pConstant, GenTreeFlags* pIconFlags); ASSERT_TP* optInitAssertionDataflowFlags(); ASSERT_TP* optComputeAssertionGen(); // Assertion Gen functions. void optAssertionGen(GenTree* tree); AssertionIndex optAssertionGenCast(GenTreeCast* cast); AssertionIndex optAssertionGenPhiDefn(GenTree* tree); AssertionInfo optCreateJTrueBoundsAssertion(GenTree* tree); AssertionInfo optAssertionGenJtrue(GenTree* tree); AssertionIndex optCreateJtrueAssertions(GenTree* op1, GenTree* op2, Compiler::optAssertionKind assertionKind, bool helperCallArgs = false); AssertionIndex optFindComplementary(AssertionIndex assertionIndex); void optMapComplementary(AssertionIndex assertionIndex, AssertionIndex index); // Assertion creation functions. AssertionIndex optCreateAssertion(GenTree* op1, GenTree* op2, optAssertionKind assertionKind, bool helperCallArgs = false); AssertionIndex optFinalizeCreatingAssertion(AssertionDsc* assertion); bool optTryExtractSubrangeAssertion(GenTree* source, IntegralRange* pRange); void optCreateComplementaryAssertion(AssertionIndex assertionIndex, GenTree* op1, GenTree* op2, bool helperCallArgs = false); bool optAssertionVnInvolvesNan(AssertionDsc* assertion); AssertionIndex optAddAssertion(AssertionDsc* assertion); void optAddVnAssertionMapping(ValueNum vn, AssertionIndex index); #ifdef DEBUG void optPrintVnAssertionMapping(); #endif ASSERT_TP optGetVnMappedAssertions(ValueNum vn); // Used for respective assertion propagations. AssertionIndex optAssertionIsSubrange(GenTree* tree, IntegralRange range, ASSERT_VALARG_TP assertions); AssertionIndex optAssertionIsSubtype(GenTree* tree, GenTree* methodTableArg, ASSERT_VALARG_TP assertions); AssertionIndex optAssertionIsNonNullInternal(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased)); bool optAssertionIsNonNull(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased) DEBUGARG(AssertionIndex* pIndex)); AssertionIndex optGlobalAssertionIsEqualOrNotEqual(ASSERT_VALARG_TP assertions, GenTree* op1, GenTree* op2); AssertionIndex optGlobalAssertionIsEqualOrNotEqualZero(ASSERT_VALARG_TP assertions, GenTree* op1); AssertionIndex optLocalAssertionIsEqualOrNotEqual( optOp1Kind op1Kind, unsigned lclNum, optOp2Kind op2Kind, ssize_t cnsVal, ASSERT_VALARG_TP assertions); // Assertion prop for lcl var functions. 
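    // For example (illustrative), with a live assertion "V02 == 5", optConstantAssertionProp can
    // replace a use of V02 with the constant 5; with a live copy assertion "V02 == V03",
    // optCopyAssertionProp can rewrite the use to refer to the other local instead.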
bool optAssertionProp_LclVarTypeCheck(GenTree* tree, LclVarDsc* lclVarDsc, LclVarDsc* copyVarDsc); GenTree* optCopyAssertionProp(AssertionDsc* curAssertion, GenTreeLclVarCommon* tree, Statement* stmt DEBUGARG(AssertionIndex index)); GenTree* optConstantAssertionProp(AssertionDsc* curAssertion, GenTreeLclVarCommon* tree, Statement* stmt DEBUGARG(AssertionIndex index)); bool optZeroObjAssertionProp(GenTree* tree, ASSERT_VALARG_TP assertions); // Assertion propagation functions. GenTree* optAssertionProp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt, BasicBlock* block); GenTree* optAssertionProp_LclVar(ASSERT_VALARG_TP assertions, GenTreeLclVarCommon* tree, Statement* stmt); GenTree* optAssertionProp_Asg(ASSERT_VALARG_TP assertions, GenTreeOp* asg, Statement* stmt); GenTree* optAssertionProp_Return(ASSERT_VALARG_TP assertions, GenTreeUnOp* ret, Statement* stmt); GenTree* optAssertionProp_Ind(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_Cast(ASSERT_VALARG_TP assertions, GenTreeCast* cast, Statement* stmt); GenTree* optAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call, Statement* stmt); GenTree* optAssertionProp_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_Comma(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_BndsChk(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionPropGlobal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionPropLocal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_Update(GenTree* newTree, GenTree* tree, Statement* stmt); GenTree* optNonNullAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call); // Implied assertion functions. 
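    // For example (illustrative), if the copy assertion "V02 == V03" is active and the constant
    // assertion "V03 == 5" becomes active, optImpliedByCopyAssertion can add "V02 == 5" to the
    // active set without re-deriving it from the IR.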
void optImpliedAssertions(AssertionIndex assertionIndex, ASSERT_TP& activeAssertions); void optImpliedByTypeOfAssertions(ASSERT_TP& activeAssertions); void optImpliedByCopyAssertion(AssertionDsc* copyAssertion, AssertionDsc* depAssertion, ASSERT_TP& result); void optImpliedByConstAssertion(AssertionDsc* curAssertion, ASSERT_TP& result); #ifdef DEBUG void optPrintAssertion(AssertionDsc* newAssertion, AssertionIndex assertionIndex = 0); void optPrintAssertionIndex(AssertionIndex index); void optPrintAssertionIndices(ASSERT_TP assertions); void optDebugCheckAssertion(AssertionDsc* assertion); void optDebugCheckAssertions(AssertionIndex AssertionIndex); #endif static void optDumpAssertionIndices(const char* header, ASSERT_TP assertions, const char* footer = nullptr); static void optDumpAssertionIndices(ASSERT_TP assertions, const char* footer = nullptr); void optAddCopies(); /************************************************************************** * Range checks *************************************************************************/ public: struct LoopCloneVisitorInfo { LoopCloneContext* context; unsigned loopNum; Statement* stmt; LoopCloneVisitorInfo(LoopCloneContext* context, unsigned loopNum, Statement* stmt) : context(context), loopNum(loopNum), stmt(nullptr) { } }; bool optIsStackLocalInvariant(unsigned loopNum, unsigned lclNum); bool optExtractArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum); bool optReconstructArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum); bool optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneContext* context); static fgWalkPreFn optCanOptimizeByLoopCloningVisitor; fgWalkResult optCanOptimizeByLoopCloning(GenTree* tree, LoopCloneVisitorInfo* info); bool optObtainLoopCloningOpts(LoopCloneContext* context); bool optIsLoopClonable(unsigned loopInd); bool optLoopCloningEnabled(); #ifdef DEBUG void optDebugLogLoopCloning(BasicBlock* block, Statement* insertBefore); #endif void optPerformStaticOptimizations(unsigned loopNum, LoopCloneContext* context DEBUGARG(bool fastPath)); bool optComputeDerefConditions(unsigned loopNum, LoopCloneContext* context); bool optDeriveLoopCloningConditions(unsigned loopNum, LoopCloneContext* context); BasicBlock* optInsertLoopChoiceConditions(LoopCloneContext* context, unsigned loopNum, BasicBlock* slowHead, BasicBlock* insertAfter); protected: ssize_t optGetArrayRefScaleAndIndex(GenTree* mul, GenTree** pIndex DEBUGARG(bool bRngChk)); bool optReachWithoutCall(BasicBlock* srcBB, BasicBlock* dstBB); protected: bool optLoopsMarked; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX RegAlloc XX XX XX XX Does the register allocation and puts the remaining lclVars on the stack XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: regNumber raUpdateRegStateForArg(RegState* regState, LclVarDsc* argDsc); void raMarkStkVars(); #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE #if defined(TARGET_AMD64) static bool varTypeNeedsPartialCalleeSave(var_types type) { assert(type != TYP_STRUCT); return (type == TYP_SIMD32); } #elif defined(TARGET_ARM64) static bool varTypeNeedsPartialCalleeSave(var_types type) { assert(type != TYP_STRUCT); // ARM64 ABI FP Callee save registers only require Callee to save lower 8 Bytes // For SIMD types longer than 8 bytes Caller is responsible for saving and restoring 
Upper bytes. return ((type == TYP_SIMD16) || (type == TYP_SIMD12)); } #else // !defined(TARGET_AMD64) && !defined(TARGET_ARM64) #error("Unknown target architecture for FEATURE_SIMD") #endif // !defined(TARGET_AMD64) && !defined(TARGET_ARM64) #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE protected: // Some things are used by both LSRA and regpredict allocators. FrameType rpFrameType; bool rpMustCreateEBPCalled; // Set to true after we have called rpMustCreateEBPFrame once bool rpMustCreateEBPFrame(INDEBUG(const char** wbReason)); private: Lowering* m_pLowering; // Lowering; needed to Lower IR that's added or modified after Lowering. LinearScanInterface* m_pLinearScan; // Linear Scan allocator /* raIsVarargsStackArg is called by raMaskStkVars and by lvaComputeRefCounts. It identifies the special case where a varargs function has a parameter passed on the stack, other than the special varargs handle. Such parameters require special treatment, because they cannot be tracked by the GC (their offsets in the stack are not known at compile time). */ bool raIsVarargsStackArg(unsigned lclNum) { #ifdef TARGET_X86 LclVarDsc* varDsc = lvaGetDesc(lclNum); assert(varDsc->lvIsParam); return (info.compIsVarArgs && !varDsc->lvIsRegArg && (lclNum != lvaVarargsHandleArg)); #else // TARGET_X86 return false; #endif // TARGET_X86 } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX EEInterface XX XX XX XX Get to the class and method info from the Execution Engine given XX XX tokens for the class and method XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: // Get handles void eeGetCallInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedToken, CORINFO_CALLINFO_FLAGS flags, CORINFO_CALL_INFO* pResult); void eeGetFieldInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS flags, CORINFO_FIELD_INFO* pResult); // Get the flags bool eeIsValueClass(CORINFO_CLASS_HANDLE clsHnd); bool eeIsIntrinsic(CORINFO_METHOD_HANDLE ftn); bool eeIsFieldStatic(CORINFO_FIELD_HANDLE fldHnd); var_types eeGetFieldType(CORINFO_FIELD_HANDLE fldHnd, CORINFO_CLASS_HANDLE* pStructHnd = nullptr); #if defined(DEBUG) || defined(FEATURE_JIT_METHOD_PERF) || defined(FEATURE_SIMD) || defined(TRACK_LSRA_STATS) const char* eeGetMethodName(CORINFO_METHOD_HANDLE hnd, const char** className); const char* eeGetMethodFullName(CORINFO_METHOD_HANDLE hnd); unsigned compMethodHash(CORINFO_METHOD_HANDLE methodHandle); bool eeIsNativeMethod(CORINFO_METHOD_HANDLE method); CORINFO_METHOD_HANDLE eeGetMethodHandleForNative(CORINFO_METHOD_HANDLE method); #endif var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig); var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig, bool* isPinned); CORINFO_CLASS_HANDLE eeGetArgClass(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE list); CORINFO_CLASS_HANDLE eeGetClassFromContext(CORINFO_CONTEXT_HANDLE context); unsigned eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig); static unsigned eeGetArgSizeAlignment(var_types type, bool isFloatHfa); // VOM info, method sigs void eeGetSig(unsigned sigTok, CORINFO_MODULE_HANDLE scope, CORINFO_CONTEXT_HANDLE context, CORINFO_SIG_INFO* retSig); void eeGetCallSiteSig(unsigned sigTok, CORINFO_MODULE_HANDLE scope, CORINFO_CONTEXT_HANDLE context, 
CORINFO_SIG_INFO* retSig); void eeGetMethodSig(CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* retSig, CORINFO_CLASS_HANDLE owner = nullptr); // Method entry-points, instrs CORINFO_METHOD_HANDLE eeMarkNativeTarget(CORINFO_METHOD_HANDLE method); CORINFO_EE_INFO eeInfo; bool eeInfoInitialized; CORINFO_EE_INFO* eeGetEEInfo(); // Gets the offset of a SDArray's first element static unsigned eeGetArrayDataOffset(); // Get the offset of a MDArray's first element static unsigned eeGetMDArrayDataOffset(unsigned rank); // Get the offset of a MDArray's dimension length for a given dimension. static unsigned eeGetMDArrayLengthOffset(unsigned rank, unsigned dimension); // Get the offset of a MDArray's lower bound for a given dimension. static unsigned eeGetMDArrayLowerBoundOffset(unsigned rank, unsigned dimension); GenTree* eeGetPInvokeCookie(CORINFO_SIG_INFO* szMetaSig); // Returns the page size for the target machine as reported by the EE. target_size_t eeGetPageSize() { return (target_size_t)eeGetEEInfo()->osPageSize; } //------------------------------------------------------------------------ // VirtualStubParam: virtual stub dispatch extra parameter (slot address). // // It represents Abi and target specific registers for the parameter. // class VirtualStubParamInfo { public: VirtualStubParamInfo(bool isCoreRTABI) { #if defined(TARGET_X86) reg = REG_EAX; regMask = RBM_EAX; #elif defined(TARGET_AMD64) if (isCoreRTABI) { reg = REG_R10; regMask = RBM_R10; } else { reg = REG_R11; regMask = RBM_R11; } #elif defined(TARGET_ARM) if (isCoreRTABI) { reg = REG_R12; regMask = RBM_R12; } else { reg = REG_R4; regMask = RBM_R4; } #elif defined(TARGET_ARM64) reg = REG_R11; regMask = RBM_R11; #else #error Unsupported or unset target architecture #endif } regNumber GetReg() const { return reg; } _regMask_enum GetRegMask() const { return regMask; } private: regNumber reg; _regMask_enum regMask; }; VirtualStubParamInfo* virtualStubParamInfo; bool IsTargetAbi(CORINFO_RUNTIME_ABI abi) { return eeGetEEInfo()->targetAbi == abi; } bool generateCFIUnwindCodes() { #if defined(FEATURE_CFI_SUPPORT) return TargetOS::IsUnix && IsTargetAbi(CORINFO_CORERT_ABI); #else return false; #endif } // Debugging support - Line number info void eeGetStmtOffsets(); unsigned eeBoundariesCount; ICorDebugInfo::OffsetMapping* eeBoundaries; // Boundaries to report to the EE void eeSetLIcount(unsigned count); void eeSetLIinfo(unsigned which, UNATIVE_OFFSET offs, IPmappingDscKind kind, const ILLocation& loc); void eeSetLIdone(); #ifdef DEBUG static void eeDispILOffs(IL_OFFSET offs); static void eeDispSourceMappingOffs(uint32_t offs); static void eeDispLineInfo(const ICorDebugInfo::OffsetMapping* line); void eeDispLineInfos(); #endif // DEBUG // Debugging support - Local var info void eeGetVars(); unsigned eeVarsCount; struct VarResultInfo { UNATIVE_OFFSET startOffset; UNATIVE_OFFSET endOffset; DWORD varNumber; CodeGenInterface::siVarLoc loc; } * eeVars; void eeSetLVcount(unsigned count); void eeSetLVinfo(unsigned which, UNATIVE_OFFSET startOffs, UNATIVE_OFFSET length, unsigned varNum, const CodeGenInterface::siVarLoc& loc); void eeSetLVdone(); #ifdef DEBUG void eeDispVar(ICorDebugInfo::NativeVarInfo* var); void eeDispVars(CORINFO_METHOD_HANDLE ftn, ULONG32 cVars, ICorDebugInfo::NativeVarInfo* vars); #endif // DEBUG // ICorJitInfo wrappers void eeReserveUnwindInfo(bool isFunclet, bool isColdCode, ULONG unwindSize); void eeAllocUnwindInfo(BYTE* pHotCode, BYTE* pColdCode, ULONG startOffset, ULONG endOffset, ULONG unwindSize, BYTE* pUnwindBlock, 
CorJitFuncKind funcKind); void eeSetEHcount(unsigned cEH); void eeSetEHinfo(unsigned EHnumber, const CORINFO_EH_CLAUSE* clause); WORD eeGetRelocTypeHint(void* target); // ICorStaticInfo wrapper functions bool eeTryResolveToken(CORINFO_RESOLVED_TOKEN* resolvedToken); #if defined(UNIX_AMD64_ABI) #ifdef DEBUG static void dumpSystemVClassificationType(SystemVClassificationType ct); #endif // DEBUG void eeGetSystemVAmd64PassStructInRegisterDescriptor( /*IN*/ CORINFO_CLASS_HANDLE structHnd, /*OUT*/ SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr); #endif // UNIX_AMD64_ABI template <typename ParamType> bool eeRunWithErrorTrap(void (*function)(ParamType*), ParamType* param) { return eeRunWithErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param)); } bool eeRunWithErrorTrapImp(void (*function)(void*), void* param); template <typename ParamType> bool eeRunWithSPMIErrorTrap(void (*function)(ParamType*), ParamType* param) { return eeRunWithSPMIErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param)); } bool eeRunWithSPMIErrorTrapImp(void (*function)(void*), void* param); // Utility functions const char* eeGetFieldName(CORINFO_FIELD_HANDLE fieldHnd, const char** classNamePtr = nullptr); #if defined(DEBUG) const WCHAR* eeGetCPString(size_t stringHandle); #endif const char* eeGetClassName(CORINFO_CLASS_HANDLE clsHnd); static CORINFO_METHOD_HANDLE eeFindHelper(unsigned helper); static CorInfoHelpFunc eeGetHelperNum(CORINFO_METHOD_HANDLE method); static bool IsSharedStaticHelper(GenTree* tree); static bool IsGcSafePoint(GenTreeCall* call); static CORINFO_FIELD_HANDLE eeFindJitDataOffs(unsigned jitDataOffs); // returns true/false if 'field' is a Jit Data offset static bool eeIsJitDataOffs(CORINFO_FIELD_HANDLE field); // returns a number < 0 if 'field' is not a Jit Data offset, otherwise the data offset (limited to 2GB) static int eeGetJitDataOffs(CORINFO_FIELD_HANDLE field); /*****************************************************************************/ /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX CodeGenerator XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: CodeGenInterface* codeGen; // Record the instr offset mapping to the generated code jitstd::list<IPmappingDsc> genIPmappings; #ifdef DEBUG jitstd::list<PreciseIPMapping> genPreciseIPmappings; #endif // Managed RetVal - A side hash table meant to record the mapping from a // GT_CALL node to its debug info. This info is used to emit sequence points // that can be used by debugger to determine the native offset at which the // managed RetVal will be available. // // In fact we can store debug info in a GT_CALL node. This was ruled out in // favor of a side table for two reasons: 1) We need debug info for only those // GT_CALL nodes (created during importation) that correspond to an IL call and // whose return type is other than TYP_VOID. 2) GT_CALL node is a frequently used // structure and IL offset is needed only when generating debuggable code. Therefore // it is desirable to avoid memory size penalty in retail scenarios. 
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, DebugInfo> CallSiteDebugInfoTable; CallSiteDebugInfoTable* genCallSite2DebugInfoMap; unsigned genReturnLocal; // Local number for the return value when applicable. BasicBlock* genReturnBB; // jumped to when not optimizing for speed. // The following properties are part of CodeGenContext. Getters are provided here for // convenience and backward compatibility, but the properties can only be set by invoking // the setter on CodeGenContext directly. emitter* GetEmitter() const { return codeGen->GetEmitter(); } bool isFramePointerUsed() const { return codeGen->isFramePointerUsed(); } bool GetInterruptible() { return codeGen->GetInterruptible(); } void SetInterruptible(bool value) { codeGen->SetInterruptible(value); } #if DOUBLE_ALIGN const bool genDoubleAlign() { return codeGen->doDoubleAlign(); } DWORD getCanDoubleAlign(); bool shouldDoubleAlign(unsigned refCntStk, unsigned refCntReg, weight_t refCntWtdReg, unsigned refCntStkParam, weight_t refCntWtdStkDbl); #endif // DOUBLE_ALIGN bool IsFullPtrRegMapRequired() { return codeGen->IsFullPtrRegMapRequired(); } void SetFullPtrRegMapRequired(bool value) { codeGen->SetFullPtrRegMapRequired(value); } // Things that MAY belong either in CodeGen or CodeGenContext #if defined(FEATURE_EH_FUNCLETS) FuncInfoDsc* compFuncInfos; unsigned short compCurrFuncIdx; unsigned short compFuncInfoCount; unsigned short compFuncCount() { assert(fgFuncletsCreated); return compFuncInfoCount; } #else // !FEATURE_EH_FUNCLETS // This is a no-op when there are no funclets! void genUpdateCurrentFunclet(BasicBlock* block) { return; } FuncInfoDsc compFuncInfoRoot; static const unsigned compCurrFuncIdx = 0; unsigned short compFuncCount() { return 1; } #endif // !FEATURE_EH_FUNCLETS FuncInfoDsc* funCurrentFunc(); void funSetCurrentFunc(unsigned funcIdx); FuncInfoDsc* funGetFunc(unsigned funcIdx); unsigned int funGetFuncIdx(BasicBlock* block); // LIVENESS VARSET_TP compCurLife; // current live variables GenTree* compCurLifeTree; // node after which compCurLife has been computed // Compare the given "newLife" with last set of live variables and update // codeGen "gcInfo", siScopes, "regSet" with the new variable's homes/liveness. template <bool ForCodeGen> void compChangeLife(VARSET_VALARG_TP newLife); // Update the GC's masks, register's masks and reports change on variable's homes given a set of // current live variables if changes have happened since "compCurLife". template <bool ForCodeGen> inline void compUpdateLife(VARSET_VALARG_TP newLife); // Gets a register mask that represent the kill set for a helper call since // not all JIT Helper calls follow the standard ABI on the target architecture. regMaskTP compHelperCallKillSet(CorInfoHelpFunc helper); #ifdef TARGET_ARM // Requires that "varDsc" be a promoted struct local variable being passed as an argument, beginning at // "firstArgRegNum", which is assumed to have already been aligned to the register alignment restriction of the // struct type. Adds bits to "*pArgSkippedRegMask" for any argument registers *not* used in passing "varDsc" -- // i.e., internal "holes" caused by internal alignment constraints. For example, if the struct contained an int and // a double, and we at R0 (on ARM), then R1 would be skipped, and the bit for R1 would be added to the mask. 
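    // (In that example the double field requires 8-byte alignment, so it is passed in the even/odd
    // register pair starting at R2, leaving R1 as an internal alignment "hole" whose bit is added
    // to the mask.)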
void fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc, unsigned firstArgRegNum, regMaskTP* pArgSkippedRegMask); #endif // TARGET_ARM // If "tree" is a indirection (GT_IND, or GT_OBJ) whose arg is an ADDR, whose arg is a LCL_VAR, return that LCL_VAR // node, else NULL. static GenTreeLclVar* fgIsIndirOfAddrOfLocal(GenTree* tree); // This map is indexed by GT_OBJ nodes that are address of promoted struct variables, which // have been annotated with the GTF_VAR_DEATH flag. If such a node is *not* mapped in this // table, one may assume that all the (tracked) field vars die at this GT_OBJ. Otherwise, // the node maps to a pointer to a VARSET_TP, containing set bits for each of the tracked field // vars of the promoted struct local that go dead at the given node (the set bits are the bits // for the tracked var indices of the field vars, as in a live var set). // // The map is allocated on demand so all map operations should use one of the following three // wrapper methods. NodeToVarsetPtrMap* m_promotedStructDeathVars; NodeToVarsetPtrMap* GetPromotedStructDeathVars() { if (m_promotedStructDeathVars == nullptr) { m_promotedStructDeathVars = new (getAllocator()) NodeToVarsetPtrMap(getAllocator()); } return m_promotedStructDeathVars; } void ClearPromotedStructDeathVars() { if (m_promotedStructDeathVars != nullptr) { m_promotedStructDeathVars->RemoveAll(); } } bool LookupPromotedStructDeathVars(GenTree* tree, VARSET_TP** bits) { *bits = nullptr; bool result = false; if (m_promotedStructDeathVars != nullptr) { result = m_promotedStructDeathVars->Lookup(tree, bits); } return result; } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX UnwindInfo XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #if !defined(__GNUC__) #pragma region Unwind information #endif public: // // Infrastructure functions: start/stop/reserve/emit. // void unwindBegProlog(); void unwindEndProlog(); void unwindBegEpilog(); void unwindEndEpilog(); void unwindReserve(); void unwindEmit(void* pHotCode, void* pColdCode); // // Specific unwind information functions: called by code generation to indicate a particular // prolog or epilog unwindable instruction has been generated. // void unwindPush(regNumber reg); void unwindAllocStack(unsigned size); void unwindSetFrameReg(regNumber reg, unsigned offset); void unwindSaveReg(regNumber reg, unsigned offset); #if defined(TARGET_ARM) void unwindPushMaskInt(regMaskTP mask); void unwindPushMaskFloat(regMaskTP mask); void unwindPopMaskInt(regMaskTP mask); void unwindPopMaskFloat(regMaskTP mask); void unwindBranch16(); // The epilog terminates with a 16-bit branch (e.g., "bx lr") void unwindNop(unsigned codeSizeInBytes); // Generate unwind NOP code. 'codeSizeInBytes' is 2 or 4 bytes. Only // called via unwindPadding(). void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last // instruction and the current location. #endif // TARGET_ARM #if defined(TARGET_ARM64) void unwindNop(); void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last // instruction and the current location. void unwindSaveReg(regNumber reg, int offset); // str reg, [sp, #offset] void unwindSaveRegPreindexed(regNumber reg, int offset); // str reg, [sp, #offset]! 
void unwindSaveRegPair(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset] void unwindSaveRegPairPreindexed(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset]! void unwindSaveNext(); // unwind code: save_next void unwindReturn(regNumber reg); // ret lr #endif // defined(TARGET_ARM64) // // Private "helper" functions for the unwind implementation. // private: #if defined(FEATURE_EH_FUNCLETS) void unwindGetFuncLocations(FuncInfoDsc* func, bool getHotSectionData, /* OUT */ emitLocation** ppStartLoc, /* OUT */ emitLocation** ppEndLoc); #endif // FEATURE_EH_FUNCLETS void unwindReserveFunc(FuncInfoDsc* func); void unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode); #if defined(TARGET_AMD64) || (defined(TARGET_X86) && defined(FEATURE_EH_FUNCLETS)) void unwindReserveFuncHelper(FuncInfoDsc* func, bool isHotCode); void unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pColdCode, bool isHotCode); #endif // TARGET_AMD64 || (TARGET_X86 && FEATURE_EH_FUNCLETS) UNATIVE_OFFSET unwindGetCurrentOffset(FuncInfoDsc* func); #if defined(TARGET_AMD64) void unwindBegPrologWindows(); void unwindPushWindows(regNumber reg); void unwindAllocStackWindows(unsigned size); void unwindSetFrameRegWindows(regNumber reg, unsigned offset); void unwindSaveRegWindows(regNumber reg, unsigned offset); #ifdef UNIX_AMD64_ABI void unwindSaveRegCFI(regNumber reg, unsigned offset); #endif // UNIX_AMD64_ABI #elif defined(TARGET_ARM) void unwindPushPopMaskInt(regMaskTP mask, bool useOpsize16); void unwindPushPopMaskFloat(regMaskTP mask); #endif // TARGET_ARM #if defined(FEATURE_CFI_SUPPORT) short mapRegNumToDwarfReg(regNumber reg); void createCfiCode(FuncInfoDsc* func, UNATIVE_OFFSET codeOffset, UCHAR opcode, short dwarfReg, INT offset = 0); void unwindPushPopCFI(regNumber reg); void unwindBegPrologCFI(); void unwindPushPopMaskCFI(regMaskTP regMask, bool isFloat); void unwindAllocStackCFI(unsigned size); void unwindSetFrameRegCFI(regNumber reg, unsigned offset); void unwindEmitFuncCFI(FuncInfoDsc* func, void* pHotCode, void* pColdCode); #ifdef DEBUG void DumpCfiInfo(bool isHotCode, UNATIVE_OFFSET startOffset, UNATIVE_OFFSET endOffset, DWORD cfiCodeBytes, const CFI_CODE* const pCfiCode); #endif #endif // FEATURE_CFI_SUPPORT #if !defined(__GNUC__) #pragma endregion // Note: region is NOT under !defined(__GNUC__) #endif /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX SIMD XX XX XX XX Info about SIMD types, methods and the SIMD assembly (i.e. the assembly XX XX that contains the distinguished, well-known SIMD type definitions). 
XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ bool IsBaselineSimdIsaSupported() { #ifdef FEATURE_SIMD #if defined(TARGET_XARCH) CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2; #elif defined(TARGET_ARM64) CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd; #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 return compOpportunisticallyDependsOn(minimumIsa); #else return false; #endif } #if defined(DEBUG) bool IsBaselineSimdIsaSupportedDebugOnly() { #ifdef FEATURE_SIMD #if defined(TARGET_XARCH) CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2; #elif defined(TARGET_ARM64) CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd; #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 return compIsaSupportedDebugOnly(minimumIsa); #else return false; #endif // FEATURE_SIMD } #endif // DEBUG // Get highest available level for SIMD codegen SIMDLevel getSIMDSupportLevel() { #if defined(TARGET_XARCH) if (compOpportunisticallyDependsOn(InstructionSet_AVX2)) { return SIMD_AVX2_Supported; } if (compOpportunisticallyDependsOn(InstructionSet_SSE42)) { return SIMD_SSE4_Supported; } // min bar is SSE2 return SIMD_SSE2_Supported; #else assert(!"Available instruction set(s) for SIMD codegen is not defined for target arch"); unreached(); return SIMD_Not_Supported; #endif } bool isIntrinsicType(CORINFO_CLASS_HANDLE clsHnd) { return info.compCompHnd->isIntrinsicType(clsHnd); } const char* getClassNameFromMetadata(CORINFO_CLASS_HANDLE cls, const char** namespaceName) { return info.compCompHnd->getClassNameFromMetadata(cls, namespaceName); } CORINFO_CLASS_HANDLE getTypeInstantiationArgument(CORINFO_CLASS_HANDLE cls, unsigned index) { return info.compCompHnd->getTypeInstantiationArgument(cls, index); } #ifdef FEATURE_SIMD #ifndef TARGET_ARM64 // Should we support SIMD intrinsics? bool featureSIMD; #endif // Should we recognize SIMD types? // We always do this on ARM64 to support HVA types. bool supportSIMDTypes() { #ifdef TARGET_ARM64 return true; #else return featureSIMD; #endif } // Have we identified any SIMD types? // This is currently used by struct promotion to avoid getting type information for a struct // field to see if it is a SIMD type, if we haven't seen any SIMD types or operations in // the method. bool _usesSIMDTypes; bool usesSIMDTypes() { return _usesSIMDTypes; } void setUsesSIMDTypes(bool value) { _usesSIMDTypes = value; } // This is a temp lclVar allocated on the stack as TYP_SIMD. It is used to implement intrinsics // that require indexed access to the individual fields of the vector, which is not well supported // by the hardware. It is allocated when/if such situations are encountered during Lowering. 
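// ---------------------------------------------------------------------------------------------
// Illustrative aside (not part of the original header): getSIMDSupportLevel above selects the
// highest available tier by probing ISAs from strongest to weakest, with SSE2 as the xarch floor.
// A standalone sketch of that shape; the enum, the probe stub, and its "baseline only" answer are
// invented here for illustration (the real probe is compOpportunisticallyDependsOn, which also
// records the dependency with the VM).
// ---------------------------------------------------------------------------------------------
enum class SimdLevelSketch
{
    Sse2,
    Sse4,
    Avx2
};

inline bool IsaAvailableSketch(SimdLevelSketch level)
{
    return level == SimdLevelSketch::Sse2; // pretend only the baseline ISA is present
}

inline SimdLevelSketch GetSimdSupportLevelSketch()
{
    if (IsaAvailableSketch(SimdLevelSketch::Avx2))
    {
        return SimdLevelSketch::Avx2;
    }
    if (IsaAvailableSketch(SimdLevelSketch::Sse4))
    {
        return SimdLevelSketch::Sse4;
    }
    return SimdLevelSketch::Sse2; // minimum bar
}
// ------------------------------- end of illustrative aside ------------------------------------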
unsigned lvaSIMDInitTempVarNum; struct SIMDHandlesCache { // SIMD Types CORINFO_CLASS_HANDLE SIMDFloatHandle; CORINFO_CLASS_HANDLE SIMDDoubleHandle; CORINFO_CLASS_HANDLE SIMDIntHandle; CORINFO_CLASS_HANDLE SIMDUShortHandle; CORINFO_CLASS_HANDLE SIMDUByteHandle; CORINFO_CLASS_HANDLE SIMDShortHandle; CORINFO_CLASS_HANDLE SIMDByteHandle; CORINFO_CLASS_HANDLE SIMDLongHandle; CORINFO_CLASS_HANDLE SIMDUIntHandle; CORINFO_CLASS_HANDLE SIMDULongHandle; CORINFO_CLASS_HANDLE SIMDNIntHandle; CORINFO_CLASS_HANDLE SIMDNUIntHandle; CORINFO_CLASS_HANDLE SIMDVector2Handle; CORINFO_CLASS_HANDLE SIMDVector3Handle; CORINFO_CLASS_HANDLE SIMDVector4Handle; CORINFO_CLASS_HANDLE SIMDVectorHandle; #ifdef FEATURE_HW_INTRINSICS #if defined(TARGET_ARM64) CORINFO_CLASS_HANDLE Vector64FloatHandle; CORINFO_CLASS_HANDLE Vector64DoubleHandle; CORINFO_CLASS_HANDLE Vector64IntHandle; CORINFO_CLASS_HANDLE Vector64UShortHandle; CORINFO_CLASS_HANDLE Vector64UByteHandle; CORINFO_CLASS_HANDLE Vector64ShortHandle; CORINFO_CLASS_HANDLE Vector64ByteHandle; CORINFO_CLASS_HANDLE Vector64LongHandle; CORINFO_CLASS_HANDLE Vector64UIntHandle; CORINFO_CLASS_HANDLE Vector64ULongHandle; CORINFO_CLASS_HANDLE Vector64NIntHandle; CORINFO_CLASS_HANDLE Vector64NUIntHandle; #endif // defined(TARGET_ARM64) CORINFO_CLASS_HANDLE Vector128FloatHandle; CORINFO_CLASS_HANDLE Vector128DoubleHandle; CORINFO_CLASS_HANDLE Vector128IntHandle; CORINFO_CLASS_HANDLE Vector128UShortHandle; CORINFO_CLASS_HANDLE Vector128UByteHandle; CORINFO_CLASS_HANDLE Vector128ShortHandle; CORINFO_CLASS_HANDLE Vector128ByteHandle; CORINFO_CLASS_HANDLE Vector128LongHandle; CORINFO_CLASS_HANDLE Vector128UIntHandle; CORINFO_CLASS_HANDLE Vector128ULongHandle; CORINFO_CLASS_HANDLE Vector128NIntHandle; CORINFO_CLASS_HANDLE Vector128NUIntHandle; #if defined(TARGET_XARCH) CORINFO_CLASS_HANDLE Vector256FloatHandle; CORINFO_CLASS_HANDLE Vector256DoubleHandle; CORINFO_CLASS_HANDLE Vector256IntHandle; CORINFO_CLASS_HANDLE Vector256UShortHandle; CORINFO_CLASS_HANDLE Vector256UByteHandle; CORINFO_CLASS_HANDLE Vector256ShortHandle; CORINFO_CLASS_HANDLE Vector256ByteHandle; CORINFO_CLASS_HANDLE Vector256LongHandle; CORINFO_CLASS_HANDLE Vector256UIntHandle; CORINFO_CLASS_HANDLE Vector256ULongHandle; CORINFO_CLASS_HANDLE Vector256NIntHandle; CORINFO_CLASS_HANDLE Vector256NUIntHandle; #endif // defined(TARGET_XARCH) #endif // FEATURE_HW_INTRINSICS SIMDHandlesCache() { memset(this, 0, sizeof(*this)); } }; SIMDHandlesCache* m_simdHandleCache; // Get an appropriate "zero" for the given type and class handle. GenTree* gtGetSIMDZero(var_types simdType, CorInfoType simdBaseJitType, CORINFO_CLASS_HANDLE simdHandle); // Get the handle for a SIMD type. CORINFO_CLASS_HANDLE gtGetStructHandleForSIMD(var_types simdType, CorInfoType simdBaseJitType) { if (m_simdHandleCache == nullptr) { // This may happen if the JIT generates SIMD node on its own, without importing them. // Otherwise getBaseJitTypeAndSizeOfSIMDType should have created the cache. 
return NO_CLASS_HANDLE; } if (simdBaseJitType == CORINFO_TYPE_FLOAT) { switch (simdType) { case TYP_SIMD8: return m_simdHandleCache->SIMDVector2Handle; case TYP_SIMD12: return m_simdHandleCache->SIMDVector3Handle; case TYP_SIMD16: if ((getSIMDVectorType() == TYP_SIMD32) || (m_simdHandleCache->SIMDVector4Handle != NO_CLASS_HANDLE)) { return m_simdHandleCache->SIMDVector4Handle; } break; case TYP_SIMD32: break; default: unreached(); } } assert(emitTypeSize(simdType) <= largestEnregisterableStructSize()); switch (simdBaseJitType) { case CORINFO_TYPE_FLOAT: return m_simdHandleCache->SIMDFloatHandle; case CORINFO_TYPE_DOUBLE: return m_simdHandleCache->SIMDDoubleHandle; case CORINFO_TYPE_INT: return m_simdHandleCache->SIMDIntHandle; case CORINFO_TYPE_USHORT: return m_simdHandleCache->SIMDUShortHandle; case CORINFO_TYPE_UBYTE: return m_simdHandleCache->SIMDUByteHandle; case CORINFO_TYPE_SHORT: return m_simdHandleCache->SIMDShortHandle; case CORINFO_TYPE_BYTE: return m_simdHandleCache->SIMDByteHandle; case CORINFO_TYPE_LONG: return m_simdHandleCache->SIMDLongHandle; case CORINFO_TYPE_UINT: return m_simdHandleCache->SIMDUIntHandle; case CORINFO_TYPE_ULONG: return m_simdHandleCache->SIMDULongHandle; case CORINFO_TYPE_NATIVEINT: return m_simdHandleCache->SIMDNIntHandle; case CORINFO_TYPE_NATIVEUINT: return m_simdHandleCache->SIMDNUIntHandle; default: assert(!"Didn't find a class handle for simdType"); } return NO_CLASS_HANDLE; } // Returns true if this is a SIMD type that should be considered an opaque // vector type (i.e. do not analyze or promote its fields). // Note that all but the fixed vector types are opaque, even though they may // actually be declared as having fields. bool isOpaqueSIMDType(CORINFO_CLASS_HANDLE structHandle) const { return ((m_simdHandleCache != nullptr) && (structHandle != m_simdHandleCache->SIMDVector2Handle) && (structHandle != m_simdHandleCache->SIMDVector3Handle) && (structHandle != m_simdHandleCache->SIMDVector4Handle)); } // Returns true if the tree corresponds to a TYP_SIMD lcl var. // Note that both SIMD vector args and locals are mared as lvSIMDType = true, but // type of an arg node is TYP_BYREF and a local node is TYP_SIMD or TYP_STRUCT. bool isSIMDTypeLocal(GenTree* tree) { return tree->OperIsLocal() && lvaGetDesc(tree->AsLclVarCommon())->lvSIMDType; } // Returns true if the lclVar is an opaque SIMD type. bool isOpaqueSIMDLclVar(const LclVarDsc* varDsc) const { if (!varDsc->lvSIMDType) { return false; } return isOpaqueSIMDType(varDsc->GetStructHnd()); } static bool isRelOpSIMDIntrinsic(SIMDIntrinsicID intrinsicId) { return (intrinsicId == SIMDIntrinsicEqual); } // Returns base JIT type of a TYP_SIMD local. // Returns CORINFO_TYPE_UNDEF if the local is not TYP_SIMD. 
CorInfoType getBaseJitTypeOfSIMDLocal(GenTree* tree) { if (isSIMDTypeLocal(tree)) { return lvaGetDesc(tree->AsLclVarCommon())->GetSimdBaseJitType(); } return CORINFO_TYPE_UNDEF; } bool isSIMDClass(CORINFO_CLASS_HANDLE clsHnd) { if (isIntrinsicType(clsHnd)) { const char* namespaceName = nullptr; (void)getClassNameFromMetadata(clsHnd, &namespaceName); return strcmp(namespaceName, "System.Numerics") == 0; } return false; } bool isSIMDClass(typeInfo* pTypeInfo) { return pTypeInfo->IsStruct() && isSIMDClass(pTypeInfo->GetClassHandleForValueClass()); } bool isHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd) { #ifdef FEATURE_HW_INTRINSICS if (isIntrinsicType(clsHnd)) { const char* namespaceName = nullptr; (void)getClassNameFromMetadata(clsHnd, &namespaceName); return strcmp(namespaceName, "System.Runtime.Intrinsics") == 0; } #endif // FEATURE_HW_INTRINSICS return false; } bool isHWSIMDClass(typeInfo* pTypeInfo) { #ifdef FEATURE_HW_INTRINSICS return pTypeInfo->IsStruct() && isHWSIMDClass(pTypeInfo->GetClassHandleForValueClass()); #else return false; #endif } bool isSIMDorHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd) { return isSIMDClass(clsHnd) || isHWSIMDClass(clsHnd); } bool isSIMDorHWSIMDClass(typeInfo* pTypeInfo) { return isSIMDClass(pTypeInfo) || isHWSIMDClass(pTypeInfo); } // Get the base (element) type and size in bytes for a SIMD type. Returns CORINFO_TYPE_UNDEF // if it is not a SIMD type or is an unsupported base JIT type. CorInfoType getBaseJitTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, unsigned* sizeBytes = nullptr); CorInfoType getBaseJitTypeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd) { return getBaseJitTypeAndSizeOfSIMDType(typeHnd, nullptr); } // Get SIMD Intrinsic info given the method handle. // Also sets typeHnd, argCount, baseType and sizeBytes out params. const SIMDIntrinsicInfo* getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* typeHnd, CORINFO_METHOD_HANDLE methodHnd, CORINFO_SIG_INFO* sig, bool isNewObj, unsigned* argCount, CorInfoType* simdBaseJitType, unsigned* sizeBytes); // Pops and returns GenTree node from importers type stack. // Normalizes TYP_STRUCT value in case of GT_CALL, GT_RET_EXPR and arg nodes. GenTree* impSIMDPopStack(var_types type, bool expectAddr = false, CORINFO_CLASS_HANDLE structType = nullptr); // Transforms operands and returns the SIMD intrinsic to be applied on // transformed operands to obtain given relop result. SIMDIntrinsicID impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId, CORINFO_CLASS_HANDLE typeHnd, unsigned simdVectorSize, CorInfoType* inOutBaseJitType, GenTree** op1, GenTree** op2); #if defined(TARGET_XARCH) // Transforms operands and returns the SIMD intrinsic to be applied on // transformed operands to obtain == comparison result. SIMDIntrinsicID impSIMDLongRelOpEqual(CORINFO_CLASS_HANDLE typeHnd, unsigned simdVectorSize, GenTree** op1, GenTree** op2); #endif // defined(TARGET_XARCH) void setLclRelatedToSIMDIntrinsic(GenTree* tree); bool areFieldsContiguous(GenTree* op1, GenTree* op2); bool areLocalFieldsContiguous(GenTreeLclFld* first, GenTreeLclFld* second); bool areArrayElementsContiguous(GenTree* op1, GenTree* op2); bool areArgumentsContiguous(GenTree* op1, GenTree* op2); GenTree* createAddressNodeForSIMDInit(GenTree* tree, unsigned simdSize); // check methodHnd to see if it is a SIMD method that is expanded as an intrinsic in the JIT. 
GenTree* impSIMDIntrinsic(OPCODE opcode, GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, unsigned methodFlags, int memberRef); GenTree* getOp1ForConstructor(OPCODE opcode, GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd); // Whether SIMD vector occupies part of SIMD register. // SSE2: vector2f/3f are considered sub register SIMD types. // AVX: vector2f, 3f and 4f are all considered sub register SIMD types. bool isSubRegisterSIMDType(GenTreeSIMD* simdNode) { unsigned vectorRegisterByteLength; #if defined(TARGET_XARCH) // Calling the getSIMDVectorRegisterByteLength api causes the size of Vector<T> to be recorded // with the AOT compiler, so that it cannot change from aot compilation time to runtime // This api does not require such fixing as it merely pertains to the size of the simd type // relative to the Vector<T> size as used at compile time. (So detecting a vector length of 16 here // does not preclude the code from being used on a machine with a larger vector length.) if (getSIMDSupportLevel() < SIMD_AVX2_Supported) { vectorRegisterByteLength = 16; } else { vectorRegisterByteLength = 32; } #else vectorRegisterByteLength = getSIMDVectorRegisterByteLength(); #endif return (simdNode->GetSimdSize() < vectorRegisterByteLength); } // Get the type for the hardware SIMD vector. // This is the maximum SIMD type supported for this target. var_types getSIMDVectorType() { #if defined(TARGET_XARCH) if (getSIMDSupportLevel() == SIMD_AVX2_Supported) { return TYP_SIMD32; } else { // Verify and record that AVX2 isn't supported compVerifyInstructionSetUnusable(InstructionSet_AVX2); assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported); return TYP_SIMD16; } #elif defined(TARGET_ARM64) return TYP_SIMD16; #else assert(!"getSIMDVectorType() unimplemented on target arch"); unreached(); #endif } // Get the size of the SIMD type in bytes int getSIMDTypeSizeInBytes(CORINFO_CLASS_HANDLE typeHnd) { unsigned sizeBytes = 0; (void)getBaseJitTypeAndSizeOfSIMDType(typeHnd, &sizeBytes); return sizeBytes; } // Get the the number of elements of baseType of SIMD vector given by its size and baseType static int getSIMDVectorLength(unsigned simdSize, var_types baseType); // Get the the number of elements of baseType of SIMD vector given by its type handle int getSIMDVectorLength(CORINFO_CLASS_HANDLE typeHnd); // Get preferred alignment of SIMD type. int getSIMDTypeAlignment(var_types simdType); // Get the number of bytes in a System.Numeric.Vector<T> for the current compilation. // Note - cannot be used for System.Runtime.Intrinsic unsigned getSIMDVectorRegisterByteLength() { #if defined(TARGET_XARCH) if (getSIMDSupportLevel() == SIMD_AVX2_Supported) { return YMM_REGSIZE_BYTES; } else { // Verify and record that AVX2 isn't supported compVerifyInstructionSetUnusable(InstructionSet_AVX2); assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported); return XMM_REGSIZE_BYTES; } #elif defined(TARGET_ARM64) return FP_REGSIZE_BYTES; #else assert(!"getSIMDVectorRegisterByteLength() unimplemented on target arch"); unreached(); #endif } // The minimum and maximum possible number of bytes in a SIMD vector. 
// maxSIMDStructBytes // The minimum SIMD size supported by System.Numeric.Vectors or System.Runtime.Intrinsic // SSE: 16-byte Vector<T> and Vector128<T> // AVX: 32-byte Vector256<T> (Vector<T> is 16-byte) // AVX2: 32-byte Vector<T> and Vector256<T> unsigned int maxSIMDStructBytes() { #if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH) if (compOpportunisticallyDependsOn(InstructionSet_AVX)) { return YMM_REGSIZE_BYTES; } else { // Verify and record that AVX2 isn't supported compVerifyInstructionSetUnusable(InstructionSet_AVX2); assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported); return XMM_REGSIZE_BYTES; } #else return getSIMDVectorRegisterByteLength(); #endif } unsigned int minSIMDStructBytes() { return emitTypeSize(TYP_SIMD8); } public: // Returns the codegen type for a given SIMD size. static var_types getSIMDTypeForSize(unsigned size) { var_types simdType = TYP_UNDEF; if (size == 8) { simdType = TYP_SIMD8; } else if (size == 12) { simdType = TYP_SIMD12; } else if (size == 16) { simdType = TYP_SIMD16; } else if (size == 32) { simdType = TYP_SIMD32; } else { noway_assert(!"Unexpected size for SIMD type"); } return simdType; } private: unsigned getSIMDInitTempVarNum(var_types simdType); #else // !FEATURE_SIMD bool isOpaqueSIMDLclVar(LclVarDsc* varDsc) { return false; } #endif // FEATURE_SIMD public: //------------------------------------------------------------------------ // largestEnregisterableStruct: The size in bytes of the largest struct that can be enregistered. // // Notes: It is not guaranteed that the struct of this size or smaller WILL be a // candidate for enregistration. unsigned largestEnregisterableStructSize() { #ifdef FEATURE_SIMD #if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH) if (opts.IsReadyToRun()) { // Return constant instead of maxSIMDStructBytes, as maxSIMDStructBytes performs // checks that are effected by the current level of instruction set support would // otherwise cause the highest level of instruction set support to be reported to crossgen2. // and this api is only ever used as an optimization or assert, so no reporting should // ever happen. return YMM_REGSIZE_BYTES; } #endif // defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH) unsigned vectorRegSize = maxSIMDStructBytes(); assert(vectorRegSize >= TARGET_POINTER_SIZE); return vectorRegSize; #else // !FEATURE_SIMD return TARGET_POINTER_SIZE; #endif // !FEATURE_SIMD } // Use to determine if a struct *might* be a SIMD type. As this function only takes a size, many // structs will fit the criteria. bool structSizeMightRepresentSIMDType(size_t structSize) { #ifdef FEATURE_SIMD // Do not use maxSIMDStructBytes as that api in R2R on X86 and X64 may notify the JIT // about the size of a struct under the assumption that the struct size needs to be recorded. // By using largestEnregisterableStructSize here, the detail of whether or not Vector256<T> is // enregistered or not will not be messaged to the R2R compiler. return (structSize >= minSIMDStructBytes()) && (structSize <= largestEnregisterableStructSize()); #else return false; #endif // FEATURE_SIMD } #ifdef FEATURE_SIMD static bool vnEncodesResultTypeForSIMDIntrinsic(SIMDIntrinsicID intrinsicId); #endif // !FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS static bool vnEncodesResultTypeForHWIntrinsic(NamedIntrinsic hwIntrinsicID); #endif // FEATURE_HW_INTRINSICS private: // These routines need not be enclosed under FEATURE_SIMD since lvIsSIMDType() // is defined for both FEATURE_SIMD and !FEATURE_SIMD apropriately. 
The use // of this routines also avoids the need of #ifdef FEATURE_SIMD specific code. // Is this var is of type simd struct? bool lclVarIsSIMDType(unsigned varNum) { return lvaGetDesc(varNum)->lvIsSIMDType(); } // Is this Local node a SIMD local? bool lclVarIsSIMDType(GenTreeLclVarCommon* lclVarTree) { return lclVarIsSIMDType(lclVarTree->GetLclNum()); } // Returns true if the TYP_SIMD locals on stack are aligned at their // preferred byte boundary specified by getSIMDTypeAlignment(). // // As per the Intel manual, the preferred alignment for AVX vectors is // 32-bytes. It is not clear whether additional stack space used in // aligning stack is worth the benefit and for now will use 16-byte // alignment for AVX 256-bit vectors with unaligned load/stores to/from // memory. On x86, the stack frame is aligned to 4 bytes. We need to extend // existing support for double (8-byte) alignment to 16 or 32 byte // alignment for frames with local SIMD vars, if that is determined to be // profitable. // // On Amd64 and SysV, RSP+8 is aligned on entry to the function (before // prolog has run). This means that in RBP-based frames RBP will be 16-byte // aligned. For RSP-based frames these are only sometimes aligned, depending // on the frame size. // bool isSIMDTypeLocalAligned(unsigned varNum) { #if defined(FEATURE_SIMD) && ALIGN_SIMD_TYPES if (lclVarIsSIMDType(varNum) && lvaTable[varNum].lvType != TYP_BYREF) { // TODO-Cleanup: Can't this use the lvExactSize on the varDsc? int alignment = getSIMDTypeAlignment(lvaTable[varNum].lvType); if (alignment <= STACK_ALIGN) { bool rbpBased; int off = lvaFrameAddress(varNum, &rbpBased); // On SysV and Winx64 ABIs RSP+8 will be 16-byte aligned at the // first instruction of a function. If our frame is RBP based // then RBP will always be 16 bytes aligned, so we can simply // check the offset. if (rbpBased) { return (off % alignment) == 0; } // For RSP-based frame the alignment of RSP depends on our // locals. rsp+8 is aligned on entry and we just subtract frame // size so it is not hard to compute. Note that the compiler // tries hard to make sure the frame size means RSP will be // 16-byte aligned, but for leaf functions without locals (i.e. // frameSize = 0) it will not be. int frameSize = codeGen->genTotalFrameSize(); return ((8 - frameSize + off) % alignment) == 0; } } #endif // FEATURE_SIMD return false; } #ifdef DEBUG // Answer the question: Is a particular ISA supported? // Use this api when asking the question so that future // ISA questions can be asked correctly or when asserting // support/nonsupport for an instruction set bool compIsaSupportedDebugOnly(CORINFO_InstructionSet isa) const { #if defined(TARGET_XARCH) || defined(TARGET_ARM64) return (opts.compSupportsISA & (1ULL << isa)) != 0; #else return false; #endif } #endif // DEBUG bool notifyInstructionSetUsage(CORINFO_InstructionSet isa, bool supported) const; // Answer the question: Is a particular ISA allowed to be used implicitly by optimizations? 
// The result of this api call will exactly match the target machine // on which the function is executed (except for CoreLib, where there are special rules) bool compExactlyDependsOn(CORINFO_InstructionSet isa) const { #if defined(TARGET_XARCH) || defined(TARGET_ARM64) uint64_t isaBit = (1ULL << isa); if ((opts.compSupportsISAReported & isaBit) == 0) { if (notifyInstructionSetUsage(isa, (opts.compSupportsISA & isaBit) != 0)) ((Compiler*)this)->opts.compSupportsISAExactly |= isaBit; ((Compiler*)this)->opts.compSupportsISAReported |= isaBit; } return (opts.compSupportsISAExactly & isaBit) != 0; #else return false; #endif } // Ensure that code will not execute if an instruction set is usable. Call only // if the instruction set has previously reported as unusable, but when // that that status has not yet been recorded to the AOT compiler void compVerifyInstructionSetUnusable(CORINFO_InstructionSet isa) { // use compExactlyDependsOn to capture are record the use of the isa bool isaUsable = compExactlyDependsOn(isa); // Assert that the is unusable. If true, this function should never be called. assert(!isaUsable); } // Answer the question: Is a particular ISA allowed to be used implicitly by optimizations? // The result of this api call will match the target machine if the result is true // If the result is false, then the target machine may have support for the instruction bool compOpportunisticallyDependsOn(CORINFO_InstructionSet isa) const { if ((opts.compSupportsISA & (1ULL << isa)) != 0) { return compExactlyDependsOn(isa); } else { return false; } } // Answer the question: Is a particular ISA supported for explicit hardware intrinsics? bool compHWIntrinsicDependsOn(CORINFO_InstructionSet isa) const { // Report intent to use the ISA to the EE compExactlyDependsOn(isa); return ((opts.compSupportsISA & (1ULL << isa)) != 0); } bool canUseVexEncoding() const { #ifdef TARGET_XARCH return compOpportunisticallyDependsOn(InstructionSet_AVX); #else return false; #endif } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Compiler XX XX XX XX Generic info about the compilation and the method being compiled. XX XX It is responsible for driving the other phases. XX XX It is also responsible for all the memory management. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: Compiler* InlineeCompiler; // The Compiler instance for the inlinee InlineResult* compInlineResult; // The result of importing the inlinee method. bool compDoAggressiveInlining; // If true, mark every method as CORINFO_FLG_FORCEINLINE bool compJmpOpUsed; // Does the method do a JMP bool compLongUsed; // Does the method use TYP_LONG bool compFloatingPointUsed; // Does the method use TYP_FLOAT or TYP_DOUBLE bool compTailCallUsed; // Does the method do a tailcall bool compTailPrefixSeen; // Does the method IL have tail. prefix bool compLocallocSeen; // Does the method IL have localloc opcode bool compLocallocUsed; // Does the method use localloc. bool compLocallocOptimized; // Does the method have an optimized localloc bool compQmarkUsed; // Does the method use GT_QMARK/GT_COLON bool compQmarkRationalized; // Is it allowed to use a GT_QMARK/GT_COLON node. bool compHasBackwardJump; // Does the method (or some inlinee) have a lexically backwards jump? 
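// ---------------------------------------------------------------------------------------------
// Illustrative aside (not part of the original header): a standalone sketch of the ISA-mask
// bookkeeping that compExactlyDependsOn / compOpportunisticallyDependsOn above perform. The three
// masks play the same roles as opts.compSupportsISA / compSupportsISAReported /
// compSupportsISAExactly; the struct, the int ISA ids, and the Notify stub are invented for
// illustration (the real callback is notifyInstructionSetUsage on the EE interface).
// ---------------------------------------------------------------------------------------------
#include <cstdint>

struct IsaTrackingSketch
{
    uint64_t supported = 0; // ISAs the compiler is allowed to emit
    uint64_t reported  = 0; // ISAs whose use (or non-use) has been reported to the VM
    uint64_t exact     = 0; // ISAs the VM confirmed exactly match the target machine

    // Stand-in for notifyInstructionSetUsage: here the VM simply confirms whatever is claimed.
    bool Notify(int /*isa*/, bool isSupported)
    {
        return isSupported;
    }

    // "Exactly depends on": report on first use, then answer from the recorded exact set.
    bool ExactlyDependsOn(int isa) // isa must be < 64 so it fits in the bit mask
    {
        const uint64_t bit = 1ULL << isa;
        if ((reported & bit) == 0)
        {
            if (Notify(isa, (supported & bit) != 0))
            {
                exact |= bit;
            }
            reported |= bit;
        }
        return (exact & bit) != 0;
    }

    // "Opportunistically depends on": only report (and rely on) an ISA we are allowed to emit;
    // otherwise answer false without telling the VM anything.
    bool OpportunisticallyDependsOn(int isa)
    {
        return ((supported & (1ULL << isa)) != 0) ? ExactlyDependsOn(isa) : false;
    }
};
// ------------------------------- end of illustrative aside ------------------------------------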
bool compHasBackwardJumpInHandler; // Does the method have a lexically backwards jump in a handler? bool compSwitchedToOptimized; // Codegen initially was Tier0 but jit switched to FullOpts bool compSwitchedToMinOpts; // Codegen initially was Tier1/FullOpts but jit switched to MinOpts bool compSuppressedZeroInit; // There are vars with lvSuppressedZeroInit set // NOTE: These values are only reliable after // the importing is completely finished. #ifdef DEBUG // State information - which phases have completed? // These are kept together for easy discoverability bool bRangeAllowStress; bool compCodeGenDone; int64_t compNumStatementLinksTraversed; // # of links traversed while doing debug checks bool fgNormalizeEHDone; // Has the flowgraph EH normalization phase been done? size_t compSizeEstimate; // The estimated size of the method as per `gtSetEvalOrder`. size_t compCycleEstimate; // The estimated cycle count of the method as per `gtSetEvalOrder` #endif // DEBUG bool fgLocalVarLivenessDone; // Note that this one is used outside of debug. bool fgLocalVarLivenessChanged; bool compLSRADone; bool compRationalIRForm; bool compUsesThrowHelper; // There is a call to a THROW_HELPER for the compiled method. bool compGeneratingProlog; bool compGeneratingEpilog; bool compNeedsGSSecurityCookie; // There is an unsafe buffer (or localloc) on the stack. // Insert cookie on frame and code to check the cookie, like VC++ -GS. bool compGSReorderStackLayout; // There is an unsafe buffer on the stack, reorder locals and make local // copies of susceptible parameters to avoid buffer overrun attacks through locals/params bool getNeedsGSSecurityCookie() const { return compNeedsGSSecurityCookie; } void setNeedsGSSecurityCookie() { compNeedsGSSecurityCookie = true; } FrameLayoutState lvaDoneFrameLayout; // The highest frame layout state that we've completed. During // frame layout calculations, this is the level we are currently // computing. //---------------------------- JITing options ----------------------------- enum codeOptimize { BLENDED_CODE, SMALL_CODE, FAST_CODE, COUNT_OPT_CODE }; struct Options { JitFlags* jitFlags; // all flags passed from the EE // The instruction sets that the compiler is allowed to emit. uint64_t compSupportsISA; // The instruction sets that were reported to the VM as being used by the current method. Subset of // compSupportsISA. uint64_t compSupportsISAReported; // The instruction sets that the compiler is allowed to take advantage of implicitly during optimizations. // Subset of compSupportsISA. // The instruction sets available in compSupportsISA and not available in compSupportsISAExactly can be only // used via explicit hardware intrinsics. uint64_t compSupportsISAExactly; void setSupportedISAs(CORINFO_InstructionSetFlags isas) { compSupportsISA = isas.GetFlagsRaw(); } unsigned compFlags; // method attributes unsigned instrCount; unsigned lvRefCount; codeOptimize compCodeOpt; // what type of code optimizations bool compUseCMOV; // optimize maximally and/or favor speed over size? 
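// ---------------------------------------------------------------------------------------------
// Illustrative aside (not part of the original header): the comments above document that both
// compSupportsISAReported and compSupportsISAExactly are subsets of compSupportsISA. A tiny
// standalone check of that documented invariant; the function name and parameters are invented
// for illustration.
// ---------------------------------------------------------------------------------------------
#include <cassert>
#include <cstdint>

inline void CheckIsaMaskInvariantsSketch(uint64_t supported, uint64_t reported, uint64_t exactly)
{
    assert((reported & ~supported) == 0); // nothing is reported to the VM that we cannot emit
    assert((exactly & ~supported) == 0);  // nothing is implicitly usable that we cannot emit
}
// ------------------------------- end of illustrative aside ------------------------------------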
#define DEFAULT_MIN_OPTS_CODE_SIZE 60000 #define DEFAULT_MIN_OPTS_INSTR_COUNT 20000 #define DEFAULT_MIN_OPTS_BB_COUNT 2000 #define DEFAULT_MIN_OPTS_LV_NUM_COUNT 2000 #define DEFAULT_MIN_OPTS_LV_REF_COUNT 8000 // Maximun number of locals before turning off the inlining #define MAX_LV_NUM_COUNT_FOR_INLINING 512 bool compMinOpts; bool compMinOptsIsSet; #ifdef DEBUG mutable bool compMinOptsIsUsed; bool MinOpts() const { assert(compMinOptsIsSet); compMinOptsIsUsed = true; return compMinOpts; } bool IsMinOptsSet() const { return compMinOptsIsSet; } #else // !DEBUG bool MinOpts() const { return compMinOpts; } bool IsMinOptsSet() const { return compMinOptsIsSet; } #endif // !DEBUG bool OptimizationDisabled() const { return MinOpts() || compDbgCode; } bool OptimizationEnabled() const { return !OptimizationDisabled(); } void SetMinOpts(bool val) { assert(!compMinOptsIsUsed); assert(!compMinOptsIsSet || (compMinOpts == val)); compMinOpts = val; compMinOptsIsSet = true; } // true if the CLFLG_* for an optimization is set. bool OptEnabled(unsigned optFlag) const { return !!(compFlags & optFlag); } #ifdef FEATURE_READYTORUN bool IsReadyToRun() const { return jitFlags->IsSet(JitFlags::JIT_FLAG_READYTORUN); } #else bool IsReadyToRun() const { return false; } #endif // Check if the compilation is control-flow guard enabled. bool IsCFGEnabled() const { #if defined(TARGET_ARM64) || defined(TARGET_AMD64) // On these platforms we assume the register that the target is // passed in is preserved by the validator and take care to get the // target from the register for the call (even in debug mode). static_assert_no_msg((RBM_VALIDATE_INDIRECT_CALL_TRASH & (1 << REG_VALIDATE_INDIRECT_CALL_ADDR)) == 0); if (JitConfig.JitForceControlFlowGuard()) return true; return jitFlags->IsSet(JitFlags::JIT_FLAG_ENABLE_CFG); #else // The remaining platforms are not supported and would require some // work to support. // // ARM32: // The ARM32 validator does not preserve any volatile registers // which means we have to take special care to allocate and use a // callee-saved register (reloading the target from memory is a // security issue). // // x86: // On x86 some VSD calls disassemble the call site and expect an // indirect call which is fundamentally incompatible with CFG. // This would require a different way to pass this information // through. // return false; #endif } #ifdef FEATURE_ON_STACK_REPLACEMENT bool IsOSR() const { return jitFlags->IsSet(JitFlags::JIT_FLAG_OSR); } #else bool IsOSR() const { return false; } #endif // true if we should use the PINVOKE_{BEGIN,END} helpers instead of generating // PInvoke transitions inline. Normally used by R2R, but also used when generating a reverse pinvoke frame, as // the current logic for frame setup initializes and pushes // the InlinedCallFrame before performing the Reverse PInvoke transition, which is invalid (as frames cannot // safely be pushed/popped while the thread is in a preemptive state.). bool ShouldUsePInvokeHelpers() { return jitFlags->IsSet(JitFlags::JIT_FLAG_USE_PINVOKE_HELPERS) || jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE); } // true if we should use insert the REVERSE_PINVOKE_{ENTER,EXIT} helpers in the method // prolog/epilog bool IsReversePInvoke() { return jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE); } bool compScopeInfo; // Generate the LocalVar info ? bool compDbgCode; // Generate debugger-friendly code? bool compDbgInfo; // Gather debugging info? 
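// ---------------------------------------------------------------------------------------------
// Illustrative aside (not part of the original header): a standalone sketch of the set-once
// MinOpts flag above. The debug-only "used" bit is what lets SetMinOpts assert that nobody flips
// the decision after some consumer has already observed it; the class and member names here are
// invented for illustration.
// ---------------------------------------------------------------------------------------------
#include <cassert>

class MinOptsFlagSketch
{
    bool m_value = false;
    bool m_isSet = false;
#ifndef NDEBUG
    mutable bool m_isUsed = false; // debug-only, like compMinOptsIsUsed
#endif

public:
    bool Get() const
    {
        assert(m_isSet); // reading the flag before the decision has been made is a bug
#ifndef NDEBUG
        m_isUsed = true;
#endif
        return m_value;
    }

    void Set(bool value)
    {
#ifndef NDEBUG
        assert(!m_isUsed); // too late: a consumer has already observed the flag
#endif
        assert(!m_isSet || (m_value == value)); // re-setting is only allowed with the same value
        m_value = value;
        m_isSet = true;
    }
};
// ------------------------------- end of illustrative aside ------------------------------------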
bool compDbgEnC; #ifdef PROFILING_SUPPORTED bool compNoPInvokeInlineCB; #else static const bool compNoPInvokeInlineCB; #endif #ifdef DEBUG bool compGcChecks; // Check arguments and return values to ensure they are sane #endif #if defined(DEBUG) && defined(TARGET_XARCH) bool compStackCheckOnRet; // Check stack pointer on return to ensure it is correct. #endif // defined(DEBUG) && defined(TARGET_XARCH) #if defined(DEBUG) && defined(TARGET_X86) bool compStackCheckOnCall; // Check stack pointer after call to ensure it is correct. Only for x86. #endif // defined(DEBUG) && defined(TARGET_X86) bool compReloc; // Generate relocs for pointers in code, true for all ngen/prejit codegen #ifdef DEBUG #if defined(TARGET_XARCH) bool compEnablePCRelAddr; // Whether absolute addr be encoded as PC-rel offset by RyuJIT where possible #endif #endif // DEBUG #ifdef UNIX_AMD64_ABI // This flag is indicating if there is a need to align the frame. // On AMD64-Windows, if there are calls, 4 slots for the outgoing ars are allocated, except for // FastTailCall. This slots makes the frame size non-zero, so alignment logic will be called. // On AMD64-Unix, there are no such slots. There is a possibility to have calls in the method with frame size of // 0. The frame alignment logic won't kick in. This flags takes care of the AMD64-Unix case by remembering that // there are calls and making sure the frame alignment logic is executed. bool compNeedToAlignFrame; #endif // UNIX_AMD64_ABI bool compProcedureSplitting; // Separate cold code from hot code bool genFPorder; // Preserve FP order (operations are non-commutative) bool genFPopt; // Can we do frame-pointer-omission optimization? bool altJit; // True if we are an altjit and are compiling this method #ifdef OPT_CONFIG bool optRepeat; // Repeat optimizer phases k times #endif #ifdef DEBUG bool compProcedureSplittingEH; // Separate cold code from hot code for functions with EH bool dspCode; // Display native code generated bool dspEHTable; // Display the EH table reported to the VM bool dspDebugInfo; // Display the Debug info reported to the VM bool dspInstrs; // Display the IL instructions intermixed with the native code output bool dspLines; // Display source-code lines intermixed with native code output bool dmpHex; // Display raw bytes in hex of native code output bool varNames; // Display variables names in native code output bool disAsm; // Display native code as it is generated bool disAsmSpilled; // Display native code when any register spilling occurs bool disasmWithGC; // Display GC info interleaved with disassembly. bool disDiffable; // Makes the Disassembly code 'diff-able' bool disAddr; // Display process address next to each instruction in disassembly code bool disAlignment; // Display alignment boundaries in disassembly code bool disAsm2; // Display native code after it is generated using external disassembler bool dspOrder; // Display names of each of the methods that we ngen/jit bool dspUnwind; // Display the unwind info output bool dspDiffable; // Makes the Jit Dump 'diff-able' (currently uses same COMPlus_* flag as disDiffable) bool compLongAddress; // Force using large pseudo instructions for long address // (IF_LARGEJMP/IF_LARGEADR/IF_LARGLDC) bool dspGCtbls; // Display the GC tables #endif bool compExpandCallsEarly; // True if we should expand virtual call targets early for this method // Default numbers used to perform loop alignment. All the numbers are choosen // based on experimenting with various benchmarks. 
// Default minimum loop block weight required to enable loop alignment. #define DEFAULT_ALIGN_LOOP_MIN_BLOCK_WEIGHT 4 // By default a loop will be aligned at 32B address boundary to get better // performance as per architecture manuals. #define DEFAULT_ALIGN_LOOP_BOUNDARY 0x20 // For non-adaptive loop alignment, by default, only align a loop whose size is // at most 3 times the alignment block size. If the loop is bigger than that, it is most // likely complicated enough that loop alignment will not impact performance. #define DEFAULT_MAX_LOOPSIZE_FOR_ALIGN DEFAULT_ALIGN_LOOP_BOUNDARY * 3 #ifdef DEBUG // Loop alignment variables // If set, for non-adaptive alignment, ensure loop jmps are not on or cross alignment boundary. bool compJitAlignLoopForJcc; #endif // For non-adaptive alignment, minimum loop size (in bytes) for which alignment will be done. unsigned short compJitAlignLoopMaxCodeSize; // Minimum weight needed for the first block of a loop to make it a candidate for alignment. unsigned short compJitAlignLoopMinBlockWeight; // For non-adaptive alignment, address boundary (power of 2) at which loop alignment should // be done. By default, 32B. unsigned short compJitAlignLoopBoundary; // Padding limit to align a loop. unsigned short compJitAlignPaddingLimit; // If set, perform adaptive loop alignment that limits number of padding based on loop size. bool compJitAlignLoopAdaptive; // If set, tries to hide alignment instructions behind unconditional jumps. bool compJitHideAlignBehindJmp; #ifdef LATE_DISASM bool doLateDisasm; // Run the late disassembler #endif // LATE_DISASM #if DUMP_GC_TABLES && !defined(DEBUG) #pragma message("NOTE: this non-debug build has GC ptr table dumping always enabled!") static const bool dspGCtbls = true; #endif #ifdef PROFILING_SUPPORTED // Whether to emit Enter/Leave/TailCall hooks using a dummy stub (DummyProfilerELTStub()). // This option helps make the JIT behave as if it is running under a profiler. bool compJitELTHookEnabled; #endif // PROFILING_SUPPORTED #if FEATURE_TAILCALL_OPT // Whether opportunistic or implicit tail call optimization is enabled. bool compTailCallOpt; // Whether optimization of transforming a recursive tail call into a loop is enabled. bool compTailCallLoopOpt; #endif #if FEATURE_FASTTAILCALL // Whether fast tail calls are allowed. bool compFastTailCalls; #endif // FEATURE_FASTTAILCALL #if defined(TARGET_ARM64) // Decision about whether to save FP/LR registers with callee-saved registers (see // COMPlus_JitSaveFpLrWithCalleSavedRegisters). int compJitSaveFpLrWithCalleeSavedRegisters; #endif // defined(TARGET_ARM64) #ifdef CONFIGURABLE_ARM_ABI bool compUseSoftFP = false; #else #ifdef ARM_SOFTFP static const bool compUseSoftFP = true; #else // !ARM_SOFTFP static const bool compUseSoftFP = false; #endif // ARM_SOFTFP #endif // CONFIGURABLE_ARM_ABI } opts; static bool s_pAltJitExcludeAssembliesListInitialized; static AssemblyNamesList2* s_pAltJitExcludeAssembliesList; #ifdef DEBUG static bool s_pJitDisasmIncludeAssembliesListInitialized; static AssemblyNamesList2* s_pJitDisasmIncludeAssembliesList; static bool s_pJitFunctionFileInitialized; static MethodSet* s_pJitMethodSet; #endif // DEBUG #ifdef DEBUG // silence warning of cast to greater size. It is easier to silence than construct code the compiler is happy with, and // it is safe in this case #pragma warning(push) #pragma warning(disable : 4312) template <typename T> T dspPtr(T p) { return (p == ZERO) ? ZERO : (opts.dspDiffable ? 
T(0xD1FFAB1E) : p); } template <typename T> T dspOffset(T o) { return (o == ZERO) ? ZERO : (opts.dspDiffable ? T(0xD1FFAB1E) : o); } #pragma warning(pop) static int dspTreeID(GenTree* tree) { return tree->gtTreeID; } static void printStmtID(Statement* stmt) { assert(stmt != nullptr); printf(FMT_STMT, stmt->GetID()); } static void printTreeID(GenTree* tree) { if (tree == nullptr) { printf("[------]"); } else { printf("[%06d]", dspTreeID(tree)); } } const char* pgoSourceToString(ICorJitInfo::PgoSource p); const char* devirtualizationDetailToString(CORINFO_DEVIRTUALIZATION_DETAIL detail); #endif // DEBUG // clang-format off #define STRESS_MODES \ \ STRESS_MODE(NONE) \ \ /* "Variations" stress areas which we try to mix up with each other. */ \ /* These should not be exhaustively used as they might */ \ /* hide/trivialize other areas */ \ \ STRESS_MODE(REGS) \ STRESS_MODE(DBL_ALN) \ STRESS_MODE(LCL_FLDS) \ STRESS_MODE(UNROLL_LOOPS) \ STRESS_MODE(MAKE_CSE) \ STRESS_MODE(LEGACY_INLINE) \ STRESS_MODE(CLONE_EXPR) \ STRESS_MODE(USE_CMOV) \ STRESS_MODE(FOLD) \ STRESS_MODE(MERGED_RETURNS) \ STRESS_MODE(BB_PROFILE) \ STRESS_MODE(OPT_BOOLS_GC) \ STRESS_MODE(REMORPH_TREES) \ STRESS_MODE(64RSLT_MUL) \ STRESS_MODE(DO_WHILE_LOOPS) \ STRESS_MODE(MIN_OPTS) \ STRESS_MODE(REVERSE_FLAG) /* Will set GTF_REVERSE_OPS whenever we can */ \ STRESS_MODE(REVERSE_COMMA) /* Will reverse commas created with gtNewCommaNode */ \ STRESS_MODE(TAILCALL) /* Will make the call as a tailcall whenever legal */ \ STRESS_MODE(CATCH_ARG) /* Will spill catch arg */ \ STRESS_MODE(UNSAFE_BUFFER_CHECKS) \ STRESS_MODE(NULL_OBJECT_CHECK) \ STRESS_MODE(PINVOKE_RESTORE_ESP) \ STRESS_MODE(RANDOM_INLINE) \ STRESS_MODE(SWITCH_CMP_BR_EXPANSION) \ STRESS_MODE(GENERIC_VARN) \ STRESS_MODE(PROFILER_CALLBACKS) /* Will generate profiler hooks for ELT callbacks */ \ STRESS_MODE(BYREF_PROMOTION) /* Change undoPromotion decisions for byrefs */ \ STRESS_MODE(PROMOTE_FEWER_STRUCTS)/* Don't promote some structs that can be promoted */ \ STRESS_MODE(VN_BUDGET)/* Randomize the VN budget */ \ \ /* After COUNT_VARN, stress level 2 does all of these all the time */ \ \ STRESS_MODE(COUNT_VARN) \ \ /* "Check" stress areas that can be exhaustively used if we */ \ /* dont care about performance at all */ \ \ STRESS_MODE(FORCE_INLINE) /* Treat every method as AggressiveInlining */ \ STRESS_MODE(CHK_FLOW_UPDATE) \ STRESS_MODE(EMITTER) \ STRESS_MODE(CHK_REIMPORT) \ STRESS_MODE(FLATFP) \ STRESS_MODE(GENERIC_CHECK) \ STRESS_MODE(COUNT) enum compStressArea { #define STRESS_MODE(mode) STRESS_##mode, STRESS_MODES #undef STRESS_MODE }; // clang-format on #ifdef DEBUG static const LPCWSTR s_compStressModeNames[STRESS_COUNT + 1]; BYTE compActiveStressModes[STRESS_COUNT]; #endif // DEBUG #define MAX_STRESS_WEIGHT 100 bool compStressCompile(compStressArea stressArea, unsigned weightPercentage); bool compStressCompileHelper(compStressArea stressArea, unsigned weightPercentage); #ifdef DEBUG bool compInlineStress() { return compStressCompile(STRESS_LEGACY_INLINE, 50); } bool compRandomInlineStress() { return compStressCompile(STRESS_RANDOM_INLINE, 50); } bool compPromoteFewerStructs(unsigned lclNum); #endif // DEBUG bool compTailCallStress() { #ifdef DEBUG // Do not stress tailcalls in IL stubs as the runtime creates several IL // stubs to implement the tailcall mechanism, which would then // recursively create more IL stubs. 
return !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && (JitConfig.TailcallStress() != 0 || compStressCompile(STRESS_TAILCALL, 5)); #else return false; #endif } const char* compGetTieringName(bool wantShortName = false) const; const char* compGetStressMessage() const; codeOptimize compCodeOpt() const { #if 0 // Switching between size & speed has measurable throughput impact // (3.5% on NGen CoreLib when measured). It used to be enabled for // DEBUG, but should generate identical code between CHK & RET builds, // so that's not acceptable. // TODO-Throughput: Figure out what to do about size vs. speed & throughput. // Investigate the cause of the throughput regression. return opts.compCodeOpt; #else return BLENDED_CODE; #endif } //--------------------- Info about the procedure -------------------------- struct Info { COMP_HANDLE compCompHnd; CORINFO_MODULE_HANDLE compScopeHnd; CORINFO_CLASS_HANDLE compClassHnd; CORINFO_METHOD_HANDLE compMethodHnd; CORINFO_METHOD_INFO* compMethodInfo; bool hasCircularClassConstraints; bool hasCircularMethodConstraints; #if defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS const char* compMethodName; const char* compClassName; const char* compFullName; double compPerfScore; int compMethodSuperPMIIndex; // useful when debugging under SuperPMI #endif // defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS #if defined(DEBUG) || defined(INLINE_DATA) // Method hash is logically const, but computed // on first demand. mutable unsigned compMethodHashPrivate; unsigned compMethodHash() const; #endif // defined(DEBUG) || defined(INLINE_DATA) #ifdef PSEUDORANDOM_NOP_INSERTION // things for pseudorandom nop insertion unsigned compChecksum; CLRRandom compRNG; #endif // The following holds the FLG_xxxx flags for the method we're compiling. unsigned compFlags; // The following holds the class attributes for the method we're compiling. unsigned compClassAttr; const BYTE* compCode; IL_OFFSET compILCodeSize; // The IL code size IL_OFFSET compILImportSize; // Estimated amount of IL actually imported IL_OFFSET compILEntry; // The IL entry point (normally 0) PatchpointInfo* compPatchpointInfo; // Patchpoint data for OSR (normally nullptr) UNATIVE_OFFSET compNativeCodeSize; // The native code size, after instructions are issued. This // is less than (compTotalHotCodeSize + compTotalColdCodeSize) only if: // (1) the code is not hot/cold split, and we issued less code than we expected, or // (2) the code is hot/cold split, and we issued less code than we expected // in the cold section (the hot section will always be padded out to compTotalHotCodeSize). bool compIsStatic : 1; // Is the method static (no 'this' pointer)? bool compIsVarArgs : 1; // Does the method have varargs parameters? bool compInitMem : 1; // Is the CORINFO_OPT_INIT_LOCALS bit set in the method info options? bool compProfilerCallback : 1; // JIT inserted a profiler Enter callback bool compPublishStubParam : 1; // EAX captured in prolog will be available through an intrinsic bool compHasNextCallRetAddr : 1; // The NextCallReturnAddress intrinsic is used. var_types compRetType; // Return type of the method as declared in IL var_types compRetNativeType; // Normalized return type as per target arch ABI unsigned compILargsCount; // Number of arguments (incl. implicit but not hidden) unsigned compArgsCount; // Number of arguments (incl. 
implicit and hidden) #if FEATURE_FASTTAILCALL unsigned compArgStackSize; // Incoming argument stack size in bytes #endif // FEATURE_FASTTAILCALL unsigned compRetBuffArg; // position of hidden return param var (0, 1) (BAD_VAR_NUM means not present); int compTypeCtxtArg; // position of hidden param for type context for generic code (CORINFO_CALLCONV_PARAMTYPE) unsigned compThisArg; // position of implicit this pointer param (not to be confused with lvaArg0Var) unsigned compILlocalsCount; // Number of vars : args + locals (incl. implicit but not hidden) unsigned compLocalsCount; // Number of vars : args + locals (incl. implicit and hidden) unsigned compMaxStack; UNATIVE_OFFSET compTotalHotCodeSize; // Total number of bytes of Hot Code in the method UNATIVE_OFFSET compTotalColdCodeSize; // Total number of bytes of Cold Code in the method unsigned compUnmanagedCallCountWithGCTransition; // count of unmanaged calls with GC transition. CorInfoCallConvExtension compCallConv; // The entry-point calling convention for this method. unsigned compLvFrameListRoot; // lclNum for the Frame root unsigned compXcptnsCount; // Number of exception-handling clauses read in the method's IL. // You should generally use compHndBBtabCount instead: it is the // current number of EH clauses (after additions like synchronized // methods and funclets, and removals like unreachable code deletion). Target::ArgOrder compArgOrder; bool compMatchedVM; // true if the VM is "matched": either the JIT is a cross-compiler // and the VM expects that, or the JIT is a "self-host" compiler // (e.g., x86 hosted targeting x86) and the VM expects that. /* The following holds IL scope information about local variables. */ unsigned compVarScopesCount; VarScopeDsc* compVarScopes; /* The following holds information about instr offsets for * which we need to report IP-mappings */ IL_OFFSET* compStmtOffsets; // sorted unsigned compStmtOffsetsCount; ICorDebugInfo::BoundaryTypes compStmtOffsetsImplicit; #define CPU_X86 0x0100 // The generic X86 CPU #define CPU_X86_PENTIUM_4 0x0110 #define CPU_X64 0x0200 // The generic x64 CPU #define CPU_AMD_X64 0x0210 // AMD x64 CPU #define CPU_INTEL_X64 0x0240 // Intel x64 CPU #define CPU_ARM 0x0300 // The generic ARM CPU #define CPU_ARM64 0x0400 // The generic ARM64 CPU unsigned genCPU; // What CPU are we running on // Number of class profile probes in this method unsigned compClassProbeCount; } info; // Returns true if the method being compiled returns a non-void and non-struct value. // Note that lvaInitTypeRef() normalizes compRetNativeType for struct returns in a // single register as per target arch ABI (e.g on Amd64 Windows structs of size 1, 2, // 4 or 8 gets normalized to TYP_BYTE/TYP_SHORT/TYP_INT/TYP_LONG; On Arm HFA structs). // Methods returning such structs are considered to return non-struct return value and // this method returns true in that case. bool compMethodReturnsNativeScalarType() { return (info.compRetType != TYP_VOID) && !varTypeIsStruct(info.compRetNativeType); } // Returns true if the method being compiled returns RetBuf addr as its return value bool compMethodReturnsRetBufAddr() { // There are cases where implicit RetBuf argument should be explicitly returned in a register. // In such cases the return type is changed to TYP_BYREF and appropriate IR is generated. // These cases are: CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_AMD64 // 1. on x64 Windows and Unix the address of RetBuf needs to be returned by // methods with hidden RetBufArg in RAX. 
In such case GT_RETURN is of TYP_BYREF, // returning the address of RetBuf. return (info.compRetBuffArg != BAD_VAR_NUM); #else // TARGET_AMD64 #ifdef PROFILING_SUPPORTED // 2. Profiler Leave callback expects the address of retbuf as return value for // methods with hidden RetBuf argument. impReturnInstruction() when profiler // callbacks are needed creates GT_RETURN(TYP_BYREF, op1 = Addr of RetBuf) for // methods with hidden RetBufArg. if (compIsProfilerHookNeeded()) { return (info.compRetBuffArg != BAD_VAR_NUM); } #endif // 3. Windows ARM64 native instance calling convention requires the address of RetBuff // to be returned in x0. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_ARM64) if (TargetOS::IsWindows) { auto callConv = info.compCallConv; if (callConvIsInstanceMethodCallConv(callConv)) { return (info.compRetBuffArg != BAD_VAR_NUM); } } #endif // TARGET_ARM64 // 4. x86 unmanaged calling conventions require the address of RetBuff to be returned in eax. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) if (info.compCallConv != CorInfoCallConvExtension::Managed) { return (info.compRetBuffArg != BAD_VAR_NUM); } #endif return false; #endif // TARGET_AMD64 } // Returns true if the method returns a value in more than one return register // TODO-ARM-Bug: Deal with multi-register genReturnLocaled structs? // TODO-ARM64: Does this apply for ARM64 too? bool compMethodReturnsMultiRegRetType() { #if FEATURE_MULTIREG_RET #if defined(TARGET_X86) // On x86, 64-bit longs and structs are returned in multiple registers return varTypeIsLong(info.compRetNativeType) || (varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM)); #else // targets: X64-UNIX, ARM64 or ARM32 // On all other targets that support multireg return values: // Methods returning a struct in multiple registers have a return value of TYP_STRUCT. // Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM); #endif // TARGET_XXX #else // not FEATURE_MULTIREG_RET // For this architecture there are no multireg returns return false; #endif // FEATURE_MULTIREG_RET } bool compEnregLocals() { return ((opts.compFlags & CLFLG_REGVAR) != 0); } bool compEnregStructLocals() { return (JitConfig.JitEnregStructLocals() != 0); } bool compObjectStackAllocation() { return (JitConfig.JitObjectStackAllocation() != 0); } // Returns true if the method returns a value in more than one return register, // it should replace/be merged with compMethodReturnsMultiRegRetType when #36868 is fixed. // The difference from original `compMethodReturnsMultiRegRetType` is in ARM64 SIMD* handling, // this method correctly returns false for it (it is passed as HVA), when the original returns true. bool compMethodReturnsMultiRegRegTypeAlternate() { #if FEATURE_MULTIREG_RET #if defined(TARGET_X86) // On x86, 64-bit longs and structs are returned in multiple registers return varTypeIsLong(info.compRetNativeType) || (varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM)); #else // targets: X64-UNIX, ARM64 or ARM32 #if defined(TARGET_ARM64) // TYP_SIMD* are returned in one register. if (varTypeIsSIMD(info.compRetNativeType)) { return false; } #endif // On all other targets that support multireg return values: // Methods returning a struct in multiple registers have a return value of TYP_STRUCT. 
// Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM); #endif // TARGET_XXX #else // not FEATURE_MULTIREG_RET // For this architecture there are no multireg returns return false; #endif // FEATURE_MULTIREG_RET } // Returns true if the method being compiled returns a value bool compMethodHasRetVal() { return compMethodReturnsNativeScalarType() || compMethodReturnsRetBufAddr() || compMethodReturnsMultiRegRetType(); } // Returns true if the method requires a PInvoke prolog and epilog bool compMethodRequiresPInvokeFrame() { return (info.compUnmanagedCallCountWithGCTransition > 0); } // Returns true if address-exposed user variables should be poisoned with a recognizable value bool compShouldPoisonFrame() { #ifdef FEATURE_ON_STACK_REPLACEMENT if (opts.IsOSR()) return false; #endif return !info.compInitMem && opts.compDbgCode; } // Returns true if the jit supports having patchpoints in this method. // Optionally, get the reason why not. bool compCanHavePatchpoints(const char** reason = nullptr); #if defined(DEBUG) void compDispLocalVars(); #endif // DEBUG private: class ClassLayoutTable* m_classLayoutTable; class ClassLayoutTable* typCreateClassLayoutTable(); class ClassLayoutTable* typGetClassLayoutTable(); public: // Get the layout having the specified layout number. ClassLayout* typGetLayoutByNum(unsigned layoutNum); // Get the layout number of the specified layout. unsigned typGetLayoutNum(ClassLayout* layout); // Get the layout having the specified size but no class handle. ClassLayout* typGetBlkLayout(unsigned blockSize); // Get the number of a layout having the specified size but no class handle. unsigned typGetBlkLayoutNum(unsigned blockSize); // Get the layout for the specified class handle. ClassLayout* typGetObjLayout(CORINFO_CLASS_HANDLE classHandle); // Get the number of a layout for the specified class handle. unsigned typGetObjLayoutNum(CORINFO_CLASS_HANDLE classHandle); //-------------------------- Global Compiler Data ------------------------------------ #ifdef DEBUG private: static LONG s_compMethodsCount; // to produce unique label names #endif public: #ifdef DEBUG LONG compMethodID; unsigned compGenTreeID; unsigned compStatementID; unsigned compBasicBlockID; #endif BasicBlock* compCurBB; // the current basic block in process Statement* compCurStmt; // the current statement in process GenTree* compCurTree; // the current tree in process // The following is used to create the 'method JIT info' block. 
size_t compInfoBlkSize; BYTE* compInfoBlkAddr; EHblkDsc* compHndBBtab; // array of EH data unsigned compHndBBtabCount; // element count of used elements in EH data array unsigned compHndBBtabAllocCount; // element count of allocated elements in EH data array #if defined(TARGET_X86) //------------------------------------------------------------------------- // Tracking of region covered by the monitor in synchronized methods void* syncStartEmitCookie; // the emitter cookie for first instruction after the call to MON_ENTER void* syncEndEmitCookie; // the emitter cookie for first instruction after the call to MON_EXIT #endif // !TARGET_X86 Phases mostRecentlyActivePhase; // the most recently active phase PhaseChecks activePhaseChecks; // the currently active phase checks //------------------------------------------------------------------------- // The following keeps track of how many bytes of local frame space we've // grabbed so far in the current function, and how many argument bytes we // need to pop when we return. // unsigned compLclFrameSize; // secObject+lclBlk+locals+temps // Count of callee-saved regs we pushed in the prolog. // Does not include EBP for isFramePointerUsed() and double-aligned frames. // In case of Amd64 this doesn't include float regs saved on stack. unsigned compCalleeRegsPushed; #if defined(TARGET_XARCH) // Mask of callee saved float regs on stack. regMaskTP compCalleeFPRegsSavedMask; #endif #ifdef TARGET_AMD64 // Quirk for VS debug-launch scenario to work: // Bytes of padding between save-reg area and locals. #define VSQUIRK_STACK_PAD (2 * REGSIZE_BYTES) unsigned compVSQuirkStackPaddingNeeded; #endif unsigned compArgSize; // total size of arguments in bytes (including register args (lvIsRegArg)) unsigned compMapILargNum(unsigned ILargNum); // map accounting for hidden args unsigned compMapILvarNum(unsigned ILvarNum); // map accounting for hidden args unsigned compMap2ILvarNum(unsigned varNum) const; // map accounting for hidden args #if defined(TARGET_ARM64) struct FrameInfo { // Frame type (1-5) int frameType; // Distance from established (method body) SP to base of callee save area int calleeSaveSpOffset; // Amount to subtract from SP before saving (prolog) OR // to add to SP after restoring (epilog) callee saves int calleeSaveSpDelta; // Distance from established SP to where caller's FP was saved int offsetSpToSavedFp; } compFrameInfo; #endif //------------------------------------------------------------------------- static void compStartup(); // One-time initialization static void compShutdown(); // One-time finalization void compInit(ArenaAllocator* pAlloc, CORINFO_METHOD_HANDLE methodHnd, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, InlineInfo* inlineInfo); void compDone(); static void compDisplayStaticSizes(FILE* fout); //------------ Some utility functions -------------- void* compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */ void** ppIndirection); /* OUT */ // Several JIT/EE interface functions return a CorInfoType, and also return a // class handle as an out parameter if the type is a value class. Returns the // size of the type these describe. unsigned compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd); // Returns true if the method being compiled has a return buffer. bool compHasRetBuffArg(); #ifdef DEBUG // Components used by the compiler may write unit test suites, and // have them run within this method. They will be run only once per process, and only // in debug. (Perhaps should be under the control of a COMPlus_ flag.) 
// These should fail by asserting. void compDoComponentUnitTestsOnce(); #endif // DEBUG int compCompile(CORINFO_MODULE_HANDLE classPtr, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags); void compCompileFinish(); int compCompileHelper(CORINFO_MODULE_HANDLE classPtr, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlag); ArenaAllocator* compGetArenaAllocator(); void generatePatchpointInfo(); #if MEASURE_MEM_ALLOC static bool s_dspMemStats; // Display per-phase memory statistics for every function #endif // MEASURE_MEM_ALLOC #if LOOP_HOIST_STATS unsigned m_loopsConsidered; bool m_curLoopHasHoistedExpression; unsigned m_loopsWithHoistedExpressions; unsigned m_totalHoistedExpressions; void AddLoopHoistStats(); void PrintPerMethodLoopHoistStats(); static CritSecObject s_loopHoistStatsLock; // This lock protects the data structures below. static unsigned s_loopsConsidered; static unsigned s_loopsWithHoistedExpressions; static unsigned s_totalHoistedExpressions; static void PrintAggregateLoopHoistStats(FILE* f); #endif // LOOP_HOIST_STATS #if TRACK_ENREG_STATS class EnregisterStats { private: unsigned m_totalNumberOfVars; unsigned m_totalNumberOfStructVars; unsigned m_totalNumberOfEnregVars; unsigned m_totalNumberOfStructEnregVars; unsigned m_addrExposed; unsigned m_VMNeedsStackAddr; unsigned m_localField; unsigned m_blockOp; unsigned m_dontEnregStructs; unsigned m_notRegSizeStruct; unsigned m_structArg; unsigned m_lclAddrNode; unsigned m_castTakesAddr; unsigned m_storeBlkSrc; unsigned m_oneAsgRetyping; unsigned m_swizzleArg; unsigned m_blockOpRet; unsigned m_returnSpCheck; unsigned m_simdUserForcesDep; unsigned m_liveInOutHndlr; unsigned m_depField; unsigned m_noRegVars; unsigned m_minOptsGC; #ifdef JIT32_GCENCODER unsigned m_PinningRef; #endif // JIT32_GCENCODER #if !defined(TARGET_64BIT) unsigned m_longParamField; #endif // !TARGET_64BIT unsigned m_parentExposed; unsigned m_tooConservative; unsigned m_escapeAddress; unsigned m_osrExposed; unsigned m_stressLclFld; unsigned m_copyFldByFld; unsigned m_dispatchRetBuf; unsigned m_wideIndir; public: void RecordLocal(const LclVarDsc* varDsc); void Dump(FILE* fout) const; }; static EnregisterStats s_enregisterStats; #endif // TRACK_ENREG_STATS bool compIsForImportOnly(); bool compIsForInlining() const; bool compDonotInline(); #ifdef DEBUG // Get the default fill char value we randomize this value when JitStress is enabled. 
static unsigned char compGetJitDefaultFill(Compiler* comp); const char* compLocalVarName(unsigned varNum, unsigned offs); VarName compVarName(regNumber reg, bool isFloatReg = false); const char* compRegVarName(regNumber reg, bool displayVar = false, bool isFloatReg = false); const char* compRegNameForSize(regNumber reg, size_t size); const char* compFPregVarName(unsigned fpReg, bool displayVar = false); void compDspSrcLinesByNativeIP(UNATIVE_OFFSET curIP); void compDspSrcLinesByLineNum(unsigned line, bool seek = false); #endif // DEBUG //------------------------------------------------------------------------- struct VarScopeListNode { VarScopeDsc* data; VarScopeListNode* next; static VarScopeListNode* Create(VarScopeDsc* value, CompAllocator alloc) { VarScopeListNode* node = new (alloc) VarScopeListNode; node->data = value; node->next = nullptr; return node; } }; struct VarScopeMapInfo { VarScopeListNode* head; VarScopeListNode* tail; static VarScopeMapInfo* Create(VarScopeListNode* node, CompAllocator alloc) { VarScopeMapInfo* info = new (alloc) VarScopeMapInfo; info->head = node; info->tail = node; return info; } }; // Max value of scope count for which we would use linear search; for larger values we would use hashtable lookup. static const unsigned MAX_LINEAR_FIND_LCL_SCOPELIST = 32; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, VarScopeMapInfo*> VarNumToScopeDscMap; // Map to keep variables' scope indexed by varNum containing it's scope dscs at the index. VarNumToScopeDscMap* compVarScopeMap; VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned lifeBeg, unsigned lifeEnd); VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned offs); VarScopeDsc* compFindLocalVarLinear(unsigned varNum, unsigned offs); void compInitVarScopeMap(); VarScopeDsc** compEnterScopeList; // List has the offsets where variables // enter scope, sorted by instr offset unsigned compNextEnterScope; VarScopeDsc** compExitScopeList; // List has the offsets where variables // go out of scope, sorted by instr offset unsigned compNextExitScope; void compInitScopeLists(); void compResetScopeLists(); VarScopeDsc* compGetNextEnterScope(unsigned offs, bool scan = false); VarScopeDsc* compGetNextExitScope(unsigned offs, bool scan = false); void compProcessScopesUntil(unsigned offset, VARSET_TP* inScope, void (Compiler::*enterScopeFn)(VARSET_TP* inScope, VarScopeDsc*), void (Compiler::*exitScopeFn)(VARSET_TP* inScope, VarScopeDsc*)); #ifdef DEBUG void compDispScopeLists(); #endif // DEBUG bool compIsProfilerHookNeeded(); //------------------------------------------------------------------------- /* Statistical Data Gathering */ void compJitStats(); // call this function and enable // various ifdef's below for statistical data #if CALL_ARG_STATS void compCallArgStats(); static void compDispCallArgStats(FILE* fout); #endif //------------------------------------------------------------------------- protected: #ifdef DEBUG bool skipMethod(); #endif ArenaAllocator* compArenaAllocator; public: void compFunctionTraceStart(); void compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI); protected: size_t compMaxUncheckedOffsetForNullObject; void compInitOptions(JitFlags* compileFlags); void compSetProcessor(); void compInitDebuggingInfo(); void compSetOptimizationLevel(); #ifdef TARGET_ARMARCH bool compRsvdRegCheck(FrameLayoutState curState); #endif void compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags); // Clear annotations produced during optimizations; 
to be used between iterations when repeating opts. void ResetOptAnnotations(); // Regenerate loop descriptors; to be used between iterations when repeating opts. void RecomputeLoopInfo(); #ifdef PROFILING_SUPPORTED // Data required for generating profiler Enter/Leave/TailCall hooks bool compProfilerHookNeeded; // Whether profiler Enter/Leave/TailCall hook needs to be generated for the method void* compProfilerMethHnd; // Profiler handle of the method being compiled. Passed as param to ELT callbacks bool compProfilerMethHndIndirected; // Whether compProfilerHandle is pointer to the handle or is an actual handle #endif public: // Assumes called as part of process shutdown; does any compiler-specific work associated with that. static void ProcessShutdownWork(ICorStaticInfo* statInfo); CompAllocator getAllocator(CompMemKind cmk = CMK_Generic) { return CompAllocator(compArenaAllocator, cmk); } CompAllocator getAllocatorGC() { return getAllocator(CMK_GC); } CompAllocator getAllocatorLoopHoist() { return getAllocator(CMK_LoopHoist); } #ifdef DEBUG CompAllocator getAllocatorDebugOnly() { return getAllocator(CMK_DebugOnly); } #endif // DEBUG /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX typeInfo XX XX XX XX Checks for type compatibility and merges types XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: // Returns true if child is equal to or a subtype of parent for merge purposes // This support is necessary to suport attributes that are not described in // for example, signatures. For example, the permanent home byref (byref that // points to the gc heap), isn't a property of method signatures, therefore, // it is safe to have mismatches here (that tiCompatibleWith will not flag), // but when deciding if we need to reimport a block, we need to take these // in account bool tiMergeCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const; // Returns true if child is equal to or a subtype of parent. // normalisedForStack indicates that both types are normalised for the stack bool tiCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const; // Merges pDest and pSrc. Returns false if merge is undefined. // *pDest is modified to represent the merged type. Sets "*changed" to true // if this changes "*pDest". bool tiMergeToCommonParent(typeInfo* pDest, const typeInfo* pSrc, bool* changed) const; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX IL verification stuff XX XX XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: // The following is used to track liveness of local variables, initialization // of valueclass constructors, and type safe use of IL instructions. // dynamic state info needed for verification EntryState verCurrentState; // this ptr of object type .ctors are considered intited only after // the base class ctor is called, or an alternate ctor is called. // An uninited this ptr can be used to access fields, but cannot // be used to call a member function. 
bool verTrackObjCtorInitState; void verInitBBEntryState(BasicBlock* block, EntryState* currentState); // Requires that "tis" is not TIS_Bottom -- it's a definite init/uninit state. void verSetThisInit(BasicBlock* block, ThisInitState tis); void verInitCurrentState(); void verResetCurrentState(BasicBlock* block, EntryState* currentState); // Merges the current verification state into the entry state of "block", return false if that merge fails, // TRUE if it succeeds. Further sets "*changed" to true if this changes the entry state of "block". bool verMergeEntryStates(BasicBlock* block, bool* changed); void verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg)); void verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg)); typeInfo verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef = false); // converts from jit type representation to typeInfo typeInfo verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd); // converts from jit type representation to typeInfo bool verIsSDArray(const typeInfo& ti); typeInfo verGetArrayElemType(const typeInfo& ti); typeInfo verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args); bool verIsByRefLike(const typeInfo& ti); bool verIsSafeToReturnByRef(const typeInfo& ti); // generic type variables range over types that satisfy IsBoxable bool verIsBoxable(const typeInfo& ti); void DECLSPEC_NORETURN verRaiseVerifyException(INDEBUG(const char* reason) DEBUGARG(const char* file) DEBUGARG(unsigned line)); void verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* reason) DEBUGARG(const char* file) DEBUGARG(unsigned line)); bool verCheckTailCallConstraint(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call // on a type parameter? bool speculative // If true, won't throw if verificatoin fails. Instead it will // return false to the caller. // If false, it will throw. ); bool verIsBoxedValueType(const typeInfo& ti); void verVerifyCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, bool tailCall, bool readonlyCall, // is this a "readonly." call? const BYTE* delegateCreateStart, const BYTE* codeAddr, CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName)); bool verCheckDelegateCreation(const BYTE* delegateCreateStart, const BYTE* codeAddr, mdMemberRef& targetMemberRef); typeInfo verVerifySTIND(const typeInfo& ptr, const typeInfo& value, const typeInfo& instrType); typeInfo verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType); void verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken, const CORINFO_FIELD_INFO& fieldInfo, const typeInfo* tiThis, bool mutator, bool allowPlainStructAsThis = false); void verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode); void verVerifyThisPtrInitialised(); bool verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target); #ifdef DEBUG // One line log function. Default level is 0. Increasing it gives you // more log information // levels are currently unused: #define JITDUMP(level,...) 
(); void JitLogEE(unsigned level, const char* fmt, ...); bool compDebugBreak; bool compJitHaltMethod(); #endif /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX GS Security checks for unsafe buffers XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: struct ShadowParamVarInfo { FixedBitVect* assignGroup; // the closure set of variables whose values depend on each other unsigned shadowCopy; // Lcl var num, if not valid set to BAD_VAR_NUM static bool mayNeedShadowCopy(LclVarDsc* varDsc) { #if defined(TARGET_AMD64) // GS cookie logic to create shadow slots, create trees to copy reg args to shadow // slots and update all trees to refer to shadow slots is done immediately after // fgMorph(). Lsra could potentially mark a param as DoNotEnregister after JIT determines // not to shadow a parameter. Also, LSRA could potentially spill a param which is passed // in register. Therefore, conservatively all params may need a shadow copy. Note that // GS cookie logic further checks whether the param is a ptr or an unsafe buffer before // creating a shadow slot even though this routine returns true. // // TODO-AMD64-CQ: Revisit this conservative approach as it could create more shadow slots than // required. There are two cases under which a reg arg could potentially be used from its // home location: // a) LSRA marks it as DoNotEnregister (see LinearScan::identifyCandidates()) // b) LSRA spills it // // Possible solution to address case (a) // - The conditions under which LSRA marks a varDsc as DoNotEnregister could be checked // in this routine. Note that live out of exception handler is something we may not be // able to do it here since GS cookie logic is invoked ahead of liveness computation. // Therefore, for methods with exception handling and need GS cookie check we might have // to take conservative approach. // // Possible solution to address case (b) // - Whenver a parameter passed in an argument register needs to be spilled by LSRA, we // create a new spill temp if the method needs GS cookie check. return varDsc->lvIsParam; #else // !defined(TARGET_AMD64) return varDsc->lvIsParam && !varDsc->lvIsRegArg; #endif } #ifdef DEBUG void Print() { printf("assignGroup [%p]; shadowCopy: [%d];\n", assignGroup, shadowCopy); } #endif }; GSCookie* gsGlobalSecurityCookieAddr; // Address of global cookie for unsafe buffer checks GSCookie gsGlobalSecurityCookieVal; // Value of global cookie if addr is NULL ShadowParamVarInfo* gsShadowVarInfo; // Table used by shadow param analysis code void gsGSChecksInitCookie(); // Grabs cookie variable void gsCopyShadowParams(); // Identify vulnerable params and create dhadow copies bool gsFindVulnerableParams(); // Shadow param analysis code void gsParamsToShadows(); // Insert copy code and replave param uses by shadow static fgWalkPreFn gsMarkPtrsAndAssignGroups; // Shadow param analysis tree-walk static fgWalkPreFn gsReplaceShadowParams; // Shadow param replacement tree-walk #define DEFAULT_MAX_INLINE_SIZE 100 // Methods with > DEFAULT_MAX_INLINE_SIZE IL bytes will never be inlined. // This can be overwritten by setting complus_JITInlineSize env variable. 
#define DEFAULT_MAX_INLINE_DEPTH 20 // Methods at more than this level deep will not be inlined #define DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE 32 // fixed locallocs of this size or smaller will convert to local buffers private: #ifdef FEATURE_JIT_METHOD_PERF JitTimer* pCompJitTimer; // Timer data structure (by phases) for current compilation. static CompTimeSummaryInfo s_compJitTimerSummary; // Summary of the Timer information for the whole run. static LPCWSTR JitTimeLogCsv(); // Retrieve the file name for CSV from ConfigDWORD. static LPCWSTR compJitTimeLogFilename; // If a log file for JIT time is desired, filename to write it to. #endif void BeginPhase(Phases phase); // Indicate the start of the given phase. void EndPhase(Phases phase); // Indicate the end of the given phase. #if MEASURE_CLRAPI_CALLS // Thin wrappers that call into JitTimer (if present). inline void CLRApiCallEnter(unsigned apix); inline void CLRApiCallLeave(unsigned apix); public: inline void CLR_API_Enter(API_ICorJitInfo_Names ename); inline void CLR_API_Leave(API_ICorJitInfo_Names ename); private: #endif #if defined(DEBUG) || defined(INLINE_DATA) // These variables are associated with maintaining SQM data about compile time. unsigned __int64 m_compCyclesAtEndOfInlining; // The thread-virtualized cycle count at the end of the inlining phase // in the current compilation. unsigned __int64 m_compCycles; // Net cycle count for current compilation DWORD m_compTickCountAtEndOfInlining; // The result of GetTickCount() (# ms since some epoch marker) at the end of // the inlining phase in the current compilation. #endif // defined(DEBUG) || defined(INLINE_DATA) // Records the SQM-relevant (cycles and tick count). Should be called after inlining is complete. // (We do this after inlining because this marks the last point at which the JIT is likely to cause // type-loading and class initialization). void RecordStateAtEndOfInlining(); // Assumes being called at the end of compilation. Update the SQM state. void RecordStateAtEndOfCompilation(); public: #if FUNC_INFO_LOGGING static LPCWSTR compJitFuncInfoFilename; // If a log file for per-function information is required, this is the // filename to write it to. static FILE* compJitFuncInfoFile; // And this is the actual FILE* to write to. #endif // FUNC_INFO_LOGGING Compiler* prevCompiler; // Previous compiler on stack for TLS Compiler* linked list for reentrant compilers. #if MEASURE_NOWAY void RecordNowayAssert(const char* filename, unsigned line, const char* condStr); #endif // MEASURE_NOWAY #ifndef FEATURE_TRACELOGGING // Should we actually fire the noway assert body and the exception handler? bool compShouldThrowOnNoway(); #else // FEATURE_TRACELOGGING // Should we actually fire the noway assert body and the exception handler? bool compShouldThrowOnNoway(const char* filename, unsigned line); // Telemetry instance to use per method compilation. JitTelemetry compJitTelemetry; // Get common parameters that have to be logged with most telemetry data. void compGetTelemetryDefaults(const char** assemblyName, const char** scopeName, const char** methodName, unsigned* methodHash); #endif // !FEATURE_TRACELOGGING #ifdef DEBUG private: NodeToTestDataMap* m_nodeTestData; static const unsigned FIRST_LOOP_HOIST_CSE_CLASS = 1000; unsigned m_loopHoistCSEClass; // LoopHoist test annotations turn into CSE requirements; we // label them with CSE Class #'s starting at FIRST_LOOP_HOIST_CSE_CLASS. // Current kept in this. 
public: NodeToTestDataMap* GetNodeTestData() { Compiler* compRoot = impInlineRoot(); if (compRoot->m_nodeTestData == nullptr) { compRoot->m_nodeTestData = new (getAllocatorDebugOnly()) NodeToTestDataMap(getAllocatorDebugOnly()); } return compRoot->m_nodeTestData; } typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, int> NodeToIntMap; // Returns the set (i.e., the domain of the result map) of nodes that are keys in m_nodeTestData, and // currently occur in the AST graph. NodeToIntMap* FindReachableNodesInNodeTestData(); // Node "from" is being eliminated, and being replaced by node "to". If "from" had any associated // test data, associate that data with "to". void TransferTestDataToNode(GenTree* from, GenTree* to); // These are the methods that test that the various conditions implied by the // test attributes are satisfied. void JitTestCheckSSA(); // SSA builder tests. void JitTestCheckVN(); // Value numbering tests. #endif // DEBUG // The "FieldSeqStore", for canonicalizing field sequences. See the definition of FieldSeqStore for // operations. FieldSeqStore* m_fieldSeqStore; FieldSeqStore* GetFieldSeqStore() { Compiler* compRoot = impInlineRoot(); if (compRoot->m_fieldSeqStore == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_FieldSeqStore, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_FieldSeqStore)); compRoot->m_fieldSeqStore = new (ialloc) FieldSeqStore(ialloc); } return compRoot->m_fieldSeqStore; } typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, FieldSeqNode*> NodeToFieldSeqMap; // Some nodes of "TYP_BYREF" or "TYP_I_IMPL" actually represent the address of a field within a struct, but since // the offset of the field is zero, there's no "GT_ADD" node. We normally attach a field sequence to the constant // that is added, but what do we do when that constant is zero, and is thus not present? We use this mechanism to // attach the field sequence directly to the address node. NodeToFieldSeqMap* m_zeroOffsetFieldMap; NodeToFieldSeqMap* GetZeroOffsetFieldMap() { // Don't need to worry about inlining here if (m_zeroOffsetFieldMap == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ZeroOffsetFieldMap, and use that for // allocation. CompAllocator ialloc(getAllocator(CMK_ZeroOffsetFieldMap)); m_zeroOffsetFieldMap = new (ialloc) NodeToFieldSeqMap(ialloc); } return m_zeroOffsetFieldMap; } // Requires that "op1" is a node of type "TYP_BYREF" or "TYP_I_IMPL". We are dereferencing this with the fields in // "fieldSeq", whose offsets are required all to be zero. Ensures that any field sequence annotation currently on // "op1" or its components is augmented by appending "fieldSeq". In practice, if "op1" is a GT_LCL_FLD, it has // a field sequence as a member; otherwise, it may be the addition of an a byref and a constant, where the const // has a field sequence -- in this case "fieldSeq" is appended to that of the constant; otherwise, we // record the the field sequence using the ZeroOffsetFieldMap described above. // // One exception above is that "op1" is a node of type "TYP_REF" where "op1" is a GT_LCL_VAR. // This happens when System.Object vtable pointer is a regular field at offset 0 in System.Private.CoreLib in // CoreRT. Such case is handled same as the default case. 
void fgAddFieldSeqForZeroOffset(GenTree* op1, FieldSeqNode* fieldSeq); typedef JitHashTable<const GenTree*, JitPtrKeyFuncs<GenTree>, ArrayInfo> NodeToArrayInfoMap; NodeToArrayInfoMap* m_arrayInfoMap; NodeToArrayInfoMap* GetArrayInfoMap() { Compiler* compRoot = impInlineRoot(); if (compRoot->m_arrayInfoMap == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_ArrayInfoMap)); compRoot->m_arrayInfoMap = new (ialloc) NodeToArrayInfoMap(ialloc); } return compRoot->m_arrayInfoMap; } //----------------------------------------------------------------------------------------------------------------- // Compiler::TryGetArrayInfo: // Given an indirection node, checks to see whether or not that indirection represents an array access, and // if so returns information about the array. // // Arguments: // indir - The `GT_IND` node. // arrayInfo (out) - Information about the accessed array if this function returns true. Undefined otherwise. // // Returns: // True if the `GT_IND` node represents an array access; false otherwise. bool TryGetArrayInfo(GenTreeIndir* indir, ArrayInfo* arrayInfo) { if ((indir->gtFlags & GTF_IND_ARR_INDEX) == 0) { return false; } if (indir->gtOp1->OperIs(GT_INDEX_ADDR)) { GenTreeIndexAddr* const indexAddr = indir->gtOp1->AsIndexAddr(); *arrayInfo = ArrayInfo(indexAddr->gtElemType, indexAddr->gtElemSize, indexAddr->gtElemOffset, indexAddr->gtStructElemClass); return true; } bool found = GetArrayInfoMap()->Lookup(indir, arrayInfo); assert(found); return true; } NodeToUnsignedMap* m_memorySsaMap[MemoryKindCount]; // In some cases, we want to assign intermediate SSA #'s to memory states, and know what nodes create those memory // states. (We do this for try blocks, where, if the try block doesn't do a call that loses track of the memory // state, all the possible memory states are possible initial states of the corresponding catch block(s).) NodeToUnsignedMap* GetMemorySsaMap(MemoryKind memoryKind) { if (memoryKind == GcHeap && byrefStatesMatchGcHeapStates) { // Use the same map for GCHeap and ByrefExposed when their states match. memoryKind = ByrefExposed; } assert(memoryKind < MemoryKindCount); Compiler* compRoot = impInlineRoot(); if (compRoot->m_memorySsaMap[memoryKind] == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_ArrayInfoMap)); compRoot->m_memorySsaMap[memoryKind] = new (ialloc) NodeToUnsignedMap(ialloc); } return compRoot->m_memorySsaMap[memoryKind]; } // The Refany type is the only struct type whose structure is implicitly assumed by IL. We need its fields. 
CORINFO_CLASS_HANDLE m_refAnyClass; CORINFO_FIELD_HANDLE GetRefanyDataField() { if (m_refAnyClass == nullptr) { m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); } return info.compCompHnd->getFieldInClass(m_refAnyClass, 0); } CORINFO_FIELD_HANDLE GetRefanyTypeField() { if (m_refAnyClass == nullptr) { m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); } return info.compCompHnd->getFieldInClass(m_refAnyClass, 1); } #if VARSET_COUNTOPS static BitSetSupport::BitSetOpCounter m_varsetOpCounter; #endif #if ALLVARSET_COUNTOPS static BitSetSupport::BitSetOpCounter m_allvarsetOpCounter; #endif static HelperCallProperties s_helperCallProperties; #ifdef UNIX_AMD64_ABI static var_types GetTypeFromClassificationAndSizes(SystemVClassificationType classType, int size); static var_types GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, unsigned slotNum); static void GetStructTypeOffset(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1); void GetStructTypeOffset(CORINFO_CLASS_HANDLE typeHnd, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1); #endif // defined(UNIX_AMD64_ABI) void fgMorphMultiregStructArgs(GenTreeCall* call); GenTree* fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntryPtr); bool killGCRefs(GenTree* tree); }; // end of class Compiler //--------------------------------------------------------------------------------------------------------------------- // GenTreeVisitor: a flexible tree walker implemented using the curiously-recurring-template pattern. // // This class implements a configurable walker for IR trees. There are five configuration options (defaults values are // shown in parentheses): // // - ComputeStack (false): when true, the walker will push each node onto the `m_ancestors` stack. "Ancestors" is a bit // of a misnomer, as the first entry will always be the current node. // // - DoPreOrder (false): when true, the walker will invoke `TVisitor::PreOrderVisit` with the current node as an // argument before visiting the node's operands. // // - DoPostOrder (false): when true, the walker will invoke `TVisitor::PostOrderVisit` with the current node as an // argument after visiting the node's operands. // // - DoLclVarsOnly (false): when true, the walker will only invoke `TVisitor::PreOrderVisit` for lclVar nodes. // `DoPreOrder` must be true if this option is true. // // - UseExecutionOrder (false): when true, then walker will visit a node's operands in execution order (e.g. if a // binary operator has the `GTF_REVERSE_OPS` flag set, the second operand will be // visited before the first). // // At least one of `DoPreOrder` and `DoPostOrder` must be specified. 
// // A simple pre-order visitor might look something like the following: // // class CountingVisitor final : public GenTreeVisitor<CountingVisitor> // { // public: // enum // { // DoPreOrder = true // }; // // unsigned m_count; // // CountingVisitor(Compiler* compiler) // : GenTreeVisitor<CountingVisitor>(compiler), m_count(0) // { // } // // Compiler::fgWalkResult PreOrderVisit(GenTree* node) // { // m_count++; // } // }; // // This visitor would then be used like so: // // CountingVisitor countingVisitor(compiler); // countingVisitor.WalkTree(root); // template <typename TVisitor> class GenTreeVisitor { protected: typedef Compiler::fgWalkResult fgWalkResult; enum { ComputeStack = false, DoPreOrder = false, DoPostOrder = false, DoLclVarsOnly = false, UseExecutionOrder = false, }; Compiler* m_compiler; ArrayStack<GenTree*> m_ancestors; GenTreeVisitor(Compiler* compiler) : m_compiler(compiler), m_ancestors(compiler->getAllocator(CMK_ArrayStack)) { assert(compiler != nullptr); static_assert_no_msg(TVisitor::DoPreOrder || TVisitor::DoPostOrder); static_assert_no_msg(!TVisitor::DoLclVarsOnly || TVisitor::DoPreOrder); } fgWalkResult PreOrderVisit(GenTree** use, GenTree* user) { return fgWalkResult::WALK_CONTINUE; } fgWalkResult PostOrderVisit(GenTree** use, GenTree* user) { return fgWalkResult::WALK_CONTINUE; } public: fgWalkResult WalkTree(GenTree** use, GenTree* user) { assert(use != nullptr); GenTree* node = *use; if (TVisitor::ComputeStack) { m_ancestors.Push(node); } fgWalkResult result = fgWalkResult::WALK_CONTINUE; if (TVisitor::DoPreOrder && !TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } node = *use; if ((node == nullptr) || (result == fgWalkResult::WALK_SKIP_SUBTREES)) { goto DONE; } } switch (node->OperGet()) { // Leaf lclVars case GT_LCL_VAR: case GT_LCL_FLD: case GT_LCL_VAR_ADDR: case GT_LCL_FLD_ADDR: if (TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } } FALLTHROUGH; // Leaf nodes case GT_CATCH_ARG: case GT_LABEL: case GT_FTN_ADDR: case GT_RET_EXPR: case GT_CNS_INT: case GT_CNS_LNG: case GT_CNS_DBL: case GT_CNS_STR: case GT_MEMORYBARRIER: case GT_JMP: case GT_JCC: case GT_SETCC: case GT_NO_OP: case GT_START_NONGC: case GT_START_PREEMPTGC: case GT_PROF_HOOK: #if !defined(FEATURE_EH_FUNCLETS) case GT_END_LFIN: #endif // !FEATURE_EH_FUNCLETS case GT_PHI_ARG: case GT_JMPTABLE: case GT_CLS_VAR: case GT_CLS_VAR_ADDR: case GT_ARGPLACE: case GT_PHYSREG: case GT_EMITNOP: case GT_PINVOKE_PROLOG: case GT_PINVOKE_EPILOG: case GT_IL_OFFSET: break; // Lclvar unary operators case GT_STORE_LCL_VAR: case GT_STORE_LCL_FLD: if (TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } } FALLTHROUGH; // Standard unary operators case GT_NOT: case GT_NEG: case GT_BSWAP: case GT_BSWAP16: case GT_COPY: case GT_RELOAD: case GT_ARR_LENGTH: case GT_CAST: case GT_BITCAST: case GT_CKFINITE: case GT_LCLHEAP: case GT_ADDR: case GT_IND: case GT_OBJ: case GT_BLK: case GT_BOX: case GT_ALLOCOBJ: case GT_INIT_VAL: case GT_JTRUE: case GT_SWITCH: case GT_NULLCHECK: case GT_PUTARG_REG: case GT_PUTARG_STK: case GT_PUTARG_TYPE: case GT_RETURNTRAP: case GT_NOP: case GT_FIELD: case GT_RETURN: case GT_RETFILT: case GT_RUNTIMELOOKUP: case GT_KEEPALIVE: case GT_INC_SATURATE: { GenTreeUnOp* const unOp = 
node->AsUnOp(); if (unOp->gtOp1 != nullptr) { result = WalkTree(&unOp->gtOp1, unOp); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } // Special nodes case GT_PHI: for (GenTreePhi::Use& use : node->AsPhi()->Uses()) { result = WalkTree(&use.NodeRef(), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; case GT_FIELD_LIST: for (GenTreeFieldList::Use& use : node->AsFieldList()->Uses()) { result = WalkTree(&use.NodeRef(), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; case GT_CMPXCHG: { GenTreeCmpXchg* const cmpXchg = node->AsCmpXchg(); result = WalkTree(&cmpXchg->gtOpLocation, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&cmpXchg->gtOpValue, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&cmpXchg->gtOpComparand, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_ARR_ELEM: { GenTreeArrElem* const arrElem = node->AsArrElem(); result = WalkTree(&arrElem->gtArrObj, arrElem); if (result == fgWalkResult::WALK_ABORT) { return result; } const unsigned rank = arrElem->gtArrRank; for (unsigned dim = 0; dim < rank; dim++) { result = WalkTree(&arrElem->gtArrInds[dim], arrElem); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } case GT_ARR_OFFSET: { GenTreeArrOffs* const arrOffs = node->AsArrOffs(); result = WalkTree(&arrOffs->gtOffset, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&arrOffs->gtIndex, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&arrOffs->gtArrObj, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_STORE_DYN_BLK: { GenTreeStoreDynBlk* const dynBlock = node->AsStoreDynBlk(); GenTree** op1Use = &dynBlock->gtOp1; GenTree** op2Use = &dynBlock->gtOp2; GenTree** op3Use = &dynBlock->gtDynamicSize; result = WalkTree(op1Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(op2Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(op3Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_CALL: { GenTreeCall* const call = node->AsCall(); if (call->gtCallThisArg != nullptr) { result = WalkTree(&call->gtCallThisArg->NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } for (GenTreeCall::Use& use : call->Args()) { result = WalkTree(&use.NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } for (GenTreeCall::Use& use : call->LateArgs()) { result = WalkTree(&use.NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (call->gtCallType == CT_INDIRECT) { if (call->gtCallCookie != nullptr) { result = WalkTree(&call->gtCallCookie, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } result = WalkTree(&call->gtCallAddr, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (call->gtControlExpr != nullptr) { result = WalkTree(&call->gtControlExpr, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) #if defined(FEATURE_SIMD) case GT_SIMD: #endif #if defined(FEATURE_HW_INTRINSICS) case GT_HWINTRINSIC: #endif if (TVisitor::UseExecutionOrder && node->IsReverseOp()) { assert(node->AsMultiOp()->GetOperandCount() == 2); result = WalkTree(&node->AsMultiOp()->Op(2), node); if (result == 
fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&node->AsMultiOp()->Op(1), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } else { for (GenTree** use : node->AsMultiOp()->UseEdges()) { result = WalkTree(use, node); if (result == fgWalkResult::WALK_ABORT) { return result; } } } break; #endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) // Binary nodes default: { assert(node->OperIsBinary()); GenTreeOp* const op = node->AsOp(); GenTree** op1Use = &op->gtOp1; GenTree** op2Use = &op->gtOp2; if (TVisitor::UseExecutionOrder && node->IsReverseOp()) { std::swap(op1Use, op2Use); } if (*op1Use != nullptr) { result = WalkTree(op1Use, op); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (*op2Use != nullptr) { result = WalkTree(op2Use, op); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } } DONE: // Finally, visit the current node if (TVisitor::DoPostOrder) { result = reinterpret_cast<TVisitor*>(this)->PostOrderVisit(use, user); } if (TVisitor::ComputeStack) { m_ancestors.Pop(); } return result; } }; template <bool computeStack, bool doPreOrder, bool doPostOrder, bool doLclVarsOnly, bool useExecutionOrder> class GenericTreeWalker final : public GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>> { public: enum { ComputeStack = computeStack, DoPreOrder = doPreOrder, DoPostOrder = doPostOrder, DoLclVarsOnly = doLclVarsOnly, UseExecutionOrder = useExecutionOrder, }; private: Compiler::fgWalkData* m_walkData; public: GenericTreeWalker(Compiler::fgWalkData* walkData) : GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>>( walkData->compiler) , m_walkData(walkData) { assert(walkData != nullptr); if (computeStack) { walkData->parentStack = &this->m_ancestors; } } Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user) { m_walkData->parent = user; return m_walkData->wtprVisitorFn(use, m_walkData); } Compiler::fgWalkResult PostOrderVisit(GenTree** use, GenTree* user) { m_walkData->parent = user; return m_walkData->wtpoVisitorFn(use, m_walkData); } }; // A dominator tree visitor implemented using the curiously-recurring-template pattern, similar to GenTreeVisitor. template <typename TVisitor> class DomTreeVisitor { protected: Compiler* const m_compiler; DomTreeNode* const m_domTree; DomTreeVisitor(Compiler* compiler, DomTreeNode* domTree) : m_compiler(compiler), m_domTree(domTree) { } void Begin() { } void PreOrderVisit(BasicBlock* block) { } void PostOrderVisit(BasicBlock* block) { } void End() { } public: //------------------------------------------------------------------------ // WalkTree: Walk the dominator tree, starting from fgFirstBB. // // Notes: // This performs a non-recursive, non-allocating walk of the tree by using // DomTreeNode's firstChild and nextSibling links to locate the children of // a node and BasicBlock's bbIDom parent link to go back up the tree when // no more children are left. // // Forests are also supported, provided that all the roots are chained via // DomTreeNode::nextSibling to fgFirstBB. 
// void WalkTree() { static_cast<TVisitor*>(this)->Begin(); for (BasicBlock *next, *block = m_compiler->fgFirstBB; block != nullptr; block = next) { static_cast<TVisitor*>(this)->PreOrderVisit(block); next = m_domTree[block->bbNum].firstChild; if (next != nullptr) { assert(next->bbIDom == block); continue; } do { static_cast<TVisitor*>(this)->PostOrderVisit(block); next = m_domTree[block->bbNum].nextSibling; if (next != nullptr) { assert(next->bbIDom == block->bbIDom); break; } block = block->bbIDom; } while (block != nullptr); } static_cast<TVisitor*>(this)->End(); } }; // EHClauses: adapter class for forward iteration of the exception handling table using range-based `for`, e.g.: // for (EHblkDsc* const ehDsc : EHClauses(compiler)) // class EHClauses { EHblkDsc* m_begin; EHblkDsc* m_end; // Forward iterator for the exception handling table entries. Iteration is in table order. // class iterator { EHblkDsc* m_ehDsc; public: iterator(EHblkDsc* ehDsc) : m_ehDsc(ehDsc) { } EHblkDsc* operator*() const { return m_ehDsc; } iterator& operator++() { ++m_ehDsc; return *this; } bool operator!=(const iterator& i) const { return m_ehDsc != i.m_ehDsc; } }; public: EHClauses(Compiler* comp) : m_begin(comp->compHndBBtab), m_end(comp->compHndBBtab + comp->compHndBBtabCount) { assert((m_begin != nullptr) || (m_begin == m_end)); } iterator begin() const { return iterator(m_begin); } iterator end() const { return iterator(m_end); } }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Miscellaneous Compiler stuff XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // Values used to mark the types a stack slot is used for const unsigned TYPE_REF_INT = 0x01; // slot used as a 32-bit int const unsigned TYPE_REF_LNG = 0x02; // slot used as a 64-bit long const unsigned TYPE_REF_FLT = 0x04; // slot used as a 32-bit float const unsigned TYPE_REF_DBL = 0x08; // slot used as a 64-bit float const unsigned TYPE_REF_PTR = 0x10; // slot used as a 32-bit pointer const unsigned TYPE_REF_BYR = 0x20; // slot used as a byref pointer const unsigned TYPE_REF_STC = 0x40; // slot used as a struct const unsigned TYPE_REF_TYPEMASK = 0x7F; // bits that represent the type // const unsigned TYPE_REF_ADDR_TAKEN = 0x80; // slots address was taken /***************************************************************************** * * Variables to keep track of total code amounts. */ #if DISPLAY_SIZES extern size_t grossVMsize; extern size_t grossNCsize; extern size_t totalNCsize; extern unsigned genMethodICnt; extern unsigned genMethodNCnt; extern size_t gcHeaderISize; extern size_t gcPtrMapISize; extern size_t gcHeaderNSize; extern size_t gcPtrMapNSize; #endif // DISPLAY_SIZES /***************************************************************************** * * Variables to keep track of basic block counts (more data on 1 BB methods) */ #if COUNT_BASIC_BLOCKS extern Histogram bbCntTable; extern Histogram bbOneBBSizeTable; #endif /***************************************************************************** * * Used by optFindNaturalLoops to gather statistical information such as * - total number of natural loops * - number of loops with 1, 2, ... 
exit conditions * - number of loops that have an iterator (for like) * - number of loops that have a constant iterator */ #if COUNT_LOOPS extern unsigned totalLoopMethods; // counts the total number of methods that have natural loops extern unsigned maxLoopsPerMethod; // counts the maximum number of loops a method has extern unsigned totalLoopOverflows; // # of methods that identified more loops than we can represent extern unsigned totalLoopCount; // counts the total number of natural loops extern unsigned totalUnnatLoopCount; // counts the total number of (not-necessarily natural) loops extern unsigned totalUnnatLoopOverflows; // # of methods that identified more unnatural loops than we can represent extern unsigned iterLoopCount; // counts the # of loops with an iterator (for like) extern unsigned simpleTestLoopCount; // counts the # of loops with an iterator and a simple loop condition (iter < // const) extern unsigned constIterLoopCount; // counts the # of loops with a constant iterator (for like) extern bool hasMethodLoops; // flag to keep track if we already counted a method as having loops extern unsigned loopsThisMethod; // counts the number of loops in the current method extern bool loopOverflowThisMethod; // True if we exceeded the max # of loops in the method. extern Histogram loopCountTable; // Histogram of loop counts extern Histogram loopExitCountTable; // Histogram of loop exit counts #endif // COUNT_LOOPS /***************************************************************************** * variables to keep track of how many iterations we go in a dataflow pass */ #if DATAFLOW_ITER extern unsigned CSEiterCount; // counts the # of iteration for the CSE dataflow extern unsigned CFiterCount; // counts the # of iteration for the Const Folding dataflow #endif // DATAFLOW_ITER #if MEASURE_BLOCK_SIZE extern size_t genFlowNodeSize; extern size_t genFlowNodeCnt; #endif // MEASURE_BLOCK_SIZE #if MEASURE_NODE_SIZE struct NodeSizeStats { void Init() { genTreeNodeCnt = 0; genTreeNodeSize = 0; genTreeNodeActualSize = 0; } // Count of tree nodes allocated. unsigned __int64 genTreeNodeCnt; // The size we allocate. unsigned __int64 genTreeNodeSize; // The actual size of the node. Note that the actual size will likely be smaller // than the allocated size, but we sometimes use SetOper()/ChangeOper() to change // a smaller node to a larger one. TODO-Cleanup: add stats on // SetOper()/ChangeOper() usage to quantify this. unsigned __int64 genTreeNodeActualSize; }; extern NodeSizeStats genNodeSizeStats; // Total node size stats extern NodeSizeStats genNodeSizeStatsPerFunc; // Per-function node size stats extern Histogram genTreeNcntHist; extern Histogram genTreeNsizHist; #endif // MEASURE_NODE_SIZE /***************************************************************************** * Count fatal errors (including noway_asserts). 
*/ #if MEASURE_FATAL extern unsigned fatal_badCode; extern unsigned fatal_noWay; extern unsigned fatal_implLimitation; extern unsigned fatal_NOMEM; extern unsigned fatal_noWayAssertBody; #ifdef DEBUG extern unsigned fatal_noWayAssertBodyArgs; #endif // DEBUG extern unsigned fatal_NYI; #endif // MEASURE_FATAL /***************************************************************************** * Codegen */ #ifdef TARGET_XARCH const instruction INS_SHIFT_LEFT_LOGICAL = INS_shl; const instruction INS_SHIFT_RIGHT_LOGICAL = INS_shr; const instruction INS_SHIFT_RIGHT_ARITHM = INS_sar; const instruction INS_AND = INS_and; const instruction INS_OR = INS_or; const instruction INS_XOR = INS_xor; const instruction INS_NEG = INS_neg; const instruction INS_TEST = INS_test; const instruction INS_MUL = INS_imul; const instruction INS_SIGNED_DIVIDE = INS_idiv; const instruction INS_UNSIGNED_DIVIDE = INS_div; const instruction INS_BREAKPOINT = INS_int3; const instruction INS_ADDC = INS_adc; const instruction INS_SUBC = INS_sbb; const instruction INS_NOT = INS_not; #endif // TARGET_XARCH #ifdef TARGET_ARM const instruction INS_SHIFT_LEFT_LOGICAL = INS_lsl; const instruction INS_SHIFT_RIGHT_LOGICAL = INS_lsr; const instruction INS_SHIFT_RIGHT_ARITHM = INS_asr; const instruction INS_AND = INS_and; const instruction INS_OR = INS_orr; const instruction INS_XOR = INS_eor; const instruction INS_NEG = INS_rsb; const instruction INS_TEST = INS_tst; const instruction INS_MUL = INS_mul; const instruction INS_MULADD = INS_mla; const instruction INS_SIGNED_DIVIDE = INS_sdiv; const instruction INS_UNSIGNED_DIVIDE = INS_udiv; const instruction INS_BREAKPOINT = INS_bkpt; const instruction INS_ADDC = INS_adc; const instruction INS_SUBC = INS_sbc; const instruction INS_NOT = INS_mvn; const instruction INS_ABS = INS_vabs; const instruction INS_SQRT = INS_vsqrt; #endif // TARGET_ARM #ifdef TARGET_ARM64 const instruction INS_MULADD = INS_madd; inline const instruction INS_BREAKPOINT_osHelper() { // GDB needs the encoding of brk #0 // Windbg needs the encoding of brk #F000 return TargetOS::IsUnix ? INS_brk_unix : INS_brk_windows; } #define INS_BREAKPOINT INS_BREAKPOINT_osHelper() const instruction INS_ABS = INS_fabs; const instruction INS_SQRT = INS_fsqrt; #endif // TARGET_ARM64 /*****************************************************************************/ extern const BYTE genTypeSizes[]; extern const BYTE genTypeAlignments[]; extern const BYTE genTypeStSzs[]; extern const BYTE genActualTypes[]; /*****************************************************************************/ #ifdef DEBUG void dumpConvertedVarSet(Compiler* comp, VARSET_VALARG_TP vars); #endif // DEBUG #include "compiler.hpp" // All the shared inline functions /*****************************************************************************/ #endif //_COMPILER_H_ /*****************************************************************************/
1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types, but the code base was sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and therefore doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but the code base was sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and therefore doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
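To make the substitution described above concrete, the following is a minimal, self-contained C++ sketch. It is not JIT code: `MockCompiler`, `isArm64`, `usesSimdOld`, and `supportSIMDTypesSketch` are invented stand-ins, and the real `Compiler::supportSIMDTypes()` behavior is only approximated from the PR description. The point it illustrates is that gating SIMD-dependent (ABI) handling on a configurable `featureSIMD`-style flag can wrongly disable it on Arm64, whereas a query that always reports SIMD support on Arm64 keeps it enabled.

// Minimal standalone sketch; hypothetical MockCompiler, not the real JIT Compiler class.
#include <cstdio>

struct MockCompiler
{
    bool featureSIMD; // mirrors a COMPlus_FeatureSIMD-style switch; can be forced to false
    bool isArm64;     // stands in for building/targeting TARGET_ARM64

    // Old pattern: gate SIMD-dependent (e.g. ABI) handling on the configurable flag only.
    bool usesSimdOld() const
    {
        return featureSIMD;
    }

    // New pattern (approximated): on Arm64 always report SIMD support, regardless of the flag.
    bool supportSIMDTypesSketch() const
    {
        return isArm64 ? true : featureSIMD;
    }
};

int main()
{
    MockCompiler comp{/* featureSIMD */ false, /* isArm64 */ true};
    // Prints "old gate: 0, new gate: 1" - the old check would skip SIMD-dependent ABI
    // handling on Arm64 when the flag is forced off, the new check keeps it enabled.
    std::printf("old gate: %d, new gate: %d\n", comp.usesSimdOld(), comp.supportSIMDTypesSketch());
    return 0;
}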
./src/coreclr/jit/hwintrinsic.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "jitpch.h" #include "hwintrinsic.h" #ifdef FEATURE_HW_INTRINSICS static const HWIntrinsicInfo hwIntrinsicInfoArray[] = { // clang-format off #if defined(TARGET_XARCH) #define HARDWARE_INTRINSIC(isa, name, size, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, category, flag) \ {NI_##isa##_##name, #name, InstructionSet_##isa, size, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, category, static_cast<HWIntrinsicFlag>(flag)}, #include "hwintrinsiclistxarch.h" #elif defined (TARGET_ARM64) #define HARDWARE_INTRINSIC(isa, name, size, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, category, flag) \ {NI_##isa##_##name, #name, InstructionSet_##isa, size, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, category, static_cast<HWIntrinsicFlag>(flag)}, #include "hwintrinsiclistarm64.h" #else #error Unsupported platform #endif // clang-format on }; //------------------------------------------------------------------------ // lookup: Gets the HWIntrinsicInfo associated with a given NamedIntrinsic // // Arguments: // id -- The NamedIntrinsic associated with the HWIntrinsic to lookup // // Return Value: // The HWIntrinsicInfo associated with id const HWIntrinsicInfo& HWIntrinsicInfo::lookup(NamedIntrinsic id) { assert(id != NI_Illegal); assert(id > NI_HW_INTRINSIC_START); assert(id < NI_HW_INTRINSIC_END); return hwIntrinsicInfoArray[id - NI_HW_INTRINSIC_START - 1]; } //------------------------------------------------------------------------ // getBaseJitTypeFromArgIfNeeded: Get simdBaseJitType of intrinsic from 1st or 2nd argument depending on the flag // // Arguments: // intrinsic -- id of the intrinsic function. // clsHnd -- class handle containing the intrinsic function. // method -- method handle of the intrinsic function. // sig -- signature of the intrinsic call. // simdBaseJitType -- Predetermined simdBaseJitType, could be CORINFO_TYPE_UNDEF // // Return Value: // The basetype of intrinsic of it can be fetched from 1st or 2nd argument, else return baseType unmodified. 
// CorInfoType Compiler::getBaseJitTypeFromArgIfNeeded(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType) { if (HWIntrinsicInfo::BaseTypeFromSecondArg(intrinsic) || HWIntrinsicInfo::BaseTypeFromFirstArg(intrinsic)) { CORINFO_ARG_LIST_HANDLE arg = sig->args; if (HWIntrinsicInfo::BaseTypeFromSecondArg(intrinsic)) { arg = info.compCompHnd->getArgNext(arg); } CORINFO_CLASS_HANDLE argClass = info.compCompHnd->getArgClass(sig, arg); simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(argClass); if (simdBaseJitType == CORINFO_TYPE_UNDEF) // the argument is not a vector { CORINFO_CLASS_HANDLE tmpClass; simdBaseJitType = strip(info.compCompHnd->getArgType(sig, arg, &tmpClass)); if (simdBaseJitType == CORINFO_TYPE_PTR) { simdBaseJitType = info.compCompHnd->getChildType(argClass, &tmpClass); } } assert(simdBaseJitType != CORINFO_TYPE_UNDEF); } return simdBaseJitType; } CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleForHWSIMD(var_types simdType, CorInfoType simdBaseJitType) { if (m_simdHandleCache == nullptr) { return NO_CLASS_HANDLE; } if (simdType == TYP_SIMD16) { switch (simdBaseJitType) { case CORINFO_TYPE_FLOAT: return m_simdHandleCache->Vector128FloatHandle; case CORINFO_TYPE_DOUBLE: return m_simdHandleCache->Vector128DoubleHandle; case CORINFO_TYPE_INT: return m_simdHandleCache->Vector128IntHandle; case CORINFO_TYPE_USHORT: return m_simdHandleCache->Vector128UShortHandle; case CORINFO_TYPE_UBYTE: return m_simdHandleCache->Vector128UByteHandle; case CORINFO_TYPE_SHORT: return m_simdHandleCache->Vector128ShortHandle; case CORINFO_TYPE_BYTE: return m_simdHandleCache->Vector128ByteHandle; case CORINFO_TYPE_LONG: return m_simdHandleCache->Vector128LongHandle; case CORINFO_TYPE_UINT: return m_simdHandleCache->Vector128UIntHandle; case CORINFO_TYPE_ULONG: return m_simdHandleCache->Vector128ULongHandle; case CORINFO_TYPE_NATIVEINT: return m_simdHandleCache->Vector128NIntHandle; case CORINFO_TYPE_NATIVEUINT: return m_simdHandleCache->Vector128NUIntHandle; default: assert(!"Didn't find a class handle for simdType"); } } #ifdef TARGET_XARCH else if (simdType == TYP_SIMD32) { switch (simdBaseJitType) { case CORINFO_TYPE_FLOAT: return m_simdHandleCache->Vector256FloatHandle; case CORINFO_TYPE_DOUBLE: return m_simdHandleCache->Vector256DoubleHandle; case CORINFO_TYPE_INT: return m_simdHandleCache->Vector256IntHandle; case CORINFO_TYPE_USHORT: return m_simdHandleCache->Vector256UShortHandle; case CORINFO_TYPE_UBYTE: return m_simdHandleCache->Vector256UByteHandle; case CORINFO_TYPE_SHORT: return m_simdHandleCache->Vector256ShortHandle; case CORINFO_TYPE_BYTE: return m_simdHandleCache->Vector256ByteHandle; case CORINFO_TYPE_LONG: return m_simdHandleCache->Vector256LongHandle; case CORINFO_TYPE_UINT: return m_simdHandleCache->Vector256UIntHandle; case CORINFO_TYPE_ULONG: return m_simdHandleCache->Vector256ULongHandle; case CORINFO_TYPE_NATIVEINT: return m_simdHandleCache->Vector256NIntHandle; case CORINFO_TYPE_NATIVEUINT: return m_simdHandleCache->Vector256NUIntHandle; default: assert(!"Didn't find a class handle for simdType"); } } #endif // TARGET_XARCH #ifdef TARGET_ARM64 else if (simdType == TYP_SIMD8) { switch (simdBaseJitType) { case CORINFO_TYPE_FLOAT: return m_simdHandleCache->Vector64FloatHandle; case CORINFO_TYPE_DOUBLE: return m_simdHandleCache->Vector64DoubleHandle; case CORINFO_TYPE_INT: return m_simdHandleCache->Vector64IntHandle; case CORINFO_TYPE_USHORT: return m_simdHandleCache->Vector64UShortHandle; case CORINFO_TYPE_UBYTE: return 
m_simdHandleCache->Vector64UByteHandle; case CORINFO_TYPE_SHORT: return m_simdHandleCache->Vector64ShortHandle; case CORINFO_TYPE_BYTE: return m_simdHandleCache->Vector64ByteHandle; case CORINFO_TYPE_UINT: return m_simdHandleCache->Vector64UIntHandle; case CORINFO_TYPE_LONG: return m_simdHandleCache->Vector64LongHandle; case CORINFO_TYPE_ULONG: return m_simdHandleCache->Vector64ULongHandle; case CORINFO_TYPE_NATIVEINT: return m_simdHandleCache->Vector64NIntHandle; case CORINFO_TYPE_NATIVEUINT: return m_simdHandleCache->Vector64NUIntHandle; default: assert(!"Didn't find a class handle for simdType"); } } #endif // TARGET_ARM64 return NO_CLASS_HANDLE; } //------------------------------------------------------------------------ // vnEncodesResultTypeForHWIntrinsic(NamedIntrinsic hwIntrinsicID): // // Arguments: // hwIntrinsicID -- The id for the HW intrinsic // // Return Value: // Returns true if this intrinsic requires value numbering to add an // extra SimdType argument that encodes the resulting type. // If we don't do this overloaded versions can return the same VN // leading to incorrect CSE subsitutions. // /* static */ bool Compiler::vnEncodesResultTypeForHWIntrinsic(NamedIntrinsic hwIntrinsicID) { int numArgs = HWIntrinsicInfo::lookupNumArgs(hwIntrinsicID); // HW Intrinsic's with -1 for numArgs have a varying number of args, so we currently // give themm a unique value number them, and don't add an extra argument. // if (numArgs == -1) { return false; } // We iterate over all of the different baseType's for this intrinsic in the HWIntrinsicInfo table // We set diffInsCount to the number of instructions that can execute differently. // unsigned diffInsCount = 0; #ifdef TARGET_XARCH instruction lastIns = INS_invalid; #endif for (var_types baseType = TYP_BYTE; (baseType <= TYP_DOUBLE); baseType = (var_types)(baseType + 1)) { instruction curIns = HWIntrinsicInfo::lookupIns(hwIntrinsicID, baseType); if (curIns != INS_invalid) { #ifdef TARGET_XARCH if (curIns != lastIns) { diffInsCount++; // remember the last valid instruction that we saw lastIns = curIns; } #elif defined(TARGET_ARM64) // On ARM64 we use the same instruction and specify an insOpt arrangement // so we always consider the instruction operation to be different // diffInsCount++; #endif // TARGET if (diffInsCount >= 2) { // We can early exit the loop now break; } } } // If we see two (or more) different instructions we need the extra VNF_SimdType arg return (diffInsCount >= 2); } //------------------------------------------------------------------------ // lookupId: Gets the NamedIntrinsic for a given method name and InstructionSet // // Arguments: // comp -- The compiler // sig -- The signature of the intrinsic // className -- The name of the class associated with the HWIntrinsic to lookup // methodName -- The name of the method associated with the HWIntrinsic to lookup // enclosingClassName -- The name of the enclosing class of X64 classes // // Return Value: // The NamedIntrinsic associated with methodName and isa NamedIntrinsic HWIntrinsicInfo::lookupId(Compiler* comp, CORINFO_SIG_INFO* sig, const char* className, const char* methodName, const char* enclosingClassName) { // TODO-Throughput: replace sequential search by binary search CORINFO_InstructionSet isa = lookupIsa(className, enclosingClassName); if (isa == InstructionSet_ILLEGAL) { return NI_Illegal; } bool isIsaSupported = comp->compSupportsHWIntrinsic(isa); bool isHardwareAcceleratedProp = (strcmp(methodName, "get_IsHardwareAccelerated") == 0); #ifdef TARGET_XARCH 
if (isHardwareAcceleratedProp) { // Special case: Some of Vector128/256 APIs are hardware accelerated with Sse1 and Avx1, // but we want IsHardwareAccelerated to return true only when all of them are (there are // still can be cases where e.g. Sse41 might give an additional boost for Vector128, but it's // not important enough to bump the minimal Sse version here) if (strcmp(className, "Vector128") == 0) { isa = InstructionSet_SSE2; } else if (strcmp(className, "Vector256") == 0) { isa = InstructionSet_AVX2; } } #endif if ((strcmp(methodName, "get_IsSupported") == 0) || isHardwareAcceleratedProp) { return isIsaSupported ? (comp->compExactlyDependsOn(isa) ? NI_IsSupported_True : NI_IsSupported_Dynamic) : NI_IsSupported_False; } else if (!isIsaSupported) { return NI_Throw_PlatformNotSupportedException; } for (int i = 0; i < (NI_HW_INTRINSIC_END - NI_HW_INTRINSIC_START - 1); i++) { const HWIntrinsicInfo& intrinsicInfo = hwIntrinsicInfoArray[i]; if (isa != hwIntrinsicInfoArray[i].isa) { continue; } int numArgs = static_cast<unsigned>(intrinsicInfo.numArgs); if ((numArgs != -1) && (sig->numArgs != static_cast<unsigned>(intrinsicInfo.numArgs))) { continue; } if (strcmp(methodName, intrinsicInfo.name) == 0) { return intrinsicInfo.id; } } // There are several helper intrinsics that are implemented in managed code // Those intrinsics will hit this code path and need to return NI_Illegal return NI_Illegal; } //------------------------------------------------------------------------ // lookupSimdSize: Gets the SimdSize for a given HWIntrinsic and signature // // Arguments: // id -- The ID associated with the HWIntrinsic to lookup // sig -- The signature of the HWIntrinsic to lookup // // Return Value: // The SIMD size for the HWIntrinsic associated with id and sig // // Remarks: // This function is only used by the importer. After importation, we can // get the SIMD size from the GenTreeHWIntrinsic node. 
unsigned HWIntrinsicInfo::lookupSimdSize(Compiler* comp, NamedIntrinsic id, CORINFO_SIG_INFO* sig) { unsigned simdSize = 0; if (tryLookupSimdSize(id, &simdSize)) { return simdSize; } CORINFO_CLASS_HANDLE typeHnd = nullptr; if (HWIntrinsicInfo::BaseTypeFromFirstArg(id)) { typeHnd = comp->info.compCompHnd->getArgClass(sig, sig->args); } else if (HWIntrinsicInfo::BaseTypeFromSecondArg(id)) { CORINFO_ARG_LIST_HANDLE secondArg = comp->info.compCompHnd->getArgNext(sig->args); typeHnd = comp->info.compCompHnd->getArgClass(sig, secondArg); } else { assert(JITtype2varType(sig->retType) == TYP_STRUCT); typeHnd = sig->retTypeSigClass; } CorInfoType simdBaseJitType = comp->getBaseJitTypeAndSizeOfSIMDType(typeHnd, &simdSize); assert((simdSize > 0) && (simdBaseJitType != CORINFO_TYPE_UNDEF)); return simdSize; } //------------------------------------------------------------------------ // isImmOp: Checks whether the HWIntrinsic node has an imm operand // // Arguments: // id -- The NamedIntrinsic associated with the HWIntrinsic to lookup // op -- The operand to check // // Return Value: // true if the node has an imm operand; otherwise, false bool HWIntrinsicInfo::isImmOp(NamedIntrinsic id, const GenTree* op) { #ifdef TARGET_XARCH if (HWIntrinsicInfo::lookupCategory(id) != HW_Category_IMM) { return false; } if (!HWIntrinsicInfo::MaybeImm(id)) { return true; } #elif defined(TARGET_ARM64) if (!HWIntrinsicInfo::HasImmediateOperand(id)) { return false; } #else #error Unsupported platform #endif if (genActualType(op->TypeGet()) != TYP_INT) { return false; } return true; } //------------------------------------------------------------------------ // getArgForHWIntrinsic: pop an argument from the stack and validate its type // // Arguments: // argType -- the required type of argument // argClass -- the class handle of argType // expectAddr -- if true indicates we are expecting type stack entry to be a TYP_BYREF. // newobjThis -- For CEE_NEWOBJ, this is the temp grabbed for the allocated uninitalized object. // // Return Value: // the validated argument // GenTree* Compiler::getArgForHWIntrinsic(var_types argType, CORINFO_CLASS_HANDLE argClass, bool expectAddr, GenTree* newobjThis) { GenTree* arg = nullptr; if (varTypeIsStruct(argType)) { if (!varTypeIsSIMD(argType)) { unsigned int argSizeBytes; (void)getBaseJitTypeAndSizeOfSIMDType(argClass, &argSizeBytes); argType = getSIMDTypeForSize(argSizeBytes); } assert(varTypeIsSIMD(argType)); if (newobjThis == nullptr) { arg = impSIMDPopStack(argType, expectAddr); assert(varTypeIsSIMD(arg->TypeGet())); } else { assert((newobjThis->gtOper == GT_ADDR) && (newobjThis->AsOp()->gtOp1->gtOper == GT_LCL_VAR)); arg = newobjThis; // push newobj result on type stack unsigned tmp = arg->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(argClass).NormaliseForStack()); } } else { assert(varTypeIsArithmetic(argType)); arg = impPopStack().val; assert(varTypeIsArithmetic(arg->TypeGet())); assert(genActualType(arg->gtType) == genActualType(argType)); } return arg; } //------------------------------------------------------------------------ // addRangeCheckIfNeeded: add a GT_BOUNDS_CHECK node for non-full-range imm-intrinsic // // Arguments: // intrinsic -- intrinsic ID // immOp -- the immediate operand of the intrinsic // mustExpand -- true if the compiler is compiling the fallback(GT_CALL) of this intrinsics // immLowerBound -- lower incl. 
bound for a value of the immediate operand (for a non-full-range imm-intrinsic) // immUpperBound -- upper incl. bound for a value of the immediate operand (for a non-full-range imm-intrinsic) // // Return Value: // add a GT_BOUNDS_CHECK node for non-full-range imm-intrinsic, which would throw ArgumentOutOfRangeException // when the imm-argument is not in the valid range // GenTree* Compiler::addRangeCheckIfNeeded( NamedIntrinsic intrinsic, GenTree* immOp, bool mustExpand, int immLowerBound, int immUpperBound) { assert(immOp != nullptr); // Full-range imm-intrinsics do not need the range-check // because the imm-parameter of the intrinsic method is a byte. // AVX2 Gather intrinsics no not need the range-check // because their imm-parameter have discrete valid values that are handle by managed code if (mustExpand && HWIntrinsicInfo::isImmOp(intrinsic, immOp) #ifdef TARGET_XARCH && !HWIntrinsicInfo::isAVX2GatherIntrinsic(intrinsic) && !HWIntrinsicInfo::HasFullRangeImm(intrinsic) #endif ) { assert(!immOp->IsCnsIntOrI()); assert(varTypeIsUnsigned(immOp)); return addRangeCheckForHWIntrinsic(immOp, immLowerBound, immUpperBound); } else { return immOp; } } //------------------------------------------------------------------------ // addRangeCheckForHWIntrinsic: add a GT_BOUNDS_CHECK node for an intrinsic // // Arguments: // immOp -- the immediate operand of the intrinsic // immLowerBound -- lower incl. bound for a value of the immediate operand (for a non-full-range imm-intrinsic) // immUpperBound -- upper incl. bound for a value of the immediate operand (for a non-full-range imm-intrinsic) // // Return Value: // add a GT_BOUNDS_CHECK node for non-full-range imm-intrinsic, which would throw ArgumentOutOfRangeException // when the imm-argument is not in the valid range // GenTree* Compiler::addRangeCheckForHWIntrinsic(GenTree* immOp, int immLowerBound, int immUpperBound) { // Bounds check for value of an immediate operand // (immLowerBound <= immOp) && (immOp <= immUpperBound) // // implemented as a single comparison in the form of // // if ((immOp - immLowerBound) >= (immUpperBound - immLowerBound + 1)) // { // throw new ArgumentOutOfRangeException(); // } // // The value of (immUpperBound - immLowerBound + 1) is denoted as adjustedUpperBound. const ssize_t adjustedUpperBound = (ssize_t)immUpperBound - immLowerBound + 1; GenTree* adjustedUpperBoundNode = gtNewIconNode(adjustedUpperBound, TYP_INT); GenTree* immOpDup = nullptr; immOp = impCloneExpr(immOp, &immOpDup, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone an immediate operand for immediate value bounds check")); if (immLowerBound != 0) { immOpDup = gtNewOperNode(GT_SUB, TYP_INT, immOpDup, gtNewIconNode(immLowerBound, TYP_INT)); } GenTreeBoundsChk* hwIntrinsicChk = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(immOpDup, adjustedUpperBoundNode, SCK_ARG_RNG_EXCPN); return gtNewOperNode(GT_COMMA, immOp->TypeGet(), hwIntrinsicChk, immOp); } //------------------------------------------------------------------------ // compSupportsHWIntrinsic: check whether a given instruction is enabled via configuration // // Arguments: // isa - Instruction set // // Return Value: // true iff the given instruction set is enabled via configuration (environment variables, etc.). 
bool Compiler::compSupportsHWIntrinsic(CORINFO_InstructionSet isa) { return compHWIntrinsicDependsOn(isa) && (featureSIMD || HWIntrinsicInfo::isScalarIsa(isa)) && ( #ifdef DEBUG JitConfig.EnableIncompleteISAClass() || #endif HWIntrinsicInfo::isFullyImplementedIsa(isa)); } //------------------------------------------------------------------------ // impIsTableDrivenHWIntrinsic: // // Arguments: // intrinsicId - HW intrinsic id // category - category of a HW intrinsic // // Return Value: // returns true if this category can be table-driven in the importer // static bool impIsTableDrivenHWIntrinsic(NamedIntrinsic intrinsicId, HWIntrinsicCategory category) { return (category != HW_Category_Special) && HWIntrinsicInfo::RequiresCodegen(intrinsicId) && !HWIntrinsicInfo::HasSpecialImport(intrinsicId); } //------------------------------------------------------------------------ // isSupportedBaseType // // Arguments: // intrinsicId - HW intrinsic id // baseJitType - Base JIT type of the intrinsic. // // Return Value: // returns true if the baseType is supported for given intrinsic. // static bool isSupportedBaseType(NamedIntrinsic intrinsic, CorInfoType baseJitType) { if (baseJitType == CORINFO_TYPE_UNDEF) { return false; } var_types baseType = JitType2PreciseVarType(baseJitType); // We don't actually check the intrinsic outside of the false case as we expect // the exposed managed signatures are either generic and support all types // or they are explicit and support the type indicated. if (varTypeIsArithmetic(baseType)) { return true; } #ifdef TARGET_XARCH assert((intrinsic == NI_Vector128_As) || (intrinsic == NI_Vector128_AsByte) || (intrinsic == NI_Vector128_AsDouble) || (intrinsic == NI_Vector128_AsInt16) || (intrinsic == NI_Vector128_AsInt32) || (intrinsic == NI_Vector128_AsInt64) || (intrinsic == NI_Vector128_AsSByte) || (intrinsic == NI_Vector128_AsSingle) || (intrinsic == NI_Vector128_AsUInt16) || (intrinsic == NI_Vector128_AsUInt32) || (intrinsic == NI_Vector128_AsUInt64) || (intrinsic == NI_Vector128_get_AllBitsSet) || (intrinsic == NI_Vector128_get_Count) || (intrinsic == NI_Vector128_get_Zero) || (intrinsic == NI_Vector128_GetElement) || (intrinsic == NI_Vector128_WithElement) || (intrinsic == NI_Vector128_ToScalar) || (intrinsic == NI_Vector128_ToVector256) || (intrinsic == NI_Vector128_ToVector256Unsafe) || (intrinsic == NI_Vector256_As) || (intrinsic == NI_Vector256_AsByte) || (intrinsic == NI_Vector256_AsDouble) || (intrinsic == NI_Vector256_AsInt16) || (intrinsic == NI_Vector256_AsInt32) || (intrinsic == NI_Vector256_AsInt64) || (intrinsic == NI_Vector256_AsSByte) || (intrinsic == NI_Vector256_AsSingle) || (intrinsic == NI_Vector256_AsUInt16) || (intrinsic == NI_Vector256_AsUInt32) || (intrinsic == NI_Vector256_AsUInt64) || (intrinsic == NI_Vector256_get_AllBitsSet) || (intrinsic == NI_Vector256_get_Count) || (intrinsic == NI_Vector256_get_Zero) || (intrinsic == NI_Vector256_GetElement) || (intrinsic == NI_Vector256_WithElement) || (intrinsic == NI_Vector256_GetLower) || (intrinsic == NI_Vector256_ToScalar)); #endif // TARGET_XARCH #ifdef TARGET_ARM64 assert((intrinsic == NI_Vector64_As) || (intrinsic == NI_Vector64_AsByte) || (intrinsic == NI_Vector64_AsDouble) || (intrinsic == NI_Vector64_AsInt16) || (intrinsic == NI_Vector64_AsInt32) || (intrinsic == NI_Vector64_AsInt64) || (intrinsic == NI_Vector64_AsSByte) || (intrinsic == NI_Vector64_AsSingle) || (intrinsic == NI_Vector64_AsUInt16) || (intrinsic == NI_Vector64_AsUInt32) || (intrinsic == NI_Vector64_AsUInt64) || (intrinsic == 
NI_Vector64_get_AllBitsSet) || (intrinsic == NI_Vector64_get_Count) || (intrinsic == NI_Vector64_get_Zero) || (intrinsic == NI_Vector64_GetElement) || (intrinsic == NI_Vector64_ToScalar) || (intrinsic == NI_Vector64_ToVector128) || (intrinsic == NI_Vector64_ToVector128Unsafe) || (intrinsic == NI_Vector64_WithElement) || (intrinsic == NI_Vector128_As) || (intrinsic == NI_Vector128_AsByte) || (intrinsic == NI_Vector128_AsDouble) || (intrinsic == NI_Vector128_AsInt16) || (intrinsic == NI_Vector128_AsInt32) || (intrinsic == NI_Vector128_AsInt64) || (intrinsic == NI_Vector128_AsSByte) || (intrinsic == NI_Vector128_AsSingle) || (intrinsic == NI_Vector128_AsUInt16) || (intrinsic == NI_Vector128_AsUInt32) || (intrinsic == NI_Vector128_AsUInt64) || (intrinsic == NI_Vector128_get_AllBitsSet) || (intrinsic == NI_Vector128_get_Count) || (intrinsic == NI_Vector128_get_Zero) || (intrinsic == NI_Vector128_GetElement) || (intrinsic == NI_Vector128_GetLower) || (intrinsic == NI_Vector128_GetUpper) || (intrinsic == NI_Vector128_ToScalar) || (intrinsic == NI_Vector128_WithElement)); #endif // TARGET_ARM64 return false; } // HWIntrinsicSignatureReader: a helper class that "reads" a list of hardware intrinsic arguments and stores // the corresponding argument type descriptors as the fields of the class instance. // struct HWIntrinsicSignatureReader final { // Read: enumerates the list of arguments of a hardware intrinsic and stores the CORINFO_CLASS_HANDLE // and var_types values of each operand into the corresponding fields of the class instance. // // Arguments: // compHnd -- an instance of COMP_HANDLE class. // sig -- a hardware intrinsic signature. // void Read(COMP_HANDLE compHnd, CORINFO_SIG_INFO* sig) { CORINFO_ARG_LIST_HANDLE args = sig->args; if (sig->numArgs > 0) { op1JitType = strip(compHnd->getArgType(sig, args, &op1ClsHnd)); if (sig->numArgs > 1) { args = compHnd->getArgNext(args); op2JitType = strip(compHnd->getArgType(sig, args, &op2ClsHnd)); } if (sig->numArgs > 2) { args = compHnd->getArgNext(args); op3JitType = strip(compHnd->getArgType(sig, args, &op3ClsHnd)); } if (sig->numArgs > 3) { args = compHnd->getArgNext(args); op4JitType = strip(compHnd->getArgType(sig, args, &op4ClsHnd)); } } } CORINFO_CLASS_HANDLE op1ClsHnd; CORINFO_CLASS_HANDLE op2ClsHnd; CORINFO_CLASS_HANDLE op3ClsHnd; CORINFO_CLASS_HANDLE op4ClsHnd; CorInfoType op1JitType; CorInfoType op2JitType; CorInfoType op3JitType; CorInfoType op4JitType; var_types GetOp1Type() const { return JITtype2varType(op1JitType); } var_types GetOp2Type() const { return JITtype2varType(op2JitType); } var_types GetOp3Type() const { return JITtype2varType(op3JitType); } var_types GetOp4Type() const { return JITtype2varType(op4JitType); } }; //------------------------------------------------------------------------ // impHWIntrinsic: Import a hardware intrinsic as a GT_HWINTRINSIC node if possible // // Arguments: // intrinsic -- id of the intrinsic function. // clsHnd -- class handle containing the intrinsic function. // method -- method handle of the intrinsic function. 
// sig -- signature of the intrinsic call // mustExpand -- true if the intrinsic must return a GenTree*; otherwise, false // Return Value: // The GT_HWINTRINSIC node, or nullptr if not a supported intrinsic // GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand) { HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsic); CORINFO_InstructionSet isa = HWIntrinsicInfo::lookupIsa(intrinsic); int numArgs = sig->numArgs; var_types retType = JITtype2varType(sig->retType); CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; if ((retType == TYP_STRUCT) && featureSIMD) { unsigned int sizeBytes; simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(sig->retTypeSigClass, &sizeBytes); if (HWIntrinsicInfo::IsMultiReg(intrinsic)) { assert(sizeBytes == 0); } else { assert(sizeBytes != 0); // We want to return early here for cases where retType was TYP_STRUCT as per method signature and // rather than deferring the decision after getting the simdBaseJitType of arg. if (!isSupportedBaseType(intrinsic, simdBaseJitType)) { return nullptr; } retType = getSIMDTypeForSize(sizeBytes); } } simdBaseJitType = getBaseJitTypeFromArgIfNeeded(intrinsic, clsHnd, sig, simdBaseJitType); if (simdBaseJitType == CORINFO_TYPE_UNDEF) { if ((category == HW_Category_Scalar) || HWIntrinsicInfo::isScalarIsa(isa)) { simdBaseJitType = sig->retType; if (simdBaseJitType == CORINFO_TYPE_VOID) { simdBaseJitType = CORINFO_TYPE_UNDEF; } } else { assert(featureSIMD); unsigned int sizeBytes; simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(clsHnd, &sizeBytes); assert((category == HW_Category_Special) || (category == HW_Category_Helper) || (sizeBytes != 0)); } } // Immediately return if the category is other than scalar/special and this is not a supported base type. if ((category != HW_Category_Special) && (category != HW_Category_Scalar) && !HWIntrinsicInfo::isScalarIsa(isa) && !isSupportedBaseType(intrinsic, simdBaseJitType)) { return nullptr; } var_types simdBaseType = TYP_UNKNOWN; GenTree* immOp = nullptr; if (simdBaseJitType != CORINFO_TYPE_UNDEF) { simdBaseType = JitType2PreciseVarType(simdBaseJitType); } HWIntrinsicSignatureReader sigReader; sigReader.Read(info.compCompHnd, sig); #ifdef TARGET_ARM64 if ((intrinsic == NI_AdvSimd_Insert) || (intrinsic == NI_AdvSimd_InsertScalar) || (intrinsic == NI_AdvSimd_LoadAndInsertScalar)) { assert(sig->numArgs == 3); immOp = impStackTop(1).val; assert(HWIntrinsicInfo::isImmOp(intrinsic, immOp)); } else if (intrinsic == NI_AdvSimd_Arm64_InsertSelectedScalar) { // InsertSelectedScalar intrinsic has two immediate operands. // Since all the remaining intrinsics on both platforms have only one immediate // operand, in order to not complicate the shared logic even further we ensure here that // 1) The second immediate operand immOp2 is constant and // 2) its value belongs to [0, sizeof(op3) / sizeof(op3.BaseType)). // If either is false, we should fallback to the managed implementation Insert(dst, dstIdx, Extract(src, // srcIdx)). // The check for the first immediate operand immOp will use the same logic as other intrinsics that have an // immediate operand. 
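// For example (illustrative numbers, not from the source above): with op3 of type Vector128<int>,
// sizeof(op3) / sizeof(op3.BaseType) is 16 / 4, so immOp2 must be a constant in [0, 4); a
// non-constant or out-of-range value takes the managed Insert(dst, dstIdx, Extract(src, srcIdx)) path.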
GenTree* immOp2 = nullptr; assert(sig->numArgs == 4); immOp = impStackTop(2).val; immOp2 = impStackTop().val; assert(HWIntrinsicInfo::isImmOp(intrinsic, immOp)); assert(HWIntrinsicInfo::isImmOp(intrinsic, immOp2)); if (!immOp2->IsCnsIntOrI()) { assert(HWIntrinsicInfo::NoJmpTableImm(intrinsic)); return impNonConstFallback(intrinsic, retType, simdBaseJitType); } unsigned int otherSimdSize = 0; CorInfoType otherBaseJitType = getBaseJitTypeAndSizeOfSIMDType(sigReader.op3ClsHnd, &otherSimdSize); var_types otherBaseType = JitType2PreciseVarType(otherBaseJitType); assert(otherBaseJitType == simdBaseJitType); int immLowerBound2 = 0; int immUpperBound2 = 0; HWIntrinsicInfo::lookupImmBounds(intrinsic, otherSimdSize, otherBaseType, &immLowerBound2, &immUpperBound2); const int immVal2 = (int)immOp2->AsIntCon()->IconValue(); if ((immVal2 < immLowerBound2) || (immVal2 > immUpperBound2)) { assert(!mustExpand); return nullptr; } } else #endif if ((sig->numArgs > 0) && HWIntrinsicInfo::isImmOp(intrinsic, impStackTop().val)) { // NOTE: The following code assumes that for all intrinsics // taking an immediate operand, that operand will be last. immOp = impStackTop().val; } const unsigned simdSize = HWIntrinsicInfo::lookupSimdSize(this, intrinsic, sig); int immLowerBound = 0; int immUpperBound = 0; bool hasFullRangeImm = false; if (immOp != nullptr) { #ifdef TARGET_XARCH immUpperBound = HWIntrinsicInfo::lookupImmUpperBound(intrinsic); hasFullRangeImm = HWIntrinsicInfo::HasFullRangeImm(intrinsic); #elif defined(TARGET_ARM64) if (category == HW_Category_SIMDByIndexedElement) { CorInfoType indexedElementBaseJitType; var_types indexedElementBaseType; unsigned int indexedElementSimdSize = 0; if (numArgs == 3) { indexedElementBaseJitType = getBaseJitTypeAndSizeOfSIMDType(sigReader.op2ClsHnd, &indexedElementSimdSize); indexedElementBaseType = JitType2PreciseVarType(indexedElementBaseJitType); } else { assert(numArgs == 4); indexedElementBaseJitType = getBaseJitTypeAndSizeOfSIMDType(sigReader.op3ClsHnd, &indexedElementSimdSize); indexedElementBaseType = JitType2PreciseVarType(indexedElementBaseJitType); if (intrinsic == NI_Dp_DotProductBySelectedQuadruplet) { assert(((simdBaseType == TYP_INT) && (indexedElementBaseType == TYP_BYTE)) || ((simdBaseType == TYP_UINT) && (indexedElementBaseType == TYP_UBYTE))); // The second source operand of sdot, udot instructions is an indexed 32-bit element. 
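// For example, a 128-bit byte-element right operand holds four 32-bit quadruplets, so the bounds
// computed below use the 32-bit base type and the selection immediate is limited to [0, 3] rather
// than to the byte element count.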
indexedElementBaseJitType = simdBaseJitType; indexedElementBaseType = simdBaseType; } } assert(indexedElementBaseType == simdBaseType); HWIntrinsicInfo::lookupImmBounds(intrinsic, indexedElementSimdSize, simdBaseType, &immLowerBound, &immUpperBound); } else { HWIntrinsicInfo::lookupImmBounds(intrinsic, simdSize, simdBaseType, &immLowerBound, &immUpperBound); } #endif if (!hasFullRangeImm && immOp->IsCnsIntOrI()) { const int ival = (int)immOp->AsIntCon()->IconValue(); bool immOutOfRange; #ifdef TARGET_XARCH if (HWIntrinsicInfo::isAVX2GatherIntrinsic(intrinsic)) { immOutOfRange = (ival != 1) && (ival != 2) && (ival != 4) && (ival != 8); } else #endif { immOutOfRange = (ival < immLowerBound) || (ival > immUpperBound); } if (immOutOfRange) { assert(!mustExpand); // The imm-HWintrinsics that do not accept all imm8 values may throw // ArgumentOutOfRangeException when the imm argument is not in the valid range return nullptr; } } else if (!immOp->IsCnsIntOrI()) { if (HWIntrinsicInfo::NoJmpTableImm(intrinsic)) { return impNonConstFallback(intrinsic, retType, simdBaseJitType); } else if (!mustExpand) { // When the imm-argument is not a constant and we are not being forced to expand, we need to // return nullptr so a GT_CALL to the intrinsic method is emitted instead. The // intrinsic method is recursive and will be forced to expand, at which point // we emit some less efficient fallback code. return nullptr; } } } if (HWIntrinsicInfo::IsFloatingPointUsed(intrinsic)) { // Set `compFloatingPointUsed` to cover the scenario where an intrinsic is operating on SIMD fields, but // where no SIMD local vars are in use. This is the same logic as is used for FEATURE_SIMD. compFloatingPointUsed = true; } // table-driven importer of simple intrinsics if (impIsTableDrivenHWIntrinsic(intrinsic, category)) { const bool isScalar = (category == HW_Category_Scalar); assert(numArgs >= 0); if (!isScalar && ((HWIntrinsicInfo::lookupIns(intrinsic, simdBaseType) == INS_invalid) || ((simdSize != 8) && (simdSize != 16) && (simdSize != 32)))) { assert(!"Unexpected HW Intrinsic"); return nullptr; } GenTree* op1 = nullptr; GenTree* op2 = nullptr; GenTree* op3 = nullptr; GenTree* op4 = nullptr; GenTreeHWIntrinsic* retNode = nullptr; switch (numArgs) { case 0: assert(!isScalar); retNode = gtNewSimdHWIntrinsicNode(retType, intrinsic, simdBaseJitType, simdSize); break; case 1: op1 = getArgForHWIntrinsic(sigReader.GetOp1Type(), sigReader.op1ClsHnd); if ((category == HW_Category_MemoryLoad) && op1->OperIs(GT_CAST)) { // Although the API specifies a pointer, if what we have is a BYREF, that's what // we really want, so throw away the cast. if (op1->gtGetOp1()->TypeGet() == TYP_BYREF) { op1 = op1->gtGetOp1(); } } retNode = isScalar ? 
gtNewScalarHWIntrinsicNode(retType, op1, intrinsic) : gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize); #if defined(TARGET_XARCH) switch (intrinsic) { case NI_SSE41_ConvertToVector128Int16: case NI_SSE41_ConvertToVector128Int32: case NI_SSE41_ConvertToVector128Int64: case NI_AVX2_BroadcastScalarToVector128: case NI_AVX2_BroadcastScalarToVector256: case NI_AVX2_ConvertToVector256Int16: case NI_AVX2_ConvertToVector256Int32: case NI_AVX2_ConvertToVector256Int64: { // These intrinsics have both pointer and vector overloads // We want to be able to differentiate between them so lets // just track the aux type as a ptr or undefined, depending CorInfoType auxiliaryType = CORINFO_TYPE_UNDEF; if (!varTypeIsSIMD(op1->TypeGet())) { auxiliaryType = CORINFO_TYPE_PTR; } retNode->AsHWIntrinsic()->SetAuxiliaryJitType(auxiliaryType); break; } default: { break; } } #endif // TARGET_XARCH break; case 2: op2 = getArgForHWIntrinsic(sigReader.GetOp2Type(), sigReader.op2ClsHnd); op2 = addRangeCheckIfNeeded(intrinsic, op2, mustExpand, immLowerBound, immUpperBound); op1 = getArgForHWIntrinsic(sigReader.GetOp1Type(), sigReader.op1ClsHnd); retNode = isScalar ? gtNewScalarHWIntrinsicNode(retType, op1, op2, intrinsic) : gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); #ifdef TARGET_XARCH if ((intrinsic == NI_SSE42_Crc32) || (intrinsic == NI_SSE42_X64_Crc32)) { // TODO-XArch-Cleanup: currently we use the simdBaseJitType to bring the type of the second argument // to the code generator. May encode the overload info in other way. retNode->AsHWIntrinsic()->SetSimdBaseJitType(sigReader.op2JitType); } #elif defined(TARGET_ARM64) switch (intrinsic) { case NI_Crc32_ComputeCrc32: case NI_Crc32_ComputeCrc32C: case NI_Crc32_Arm64_ComputeCrc32: case NI_Crc32_Arm64_ComputeCrc32C: retNode->AsHWIntrinsic()->SetSimdBaseJitType(sigReader.op2JitType); break; case NI_AdvSimd_AddWideningUpper: case NI_AdvSimd_SubtractWideningUpper: assert(varTypeIsSIMD(op1->TypeGet())); retNode->AsHWIntrinsic()->SetAuxiliaryJitType(getBaseJitTypeOfSIMDType(sigReader.op1ClsHnd)); break; case NI_AdvSimd_Arm64_AddSaturateScalar: assert(varTypeIsSIMD(op2->TypeGet())); retNode->AsHWIntrinsic()->SetAuxiliaryJitType(getBaseJitTypeOfSIMDType(sigReader.op2ClsHnd)); break; case NI_ArmBase_Arm64_MultiplyHigh: if (sig->retType == CORINFO_TYPE_ULONG) { retNode->AsHWIntrinsic()->SetSimdBaseJitType(CORINFO_TYPE_ULONG); } else { assert(sig->retType == CORINFO_TYPE_LONG); retNode->AsHWIntrinsic()->SetSimdBaseJitType(CORINFO_TYPE_LONG); } break; default: break; } #endif break; case 3: op3 = getArgForHWIntrinsic(sigReader.GetOp3Type(), sigReader.op3ClsHnd); op2 = getArgForHWIntrinsic(sigReader.GetOp2Type(), sigReader.op2ClsHnd); op1 = getArgForHWIntrinsic(sigReader.GetOp1Type(), sigReader.op1ClsHnd); #ifdef TARGET_ARM64 if (intrinsic == NI_AdvSimd_LoadAndInsertScalar) { op2 = addRangeCheckIfNeeded(intrinsic, op2, mustExpand, immLowerBound, immUpperBound); if (op1->OperIs(GT_CAST)) { // Although the API specifies a pointer, if what we have is a BYREF, that's what // we really want, so throw away the cast. if (op1->gtGetOp1()->TypeGet() == TYP_BYREF) { op1 = op1->gtGetOp1(); } } } else if ((intrinsic == NI_AdvSimd_Insert) || (intrinsic == NI_AdvSimd_InsertScalar)) { op2 = addRangeCheckIfNeeded(intrinsic, op2, mustExpand, immLowerBound, immUpperBound); } else #endif { op3 = addRangeCheckIfNeeded(intrinsic, op3, mustExpand, immLowerBound, immUpperBound); } retNode = isScalar ? 
gtNewScalarHWIntrinsicNode(retType, op1, op2, op3, intrinsic) : gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseJitType, simdSize); #ifdef TARGET_XARCH if ((intrinsic == NI_AVX2_GatherVector128) || (intrinsic == NI_AVX2_GatherVector256)) { assert(varTypeIsSIMD(op2->TypeGet())); retNode->AsHWIntrinsic()->SetAuxiliaryJitType(getBaseJitTypeOfSIMDType(sigReader.op2ClsHnd)); } #endif break; #ifdef TARGET_ARM64 case 4: op4 = getArgForHWIntrinsic(sigReader.GetOp4Type(), sigReader.op4ClsHnd); op4 = addRangeCheckIfNeeded(intrinsic, op4, mustExpand, immLowerBound, immUpperBound); op3 = getArgForHWIntrinsic(sigReader.GetOp3Type(), sigReader.op3ClsHnd); op2 = getArgForHWIntrinsic(sigReader.GetOp2Type(), sigReader.op2ClsHnd); op1 = getArgForHWIntrinsic(sigReader.GetOp1Type(), sigReader.op1ClsHnd); assert(!isScalar); retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, op4, intrinsic, simdBaseJitType, simdSize); break; #endif default: return nullptr; } const bool isMemoryStore = retNode->OperIsMemoryStore(); if (isMemoryStore || retNode->OperIsMemoryLoad()) { if (isMemoryStore) { // A MemoryStore operation is an assignment retNode->gtFlags |= GTF_ASG; } // This operation contains an implicit indirection // it could point into the global heap or // it could throw a null reference exception. // retNode->gtFlags |= (GTF_GLOB_REF | GTF_EXCEPT); } return retNode; } return impSpecialIntrinsic(intrinsic, clsHnd, method, sig, simdBaseJitType, retType, simdSize); } #endif // FEATURE_HW_INTRINSICS
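The bounds check built by addRangeCheckForHWIntrinsic above folds the two-sided test (immLowerBound <= immOp) && (immOp <= immUpperBound) into a single unsigned comparison against the adjusted upper bound. A minimal standalone sketch of that trick, outside the JIT (the ImmOutOfRange helper and the sample bounds are illustrative and not part of the source above):

#include <cstdint>
#include <cstdio>

// Same shape as the JIT's range check: subtract the lower bound, then one
// unsigned comparison against (upper - lower + 1) covers both ends at once.
static bool ImmOutOfRange(int imm, int immLowerBound, int immUpperBound)
{
    const int64_t adjustedUpperBound = (int64_t)immUpperBound - immLowerBound + 1;
    return (uint64_t)(uint32_t)(imm - immLowerBound) >= (uint64_t)adjustedUpperBound;
}

int main()
{
    // e.g. an immediate restricted to the inclusive range [1, 8]
    printf("%d\n", ImmOutOfRange(4, 1, 8)); // 0: in range
    printf("%d\n", ImmOutOfRange(0, 1, 8)); // 1: 0 - 1 wraps to a huge unsigned value
    printf("%d\n", ImmOutOfRange(9, 1, 8)); // 1: above the upper bound
}

The wrap-around of negative adjusted values to large unsigned numbers is what lets the single GT_BOUNDS_CHECK node cover the lower bound as well as the upper one.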
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "jitpch.h" #include "hwintrinsic.h" #ifdef FEATURE_HW_INTRINSICS static const HWIntrinsicInfo hwIntrinsicInfoArray[] = { // clang-format off #if defined(TARGET_XARCH) #define HARDWARE_INTRINSIC(isa, name, size, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, category, flag) \ {NI_##isa##_##name, #name, InstructionSet_##isa, size, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, category, static_cast<HWIntrinsicFlag>(flag)}, #include "hwintrinsiclistxarch.h" #elif defined (TARGET_ARM64) #define HARDWARE_INTRINSIC(isa, name, size, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, category, flag) \ {NI_##isa##_##name, #name, InstructionSet_##isa, size, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, category, static_cast<HWIntrinsicFlag>(flag)}, #include "hwintrinsiclistarm64.h" #else #error Unsupported platform #endif // clang-format on }; //------------------------------------------------------------------------ // lookup: Gets the HWIntrinsicInfo associated with a given NamedIntrinsic // // Arguments: // id -- The NamedIntrinsic associated with the HWIntrinsic to lookup // // Return Value: // The HWIntrinsicInfo associated with id const HWIntrinsicInfo& HWIntrinsicInfo::lookup(NamedIntrinsic id) { assert(id != NI_Illegal); assert(id > NI_HW_INTRINSIC_START); assert(id < NI_HW_INTRINSIC_END); return hwIntrinsicInfoArray[id - NI_HW_INTRINSIC_START - 1]; } //------------------------------------------------------------------------ // getBaseJitTypeFromArgIfNeeded: Get simdBaseJitType of intrinsic from 1st or 2nd argument depending on the flag // // Arguments: // intrinsic -- id of the intrinsic function. // clsHnd -- class handle containing the intrinsic function. // method -- method handle of the intrinsic function. // sig -- signature of the intrinsic call. // simdBaseJitType -- Predetermined simdBaseJitType, could be CORINFO_TYPE_UNDEF // // Return Value: // The basetype of intrinsic of it can be fetched from 1st or 2nd argument, else return baseType unmodified. 
// CorInfoType Compiler::getBaseJitTypeFromArgIfNeeded(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType) { if (HWIntrinsicInfo::BaseTypeFromSecondArg(intrinsic) || HWIntrinsicInfo::BaseTypeFromFirstArg(intrinsic)) { CORINFO_ARG_LIST_HANDLE arg = sig->args; if (HWIntrinsicInfo::BaseTypeFromSecondArg(intrinsic)) { arg = info.compCompHnd->getArgNext(arg); } CORINFO_CLASS_HANDLE argClass = info.compCompHnd->getArgClass(sig, arg); simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(argClass); if (simdBaseJitType == CORINFO_TYPE_UNDEF) // the argument is not a vector { CORINFO_CLASS_HANDLE tmpClass; simdBaseJitType = strip(info.compCompHnd->getArgType(sig, arg, &tmpClass)); if (simdBaseJitType == CORINFO_TYPE_PTR) { simdBaseJitType = info.compCompHnd->getChildType(argClass, &tmpClass); } } assert(simdBaseJitType != CORINFO_TYPE_UNDEF); } return simdBaseJitType; } CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleForHWSIMD(var_types simdType, CorInfoType simdBaseJitType) { if (m_simdHandleCache == nullptr) { return NO_CLASS_HANDLE; } if (simdType == TYP_SIMD16) { switch (simdBaseJitType) { case CORINFO_TYPE_FLOAT: return m_simdHandleCache->Vector128FloatHandle; case CORINFO_TYPE_DOUBLE: return m_simdHandleCache->Vector128DoubleHandle; case CORINFO_TYPE_INT: return m_simdHandleCache->Vector128IntHandle; case CORINFO_TYPE_USHORT: return m_simdHandleCache->Vector128UShortHandle; case CORINFO_TYPE_UBYTE: return m_simdHandleCache->Vector128UByteHandle; case CORINFO_TYPE_SHORT: return m_simdHandleCache->Vector128ShortHandle; case CORINFO_TYPE_BYTE: return m_simdHandleCache->Vector128ByteHandle; case CORINFO_TYPE_LONG: return m_simdHandleCache->Vector128LongHandle; case CORINFO_TYPE_UINT: return m_simdHandleCache->Vector128UIntHandle; case CORINFO_TYPE_ULONG: return m_simdHandleCache->Vector128ULongHandle; case CORINFO_TYPE_NATIVEINT: return m_simdHandleCache->Vector128NIntHandle; case CORINFO_TYPE_NATIVEUINT: return m_simdHandleCache->Vector128NUIntHandle; default: assert(!"Didn't find a class handle for simdType"); } } #ifdef TARGET_XARCH else if (simdType == TYP_SIMD32) { switch (simdBaseJitType) { case CORINFO_TYPE_FLOAT: return m_simdHandleCache->Vector256FloatHandle; case CORINFO_TYPE_DOUBLE: return m_simdHandleCache->Vector256DoubleHandle; case CORINFO_TYPE_INT: return m_simdHandleCache->Vector256IntHandle; case CORINFO_TYPE_USHORT: return m_simdHandleCache->Vector256UShortHandle; case CORINFO_TYPE_UBYTE: return m_simdHandleCache->Vector256UByteHandle; case CORINFO_TYPE_SHORT: return m_simdHandleCache->Vector256ShortHandle; case CORINFO_TYPE_BYTE: return m_simdHandleCache->Vector256ByteHandle; case CORINFO_TYPE_LONG: return m_simdHandleCache->Vector256LongHandle; case CORINFO_TYPE_UINT: return m_simdHandleCache->Vector256UIntHandle; case CORINFO_TYPE_ULONG: return m_simdHandleCache->Vector256ULongHandle; case CORINFO_TYPE_NATIVEINT: return m_simdHandleCache->Vector256NIntHandle; case CORINFO_TYPE_NATIVEUINT: return m_simdHandleCache->Vector256NUIntHandle; default: assert(!"Didn't find a class handle for simdType"); } } #endif // TARGET_XARCH #ifdef TARGET_ARM64 else if (simdType == TYP_SIMD8) { switch (simdBaseJitType) { case CORINFO_TYPE_FLOAT: return m_simdHandleCache->Vector64FloatHandle; case CORINFO_TYPE_DOUBLE: return m_simdHandleCache->Vector64DoubleHandle; case CORINFO_TYPE_INT: return m_simdHandleCache->Vector64IntHandle; case CORINFO_TYPE_USHORT: return m_simdHandleCache->Vector64UShortHandle; case CORINFO_TYPE_UBYTE: return 
m_simdHandleCache->Vector64UByteHandle; case CORINFO_TYPE_SHORT: return m_simdHandleCache->Vector64ShortHandle; case CORINFO_TYPE_BYTE: return m_simdHandleCache->Vector64ByteHandle; case CORINFO_TYPE_UINT: return m_simdHandleCache->Vector64UIntHandle; case CORINFO_TYPE_LONG: return m_simdHandleCache->Vector64LongHandle; case CORINFO_TYPE_ULONG: return m_simdHandleCache->Vector64ULongHandle; case CORINFO_TYPE_NATIVEINT: return m_simdHandleCache->Vector64NIntHandle; case CORINFO_TYPE_NATIVEUINT: return m_simdHandleCache->Vector64NUIntHandle; default: assert(!"Didn't find a class handle for simdType"); } } #endif // TARGET_ARM64 return NO_CLASS_HANDLE; } //------------------------------------------------------------------------ // vnEncodesResultTypeForHWIntrinsic(NamedIntrinsic hwIntrinsicID): // // Arguments: // hwIntrinsicID -- The id for the HW intrinsic // // Return Value: // Returns true if this intrinsic requires value numbering to add an // extra SimdType argument that encodes the resulting type. // If we don't do this overloaded versions can return the same VN // leading to incorrect CSE subsitutions. // /* static */ bool Compiler::vnEncodesResultTypeForHWIntrinsic(NamedIntrinsic hwIntrinsicID) { int numArgs = HWIntrinsicInfo::lookupNumArgs(hwIntrinsicID); // HW Intrinsic's with -1 for numArgs have a varying number of args, so we currently // give themm a unique value number them, and don't add an extra argument. // if (numArgs == -1) { return false; } // We iterate over all of the different baseType's for this intrinsic in the HWIntrinsicInfo table // We set diffInsCount to the number of instructions that can execute differently. // unsigned diffInsCount = 0; #ifdef TARGET_XARCH instruction lastIns = INS_invalid; #endif for (var_types baseType = TYP_BYTE; (baseType <= TYP_DOUBLE); baseType = (var_types)(baseType + 1)) { instruction curIns = HWIntrinsicInfo::lookupIns(hwIntrinsicID, baseType); if (curIns != INS_invalid) { #ifdef TARGET_XARCH if (curIns != lastIns) { diffInsCount++; // remember the last valid instruction that we saw lastIns = curIns; } #elif defined(TARGET_ARM64) // On ARM64 we use the same instruction and specify an insOpt arrangement // so we always consider the instruction operation to be different // diffInsCount++; #endif // TARGET if (diffInsCount >= 2) { // We can early exit the loop now break; } } } // If we see two (or more) different instructions we need the extra VNF_SimdType arg return (diffInsCount >= 2); } //------------------------------------------------------------------------ // lookupId: Gets the NamedIntrinsic for a given method name and InstructionSet // // Arguments: // comp -- The compiler // sig -- The signature of the intrinsic // className -- The name of the class associated with the HWIntrinsic to lookup // methodName -- The name of the method associated with the HWIntrinsic to lookup // enclosingClassName -- The name of the enclosing class of X64 classes // // Return Value: // The NamedIntrinsic associated with methodName and isa NamedIntrinsic HWIntrinsicInfo::lookupId(Compiler* comp, CORINFO_SIG_INFO* sig, const char* className, const char* methodName, const char* enclosingClassName) { // TODO-Throughput: replace sequential search by binary search CORINFO_InstructionSet isa = lookupIsa(className, enclosingClassName); if (isa == InstructionSet_ILLEGAL) { return NI_Illegal; } bool isIsaSupported = comp->compSupportsHWIntrinsic(isa); bool isHardwareAcceleratedProp = (strcmp(methodName, "get_IsHardwareAccelerated") == 0); #ifdef TARGET_XARCH 
if (isHardwareAcceleratedProp) { // Special case: Some of Vector128/256 APIs are hardware accelerated with Sse1 and Avx1, // but we want IsHardwareAccelerated to return true only when all of them are (there are // still can be cases where e.g. Sse41 might give an additional boost for Vector128, but it's // not important enough to bump the minimal Sse version here) if (strcmp(className, "Vector128") == 0) { isa = InstructionSet_SSE2; } else if (strcmp(className, "Vector256") == 0) { isa = InstructionSet_AVX2; } } #endif if ((strcmp(methodName, "get_IsSupported") == 0) || isHardwareAcceleratedProp) { return isIsaSupported ? (comp->compExactlyDependsOn(isa) ? NI_IsSupported_True : NI_IsSupported_Dynamic) : NI_IsSupported_False; } else if (!isIsaSupported) { return NI_Throw_PlatformNotSupportedException; } for (int i = 0; i < (NI_HW_INTRINSIC_END - NI_HW_INTRINSIC_START - 1); i++) { const HWIntrinsicInfo& intrinsicInfo = hwIntrinsicInfoArray[i]; if (isa != hwIntrinsicInfoArray[i].isa) { continue; } int numArgs = static_cast<unsigned>(intrinsicInfo.numArgs); if ((numArgs != -1) && (sig->numArgs != static_cast<unsigned>(intrinsicInfo.numArgs))) { continue; } if (strcmp(methodName, intrinsicInfo.name) == 0) { return intrinsicInfo.id; } } // There are several helper intrinsics that are implemented in managed code // Those intrinsics will hit this code path and need to return NI_Illegal return NI_Illegal; } //------------------------------------------------------------------------ // lookupSimdSize: Gets the SimdSize for a given HWIntrinsic and signature // // Arguments: // id -- The ID associated with the HWIntrinsic to lookup // sig -- The signature of the HWIntrinsic to lookup // // Return Value: // The SIMD size for the HWIntrinsic associated with id and sig // // Remarks: // This function is only used by the importer. After importation, we can // get the SIMD size from the GenTreeHWIntrinsic node. 
unsigned HWIntrinsicInfo::lookupSimdSize(Compiler* comp, NamedIntrinsic id, CORINFO_SIG_INFO* sig) { unsigned simdSize = 0; if (tryLookupSimdSize(id, &simdSize)) { return simdSize; } CORINFO_CLASS_HANDLE typeHnd = nullptr; if (HWIntrinsicInfo::BaseTypeFromFirstArg(id)) { typeHnd = comp->info.compCompHnd->getArgClass(sig, sig->args); } else if (HWIntrinsicInfo::BaseTypeFromSecondArg(id)) { CORINFO_ARG_LIST_HANDLE secondArg = comp->info.compCompHnd->getArgNext(sig->args); typeHnd = comp->info.compCompHnd->getArgClass(sig, secondArg); } else { assert(JITtype2varType(sig->retType) == TYP_STRUCT); typeHnd = sig->retTypeSigClass; } CorInfoType simdBaseJitType = comp->getBaseJitTypeAndSizeOfSIMDType(typeHnd, &simdSize); assert((simdSize > 0) && (simdBaseJitType != CORINFO_TYPE_UNDEF)); return simdSize; } //------------------------------------------------------------------------ // isImmOp: Checks whether the HWIntrinsic node has an imm operand // // Arguments: // id -- The NamedIntrinsic associated with the HWIntrinsic to lookup // op -- The operand to check // // Return Value: // true if the node has an imm operand; otherwise, false bool HWIntrinsicInfo::isImmOp(NamedIntrinsic id, const GenTree* op) { #ifdef TARGET_XARCH if (HWIntrinsicInfo::lookupCategory(id) != HW_Category_IMM) { return false; } if (!HWIntrinsicInfo::MaybeImm(id)) { return true; } #elif defined(TARGET_ARM64) if (!HWIntrinsicInfo::HasImmediateOperand(id)) { return false; } #else #error Unsupported platform #endif if (genActualType(op->TypeGet()) != TYP_INT) { return false; } return true; } //------------------------------------------------------------------------ // getArgForHWIntrinsic: pop an argument from the stack and validate its type // // Arguments: // argType -- the required type of argument // argClass -- the class handle of argType // expectAddr -- if true indicates we are expecting type stack entry to be a TYP_BYREF. // newobjThis -- For CEE_NEWOBJ, this is the temp grabbed for the allocated uninitalized object. // // Return Value: // the validated argument // GenTree* Compiler::getArgForHWIntrinsic(var_types argType, CORINFO_CLASS_HANDLE argClass, bool expectAddr, GenTree* newobjThis) { GenTree* arg = nullptr; if (varTypeIsStruct(argType)) { if (!varTypeIsSIMD(argType)) { unsigned int argSizeBytes; (void)getBaseJitTypeAndSizeOfSIMDType(argClass, &argSizeBytes); argType = getSIMDTypeForSize(argSizeBytes); } assert(varTypeIsSIMD(argType)); if (newobjThis == nullptr) { arg = impSIMDPopStack(argType, expectAddr); assert(varTypeIsSIMD(arg->TypeGet())); } else { assert((newobjThis->gtOper == GT_ADDR) && (newobjThis->AsOp()->gtOp1->gtOper == GT_LCL_VAR)); arg = newobjThis; // push newobj result on type stack unsigned tmp = arg->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(argClass).NormaliseForStack()); } } else { assert(varTypeIsArithmetic(argType)); arg = impPopStack().val; assert(varTypeIsArithmetic(arg->TypeGet())); assert(genActualType(arg->gtType) == genActualType(argType)); } return arg; } //------------------------------------------------------------------------ // addRangeCheckIfNeeded: add a GT_BOUNDS_CHECK node for non-full-range imm-intrinsic // // Arguments: // intrinsic -- intrinsic ID // immOp -- the immediate operand of the intrinsic // mustExpand -- true if the compiler is compiling the fallback(GT_CALL) of this intrinsics // immLowerBound -- lower incl. 
bound for a value of the immediate operand (for a non-full-range imm-intrinsic) // immUpperBound -- upper incl. bound for a value of the immediate operand (for a non-full-range imm-intrinsic) // // Return Value: // add a GT_BOUNDS_CHECK node for non-full-range imm-intrinsic, which would throw ArgumentOutOfRangeException // when the imm-argument is not in the valid range // GenTree* Compiler::addRangeCheckIfNeeded( NamedIntrinsic intrinsic, GenTree* immOp, bool mustExpand, int immLowerBound, int immUpperBound) { assert(immOp != nullptr); // Full-range imm-intrinsics do not need the range-check // because the imm-parameter of the intrinsic method is a byte. // AVX2 Gather intrinsics no not need the range-check // because their imm-parameter have discrete valid values that are handle by managed code if (mustExpand && HWIntrinsicInfo::isImmOp(intrinsic, immOp) #ifdef TARGET_XARCH && !HWIntrinsicInfo::isAVX2GatherIntrinsic(intrinsic) && !HWIntrinsicInfo::HasFullRangeImm(intrinsic) #endif ) { assert(!immOp->IsCnsIntOrI()); assert(varTypeIsUnsigned(immOp)); return addRangeCheckForHWIntrinsic(immOp, immLowerBound, immUpperBound); } else { return immOp; } } //------------------------------------------------------------------------ // addRangeCheckForHWIntrinsic: add a GT_BOUNDS_CHECK node for an intrinsic // // Arguments: // immOp -- the immediate operand of the intrinsic // immLowerBound -- lower incl. bound for a value of the immediate operand (for a non-full-range imm-intrinsic) // immUpperBound -- upper incl. bound for a value of the immediate operand (for a non-full-range imm-intrinsic) // // Return Value: // add a GT_BOUNDS_CHECK node for non-full-range imm-intrinsic, which would throw ArgumentOutOfRangeException // when the imm-argument is not in the valid range // GenTree* Compiler::addRangeCheckForHWIntrinsic(GenTree* immOp, int immLowerBound, int immUpperBound) { // Bounds check for value of an immediate operand // (immLowerBound <= immOp) && (immOp <= immUpperBound) // // implemented as a single comparison in the form of // // if ((immOp - immLowerBound) >= (immUpperBound - immLowerBound + 1)) // { // throw new ArgumentOutOfRangeException(); // } // // The value of (immUpperBound - immLowerBound + 1) is denoted as adjustedUpperBound. const ssize_t adjustedUpperBound = (ssize_t)immUpperBound - immLowerBound + 1; GenTree* adjustedUpperBoundNode = gtNewIconNode(adjustedUpperBound, TYP_INT); GenTree* immOpDup = nullptr; immOp = impCloneExpr(immOp, &immOpDup, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone an immediate operand for immediate value bounds check")); if (immLowerBound != 0) { immOpDup = gtNewOperNode(GT_SUB, TYP_INT, immOpDup, gtNewIconNode(immLowerBound, TYP_INT)); } GenTreeBoundsChk* hwIntrinsicChk = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(immOpDup, adjustedUpperBoundNode, SCK_ARG_RNG_EXCPN); return gtNewOperNode(GT_COMMA, immOp->TypeGet(), hwIntrinsicChk, immOp); } //------------------------------------------------------------------------ // compSupportsHWIntrinsic: check whether a given instruction is enabled via configuration // // Arguments: // isa - Instruction set // // Return Value: // true iff the given instruction set is enabled via configuration (environment variables, etc.). 
bool Compiler::compSupportsHWIntrinsic(CORINFO_InstructionSet isa) { return compHWIntrinsicDependsOn(isa) && (supportSIMDTypes() || HWIntrinsicInfo::isScalarIsa(isa)) && ( #ifdef DEBUG JitConfig.EnableIncompleteISAClass() || #endif HWIntrinsicInfo::isFullyImplementedIsa(isa)); } //------------------------------------------------------------------------ // impIsTableDrivenHWIntrinsic: // // Arguments: // intrinsicId - HW intrinsic id // category - category of a HW intrinsic // // Return Value: // returns true if this category can be table-driven in the importer // static bool impIsTableDrivenHWIntrinsic(NamedIntrinsic intrinsicId, HWIntrinsicCategory category) { return (category != HW_Category_Special) && HWIntrinsicInfo::RequiresCodegen(intrinsicId) && !HWIntrinsicInfo::HasSpecialImport(intrinsicId); } //------------------------------------------------------------------------ // isSupportedBaseType // // Arguments: // intrinsicId - HW intrinsic id // baseJitType - Base JIT type of the intrinsic. // // Return Value: // returns true if the baseType is supported for given intrinsic. // static bool isSupportedBaseType(NamedIntrinsic intrinsic, CorInfoType baseJitType) { if (baseJitType == CORINFO_TYPE_UNDEF) { return false; } var_types baseType = JitType2PreciseVarType(baseJitType); // We don't actually check the intrinsic outside of the false case as we expect // the exposed managed signatures are either generic and support all types // or they are explicit and support the type indicated. if (varTypeIsArithmetic(baseType)) { return true; } #ifdef TARGET_XARCH assert((intrinsic == NI_Vector128_As) || (intrinsic == NI_Vector128_AsByte) || (intrinsic == NI_Vector128_AsDouble) || (intrinsic == NI_Vector128_AsInt16) || (intrinsic == NI_Vector128_AsInt32) || (intrinsic == NI_Vector128_AsInt64) || (intrinsic == NI_Vector128_AsSByte) || (intrinsic == NI_Vector128_AsSingle) || (intrinsic == NI_Vector128_AsUInt16) || (intrinsic == NI_Vector128_AsUInt32) || (intrinsic == NI_Vector128_AsUInt64) || (intrinsic == NI_Vector128_get_AllBitsSet) || (intrinsic == NI_Vector128_get_Count) || (intrinsic == NI_Vector128_get_Zero) || (intrinsic == NI_Vector128_GetElement) || (intrinsic == NI_Vector128_WithElement) || (intrinsic == NI_Vector128_ToScalar) || (intrinsic == NI_Vector128_ToVector256) || (intrinsic == NI_Vector128_ToVector256Unsafe) || (intrinsic == NI_Vector256_As) || (intrinsic == NI_Vector256_AsByte) || (intrinsic == NI_Vector256_AsDouble) || (intrinsic == NI_Vector256_AsInt16) || (intrinsic == NI_Vector256_AsInt32) || (intrinsic == NI_Vector256_AsInt64) || (intrinsic == NI_Vector256_AsSByte) || (intrinsic == NI_Vector256_AsSingle) || (intrinsic == NI_Vector256_AsUInt16) || (intrinsic == NI_Vector256_AsUInt32) || (intrinsic == NI_Vector256_AsUInt64) || (intrinsic == NI_Vector256_get_AllBitsSet) || (intrinsic == NI_Vector256_get_Count) || (intrinsic == NI_Vector256_get_Zero) || (intrinsic == NI_Vector256_GetElement) || (intrinsic == NI_Vector256_WithElement) || (intrinsic == NI_Vector256_GetLower) || (intrinsic == NI_Vector256_ToScalar)); #endif // TARGET_XARCH #ifdef TARGET_ARM64 assert((intrinsic == NI_Vector64_As) || (intrinsic == NI_Vector64_AsByte) || (intrinsic == NI_Vector64_AsDouble) || (intrinsic == NI_Vector64_AsInt16) || (intrinsic == NI_Vector64_AsInt32) || (intrinsic == NI_Vector64_AsInt64) || (intrinsic == NI_Vector64_AsSByte) || (intrinsic == NI_Vector64_AsSingle) || (intrinsic == NI_Vector64_AsUInt16) || (intrinsic == NI_Vector64_AsUInt32) || (intrinsic == NI_Vector64_AsUInt64) || 
(intrinsic == NI_Vector64_get_AllBitsSet) || (intrinsic == NI_Vector64_get_Count) || (intrinsic == NI_Vector64_get_Zero) || (intrinsic == NI_Vector64_GetElement) || (intrinsic == NI_Vector64_ToScalar) || (intrinsic == NI_Vector64_ToVector128) || (intrinsic == NI_Vector64_ToVector128Unsafe) || (intrinsic == NI_Vector64_WithElement) || (intrinsic == NI_Vector128_As) || (intrinsic == NI_Vector128_AsByte) || (intrinsic == NI_Vector128_AsDouble) || (intrinsic == NI_Vector128_AsInt16) || (intrinsic == NI_Vector128_AsInt32) || (intrinsic == NI_Vector128_AsInt64) || (intrinsic == NI_Vector128_AsSByte) || (intrinsic == NI_Vector128_AsSingle) || (intrinsic == NI_Vector128_AsUInt16) || (intrinsic == NI_Vector128_AsUInt32) || (intrinsic == NI_Vector128_AsUInt64) || (intrinsic == NI_Vector128_get_AllBitsSet) || (intrinsic == NI_Vector128_get_Count) || (intrinsic == NI_Vector128_get_Zero) || (intrinsic == NI_Vector128_GetElement) || (intrinsic == NI_Vector128_GetLower) || (intrinsic == NI_Vector128_GetUpper) || (intrinsic == NI_Vector128_ToScalar) || (intrinsic == NI_Vector128_WithElement)); #endif // TARGET_ARM64 return false; } // HWIntrinsicSignatureReader: a helper class that "reads" a list of hardware intrinsic arguments and stores // the corresponding argument type descriptors as the fields of the class instance. // struct HWIntrinsicSignatureReader final { // Read: enumerates the list of arguments of a hardware intrinsic and stores the CORINFO_CLASS_HANDLE // and var_types values of each operand into the corresponding fields of the class instance. // // Arguments: // compHnd -- an instance of COMP_HANDLE class. // sig -- a hardware intrinsic signature. // void Read(COMP_HANDLE compHnd, CORINFO_SIG_INFO* sig) { CORINFO_ARG_LIST_HANDLE args = sig->args; if (sig->numArgs > 0) { op1JitType = strip(compHnd->getArgType(sig, args, &op1ClsHnd)); if (sig->numArgs > 1) { args = compHnd->getArgNext(args); op2JitType = strip(compHnd->getArgType(sig, args, &op2ClsHnd)); } if (sig->numArgs > 2) { args = compHnd->getArgNext(args); op3JitType = strip(compHnd->getArgType(sig, args, &op3ClsHnd)); } if (sig->numArgs > 3) { args = compHnd->getArgNext(args); op4JitType = strip(compHnd->getArgType(sig, args, &op4ClsHnd)); } } } CORINFO_CLASS_HANDLE op1ClsHnd; CORINFO_CLASS_HANDLE op2ClsHnd; CORINFO_CLASS_HANDLE op3ClsHnd; CORINFO_CLASS_HANDLE op4ClsHnd; CorInfoType op1JitType; CorInfoType op2JitType; CorInfoType op3JitType; CorInfoType op4JitType; var_types GetOp1Type() const { return JITtype2varType(op1JitType); } var_types GetOp2Type() const { return JITtype2varType(op2JitType); } var_types GetOp3Type() const { return JITtype2varType(op3JitType); } var_types GetOp4Type() const { return JITtype2varType(op4JitType); } }; //------------------------------------------------------------------------ // impHWIntrinsic: Import a hardware intrinsic as a GT_HWINTRINSIC node if possible // // Arguments: // intrinsic -- id of the intrinsic function. // clsHnd -- class handle containing the intrinsic function. // method -- method handle of the intrinsic function. 
// sig -- signature of the intrinsic call // mustExpand -- true if the intrinsic must return a GenTree*; otherwise, false // Return Value: // The GT_HWINTRINSIC node, or nullptr if not a supported intrinsic // GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand) { HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsic); CORINFO_InstructionSet isa = HWIntrinsicInfo::lookupIsa(intrinsic); int numArgs = sig->numArgs; var_types retType = JITtype2varType(sig->retType); CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; if ((retType == TYP_STRUCT) && supportSIMDTypes()) { unsigned int sizeBytes; simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(sig->retTypeSigClass, &sizeBytes); if (HWIntrinsicInfo::IsMultiReg(intrinsic)) { assert(sizeBytes == 0); } else { assert(sizeBytes != 0); // We want to return early here for cases where retType was TYP_STRUCT as per method signature and // rather than deferring the decision after getting the simdBaseJitType of arg. if (!isSupportedBaseType(intrinsic, simdBaseJitType)) { return nullptr; } retType = getSIMDTypeForSize(sizeBytes); } } simdBaseJitType = getBaseJitTypeFromArgIfNeeded(intrinsic, clsHnd, sig, simdBaseJitType); if (simdBaseJitType == CORINFO_TYPE_UNDEF) { if ((category == HW_Category_Scalar) || HWIntrinsicInfo::isScalarIsa(isa)) { simdBaseJitType = sig->retType; if (simdBaseJitType == CORINFO_TYPE_VOID) { simdBaseJitType = CORINFO_TYPE_UNDEF; } } else { assert(supportSIMDTypes()); unsigned int sizeBytes; simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(clsHnd, &sizeBytes); assert((category == HW_Category_Special) || (category == HW_Category_Helper) || (sizeBytes != 0)); } } // Immediately return if the category is other than scalar/special and this is not a supported base type. if ((category != HW_Category_Special) && (category != HW_Category_Scalar) && !HWIntrinsicInfo::isScalarIsa(isa) && !isSupportedBaseType(intrinsic, simdBaseJitType)) { return nullptr; } var_types simdBaseType = TYP_UNKNOWN; GenTree* immOp = nullptr; if (simdBaseJitType != CORINFO_TYPE_UNDEF) { simdBaseType = JitType2PreciseVarType(simdBaseJitType); } HWIntrinsicSignatureReader sigReader; sigReader.Read(info.compCompHnd, sig); #ifdef TARGET_ARM64 if ((intrinsic == NI_AdvSimd_Insert) || (intrinsic == NI_AdvSimd_InsertScalar) || (intrinsic == NI_AdvSimd_LoadAndInsertScalar)) { assert(sig->numArgs == 3); immOp = impStackTop(1).val; assert(HWIntrinsicInfo::isImmOp(intrinsic, immOp)); } else if (intrinsic == NI_AdvSimd_Arm64_InsertSelectedScalar) { // InsertSelectedScalar intrinsic has two immediate operands. // Since all the remaining intrinsics on both platforms have only one immediate // operand, in order to not complicate the shared logic even further we ensure here that // 1) The second immediate operand immOp2 is constant and // 2) its value belongs to [0, sizeof(op3) / sizeof(op3.BaseType)). // If either is false, we should fallback to the managed implementation Insert(dst, dstIdx, Extract(src, // srcIdx)). // The check for the first immediate operand immOp will use the same logic as other intrinsics that have an // immediate operand. 
GenTree* immOp2 = nullptr; assert(sig->numArgs == 4); immOp = impStackTop(2).val; immOp2 = impStackTop().val; assert(HWIntrinsicInfo::isImmOp(intrinsic, immOp)); assert(HWIntrinsicInfo::isImmOp(intrinsic, immOp2)); if (!immOp2->IsCnsIntOrI()) { assert(HWIntrinsicInfo::NoJmpTableImm(intrinsic)); return impNonConstFallback(intrinsic, retType, simdBaseJitType); } unsigned int otherSimdSize = 0; CorInfoType otherBaseJitType = getBaseJitTypeAndSizeOfSIMDType(sigReader.op3ClsHnd, &otherSimdSize); var_types otherBaseType = JitType2PreciseVarType(otherBaseJitType); assert(otherBaseJitType == simdBaseJitType); int immLowerBound2 = 0; int immUpperBound2 = 0; HWIntrinsicInfo::lookupImmBounds(intrinsic, otherSimdSize, otherBaseType, &immLowerBound2, &immUpperBound2); const int immVal2 = (int)immOp2->AsIntCon()->IconValue(); if ((immVal2 < immLowerBound2) || (immVal2 > immUpperBound2)) { assert(!mustExpand); return nullptr; } } else #endif if ((sig->numArgs > 0) && HWIntrinsicInfo::isImmOp(intrinsic, impStackTop().val)) { // NOTE: The following code assumes that for all intrinsics // taking an immediate operand, that operand will be last. immOp = impStackTop().val; } const unsigned simdSize = HWIntrinsicInfo::lookupSimdSize(this, intrinsic, sig); int immLowerBound = 0; int immUpperBound = 0; bool hasFullRangeImm = false; if (immOp != nullptr) { #ifdef TARGET_XARCH immUpperBound = HWIntrinsicInfo::lookupImmUpperBound(intrinsic); hasFullRangeImm = HWIntrinsicInfo::HasFullRangeImm(intrinsic); #elif defined(TARGET_ARM64) if (category == HW_Category_SIMDByIndexedElement) { CorInfoType indexedElementBaseJitType; var_types indexedElementBaseType; unsigned int indexedElementSimdSize = 0; if (numArgs == 3) { indexedElementBaseJitType = getBaseJitTypeAndSizeOfSIMDType(sigReader.op2ClsHnd, &indexedElementSimdSize); indexedElementBaseType = JitType2PreciseVarType(indexedElementBaseJitType); } else { assert(numArgs == 4); indexedElementBaseJitType = getBaseJitTypeAndSizeOfSIMDType(sigReader.op3ClsHnd, &indexedElementSimdSize); indexedElementBaseType = JitType2PreciseVarType(indexedElementBaseJitType); if (intrinsic == NI_Dp_DotProductBySelectedQuadruplet) { assert(((simdBaseType == TYP_INT) && (indexedElementBaseType == TYP_BYTE)) || ((simdBaseType == TYP_UINT) && (indexedElementBaseType == TYP_UBYTE))); // The second source operand of sdot, udot instructions is an indexed 32-bit element. 
indexedElementBaseJitType = simdBaseJitType; indexedElementBaseType = simdBaseType; } } assert(indexedElementBaseType == simdBaseType); HWIntrinsicInfo::lookupImmBounds(intrinsic, indexedElementSimdSize, simdBaseType, &immLowerBound, &immUpperBound); } else { HWIntrinsicInfo::lookupImmBounds(intrinsic, simdSize, simdBaseType, &immLowerBound, &immUpperBound); } #endif if (!hasFullRangeImm && immOp->IsCnsIntOrI()) { const int ival = (int)immOp->AsIntCon()->IconValue(); bool immOutOfRange; #ifdef TARGET_XARCH if (HWIntrinsicInfo::isAVX2GatherIntrinsic(intrinsic)) { immOutOfRange = (ival != 1) && (ival != 2) && (ival != 4) && (ival != 8); } else #endif { immOutOfRange = (ival < immLowerBound) || (ival > immUpperBound); } if (immOutOfRange) { assert(!mustExpand); // The imm-HWintrinsics that do not accept all imm8 values may throw // ArgumentOutOfRangeException when the imm argument is not in the valid range return nullptr; } } else if (!immOp->IsCnsIntOrI()) { if (HWIntrinsicInfo::NoJmpTableImm(intrinsic)) { return impNonConstFallback(intrinsic, retType, simdBaseJitType); } else if (!mustExpand) { // When the imm-argument is not a constant and we are not being forced to expand, we need to // return nullptr so a GT_CALL to the intrinsic method is emitted instead. The // intrinsic method is recursive and will be forced to expand, at which point // we emit some less efficient fallback code. return nullptr; } } } if (HWIntrinsicInfo::IsFloatingPointUsed(intrinsic)) { // Set `compFloatingPointUsed` to cover the scenario where an intrinsic is operating on SIMD fields, but // where no SIMD local vars are in use. This is the same logic as is used for FEATURE_SIMD. compFloatingPointUsed = true; } // table-driven importer of simple intrinsics if (impIsTableDrivenHWIntrinsic(intrinsic, category)) { const bool isScalar = (category == HW_Category_Scalar); assert(numArgs >= 0); if (!isScalar && ((HWIntrinsicInfo::lookupIns(intrinsic, simdBaseType) == INS_invalid) || ((simdSize != 8) && (simdSize != 16) && (simdSize != 32)))) { assert(!"Unexpected HW Intrinsic"); return nullptr; } GenTree* op1 = nullptr; GenTree* op2 = nullptr; GenTree* op3 = nullptr; GenTree* op4 = nullptr; GenTreeHWIntrinsic* retNode = nullptr; switch (numArgs) { case 0: assert(!isScalar); retNode = gtNewSimdHWIntrinsicNode(retType, intrinsic, simdBaseJitType, simdSize); break; case 1: op1 = getArgForHWIntrinsic(sigReader.GetOp1Type(), sigReader.op1ClsHnd); if ((category == HW_Category_MemoryLoad) && op1->OperIs(GT_CAST)) { // Although the API specifies a pointer, if what we have is a BYREF, that's what // we really want, so throw away the cast. if (op1->gtGetOp1()->TypeGet() == TYP_BYREF) { op1 = op1->gtGetOp1(); } } retNode = isScalar ? 
gtNewScalarHWIntrinsicNode(retType, op1, intrinsic) : gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize); #if defined(TARGET_XARCH) switch (intrinsic) { case NI_SSE41_ConvertToVector128Int16: case NI_SSE41_ConvertToVector128Int32: case NI_SSE41_ConvertToVector128Int64: case NI_AVX2_BroadcastScalarToVector128: case NI_AVX2_BroadcastScalarToVector256: case NI_AVX2_ConvertToVector256Int16: case NI_AVX2_ConvertToVector256Int32: case NI_AVX2_ConvertToVector256Int64: { // These intrinsics have both pointer and vector overloads // We want to be able to differentiate between them so lets // just track the aux type as a ptr or undefined, depending CorInfoType auxiliaryType = CORINFO_TYPE_UNDEF; if (!varTypeIsSIMD(op1->TypeGet())) { auxiliaryType = CORINFO_TYPE_PTR; } retNode->AsHWIntrinsic()->SetAuxiliaryJitType(auxiliaryType); break; } default: { break; } } #endif // TARGET_XARCH break; case 2: op2 = getArgForHWIntrinsic(sigReader.GetOp2Type(), sigReader.op2ClsHnd); op2 = addRangeCheckIfNeeded(intrinsic, op2, mustExpand, immLowerBound, immUpperBound); op1 = getArgForHWIntrinsic(sigReader.GetOp1Type(), sigReader.op1ClsHnd); retNode = isScalar ? gtNewScalarHWIntrinsicNode(retType, op1, op2, intrinsic) : gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); #ifdef TARGET_XARCH if ((intrinsic == NI_SSE42_Crc32) || (intrinsic == NI_SSE42_X64_Crc32)) { // TODO-XArch-Cleanup: currently we use the simdBaseJitType to bring the type of the second argument // to the code generator. May encode the overload info in other way. retNode->AsHWIntrinsic()->SetSimdBaseJitType(sigReader.op2JitType); } #elif defined(TARGET_ARM64) switch (intrinsic) { case NI_Crc32_ComputeCrc32: case NI_Crc32_ComputeCrc32C: case NI_Crc32_Arm64_ComputeCrc32: case NI_Crc32_Arm64_ComputeCrc32C: retNode->AsHWIntrinsic()->SetSimdBaseJitType(sigReader.op2JitType); break; case NI_AdvSimd_AddWideningUpper: case NI_AdvSimd_SubtractWideningUpper: assert(varTypeIsSIMD(op1->TypeGet())); retNode->AsHWIntrinsic()->SetAuxiliaryJitType(getBaseJitTypeOfSIMDType(sigReader.op1ClsHnd)); break; case NI_AdvSimd_Arm64_AddSaturateScalar: assert(varTypeIsSIMD(op2->TypeGet())); retNode->AsHWIntrinsic()->SetAuxiliaryJitType(getBaseJitTypeOfSIMDType(sigReader.op2ClsHnd)); break; case NI_ArmBase_Arm64_MultiplyHigh: if (sig->retType == CORINFO_TYPE_ULONG) { retNode->AsHWIntrinsic()->SetSimdBaseJitType(CORINFO_TYPE_ULONG); } else { assert(sig->retType == CORINFO_TYPE_LONG); retNode->AsHWIntrinsic()->SetSimdBaseJitType(CORINFO_TYPE_LONG); } break; default: break; } #endif break; case 3: op3 = getArgForHWIntrinsic(sigReader.GetOp3Type(), sigReader.op3ClsHnd); op2 = getArgForHWIntrinsic(sigReader.GetOp2Type(), sigReader.op2ClsHnd); op1 = getArgForHWIntrinsic(sigReader.GetOp1Type(), sigReader.op1ClsHnd); #ifdef TARGET_ARM64 if (intrinsic == NI_AdvSimd_LoadAndInsertScalar) { op2 = addRangeCheckIfNeeded(intrinsic, op2, mustExpand, immLowerBound, immUpperBound); if (op1->OperIs(GT_CAST)) { // Although the API specifies a pointer, if what we have is a BYREF, that's what // we really want, so throw away the cast. if (op1->gtGetOp1()->TypeGet() == TYP_BYREF) { op1 = op1->gtGetOp1(); } } } else if ((intrinsic == NI_AdvSimd_Insert) || (intrinsic == NI_AdvSimd_InsertScalar)) { op2 = addRangeCheckIfNeeded(intrinsic, op2, mustExpand, immLowerBound, immUpperBound); } else #endif { op3 = addRangeCheckIfNeeded(intrinsic, op3, mustExpand, immLowerBound, immUpperBound); } retNode = isScalar ? 
gtNewScalarHWIntrinsicNode(retType, op1, op2, op3, intrinsic) : gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseJitType, simdSize); #ifdef TARGET_XARCH if ((intrinsic == NI_AVX2_GatherVector128) || (intrinsic == NI_AVX2_GatherVector256)) { assert(varTypeIsSIMD(op2->TypeGet())); retNode->AsHWIntrinsic()->SetAuxiliaryJitType(getBaseJitTypeOfSIMDType(sigReader.op2ClsHnd)); } #endif break; #ifdef TARGET_ARM64 case 4: op4 = getArgForHWIntrinsic(sigReader.GetOp4Type(), sigReader.op4ClsHnd); op4 = addRangeCheckIfNeeded(intrinsic, op4, mustExpand, immLowerBound, immUpperBound); op3 = getArgForHWIntrinsic(sigReader.GetOp3Type(), sigReader.op3ClsHnd); op2 = getArgForHWIntrinsic(sigReader.GetOp2Type(), sigReader.op2ClsHnd); op1 = getArgForHWIntrinsic(sigReader.GetOp1Type(), sigReader.op1ClsHnd); assert(!isScalar); retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, op4, intrinsic, simdBaseJitType, simdSize); break; #endif default: return nullptr; } const bool isMemoryStore = retNode->OperIsMemoryStore(); if (isMemoryStore || retNode->OperIsMemoryLoad()) { if (isMemoryStore) { // A MemoryStore operation is an assignment retNode->gtFlags |= GTF_ASG; } // This operation contains an implicit indirection // it could point into the global heap or // it could throw a null reference exception. // retNode->gtFlags |= (GTF_GLOB_REF | GTF_EXCEPT); } return retNode; } return impSpecialIntrinsic(intrinsic, clsHnd, method, sig, simdBaseJitType, retType, simdSize); } #endif // FEATURE_HW_INTRINSICS
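The importer above rejects out-of-range constant immediates (so the managed helper can throw ArgumentOutOfRangeException at run time) and takes a fallback path when the immediate operand is not a constant. The following is a minimal standalone sketch of that decision, using hypothetical simplified names rather than the real JIT data structures such as GenTree or Compiler:

    // Minimal sketch of the immediate-operand handling in Compiler::impHWIntrinsic above.
    // The enum and function names here are hypothetical simplifications, not real JIT interfaces.
    #include <cstdio>

    enum class ImmDisposition
    {
        Expand,          // constant imm in range: build the GT_HWINTRINSIC node
        Reject,          // constant imm out of range: return nullptr so the managed
                         // helper can throw ArgumentOutOfRangeException
        NonConstFallback // non-constant imm: use a non-const expansion or leave a
                         // call to the recursive managed method
    };

    static ImmDisposition classifyImmediate(bool isConstant, int value, int lowerBound, int upperBound)
    {
        if (!isConstant)
        {
            return ImmDisposition::NonConstFallback;
        }
        if ((value < lowerBound) || (value > upperBound))
        {
            return ImmDisposition::Reject;
        }
        return ImmDisposition::Expand;
    }

    int main()
    {
        // Example bounds for an element index into a Vector128<int>: [0, 3].
        printf("%d\n", static_cast<int>(classifyImmediate(true, 2, 0, 3)));  // Expand
        printf("%d\n", static_cast<int>(classifyImmediate(true, 9, 0, 3)));  // Reject
        printf("%d\n", static_cast<int>(classifyImmediate(false, 0, 0, 3))); // NonConstFallback
        return 0;
    }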
1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types, but the code base was sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set, a configuration that is not supported on Arm64. On Arm64, SIMD features are always needed to support ABI handling. Modified all such usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
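A minimal sketch of the kind of change involved, distilled from the before/after file contents recorded below (impSpecialIntrinsic in hwintrinsicarm64.cpp); only the guard condition changes:

    // Before: bails out of intrinsic expansion when featureSIMD is false,
    // e.g. under COMPlus_FeatureSIMD=0, which Arm64 cannot honor because
    // ABI handling always requires SIMD types.
    if (!featureSIMD || !IsBaselineSimdIsaSupported())
    {
        return nullptr;
    }

    // After: query supportSIMDTypes() instead, which reports the correct
    // answer on Arm64.
    if (!supportSIMDTypes() || !IsBaselineSimdIsaSupported())
    {
        return nullptr;
    }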
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but the code base was sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set, a configuration that is not supported on Arm64. On Arm64, SIMD features are always needed to support ABI handling. Modified all such usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/jit/hwintrinsicarm64.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "jitpch.h" #include "hwintrinsic.h" #ifdef FEATURE_HW_INTRINSICS //------------------------------------------------------------------------ // Arm64VersionOfIsa: Gets the corresponding 64-bit only InstructionSet for a given InstructionSet // // Arguments: // isa -- The InstructionSet ID // // Return Value: // The 64-bit only InstructionSet associated with isa static CORINFO_InstructionSet Arm64VersionOfIsa(CORINFO_InstructionSet isa) { switch (isa) { case InstructionSet_AdvSimd: return InstructionSet_AdvSimd_Arm64; case InstructionSet_Aes: return InstructionSet_Aes_Arm64; case InstructionSet_ArmBase: return InstructionSet_ArmBase_Arm64; case InstructionSet_Crc32: return InstructionSet_Crc32_Arm64; case InstructionSet_Dp: return InstructionSet_Dp_Arm64; case InstructionSet_Sha1: return InstructionSet_Sha1_Arm64; case InstructionSet_Sha256: return InstructionSet_Sha256_Arm64; case InstructionSet_Rdm: return InstructionSet_Rdm_Arm64; default: return InstructionSet_NONE; } } //------------------------------------------------------------------------ // lookupInstructionSet: Gets the InstructionSet for a given class name // // Arguments: // className -- The name of the class associated with the InstructionSet to lookup // // Return Value: // The InstructionSet associated with className static CORINFO_InstructionSet lookupInstructionSet(const char* className) { assert(className != nullptr); if (className[0] == 'A') { if (strcmp(className, "AdvSimd") == 0) { return InstructionSet_AdvSimd; } if (strcmp(className, "Aes") == 0) { return InstructionSet_Aes; } if (strcmp(className, "ArmBase") == 0) { return InstructionSet_ArmBase; } } else if (className[0] == 'C') { if (strcmp(className, "Crc32") == 0) { return InstructionSet_Crc32; } } else if (className[0] == 'D') { if (strcmp(className, "Dp") == 0) { return InstructionSet_Dp; } } else if (className[0] == 'R') { if (strcmp(className, "Rdm") == 0) { return InstructionSet_Rdm; } } else if (className[0] == 'S') { if (strcmp(className, "Sha1") == 0) { return InstructionSet_Sha1; } if (strcmp(className, "Sha256") == 0) { return InstructionSet_Sha256; } } else if (className[0] == 'V') { if (strncmp(className, "Vector64", 8) == 0) { return InstructionSet_Vector64; } else if (strncmp(className, "Vector128", 9) == 0) { return InstructionSet_Vector128; } } return InstructionSet_ILLEGAL; } //------------------------------------------------------------------------ // lookupIsa: Gets the InstructionSet for a given class name and enclsoing class name // // Arguments: // className -- The name of the class associated with the InstructionSet to lookup // enclosingClassName -- The name of the enclosing class or nullptr if one doesn't exist // // Return Value: // The InstructionSet associated with className and enclosingClassName CORINFO_InstructionSet HWIntrinsicInfo::lookupIsa(const char* className, const char* enclosingClassName) { assert(className != nullptr); if (strcmp(className, "Arm64") == 0) { assert(enclosingClassName != nullptr); return Arm64VersionOfIsa(lookupInstructionSet(enclosingClassName)); } else { return lookupInstructionSet(className); } } //------------------------------------------------------------------------ // isFullyImplementedIsa: Gets a value that indicates whether the InstructionSet is fully implemented // // Arguments: // isa - The InstructionSet to check // // Return Value: // true if isa is 
supported; otherwise, false bool HWIntrinsicInfo::isFullyImplementedIsa(CORINFO_InstructionSet isa) { switch (isa) { // These ISAs are fully implemented case InstructionSet_AdvSimd: case InstructionSet_AdvSimd_Arm64: case InstructionSet_Aes: case InstructionSet_Aes_Arm64: case InstructionSet_ArmBase: case InstructionSet_ArmBase_Arm64: case InstructionSet_Crc32: case InstructionSet_Crc32_Arm64: case InstructionSet_Dp: case InstructionSet_Dp_Arm64: case InstructionSet_Rdm: case InstructionSet_Rdm_Arm64: case InstructionSet_Sha1: case InstructionSet_Sha1_Arm64: case InstructionSet_Sha256: case InstructionSet_Sha256_Arm64: case InstructionSet_Vector64: case InstructionSet_Vector128: return true; default: return false; } } //------------------------------------------------------------------------ // isScalarIsa: Gets a value that indicates whether the InstructionSet is scalar // // Arguments: // isa - The InstructionSet to check // // Return Value: // true if isa is scalar; otherwise, false bool HWIntrinsicInfo::isScalarIsa(CORINFO_InstructionSet isa) { switch (isa) { case InstructionSet_ArmBase: case InstructionSet_ArmBase_Arm64: case InstructionSet_Crc32: case InstructionSet_Crc32_Arm64: { return true; } default: { return false; } } } //------------------------------------------------------------------------ // lookupImmBounds: Gets the lower and upper bounds for the imm-value of a given NamedIntrinsic // // Arguments: // intrinsic -- NamedIntrinsic associated with the HWIntrinsic to lookup // simdType -- vector size // baseType -- base type of the Vector64/128<T> // pImmLowerBound [OUT] - The lower incl. bound for a value of the intrinsic immediate operand // pImmUpperBound [OUT] - The upper incl. bound for a value of the intrinsic immediate operand // void HWIntrinsicInfo::lookupImmBounds( NamedIntrinsic intrinsic, int simdSize, var_types baseType, int* pImmLowerBound, int* pImmUpperBound) { HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsic); bool hasImmediateOperand = HasImmediateOperand(intrinsic); assert(hasImmediateOperand); assert(pImmLowerBound != nullptr); assert(pImmUpperBound != nullptr); int immLowerBound = 0; int immUpperBound = 0; if (category == HW_Category_ShiftLeftByImmediate) { // The left shift amount is in the range 0 to the element width in bits minus 1. immUpperBound = BITS_PER_BYTE * genTypeSize(baseType) - 1; } else if (category == HW_Category_ShiftRightByImmediate) { // The right shift amount, in the range 1 to the element width in bits. 
immLowerBound = 1; immUpperBound = BITS_PER_BYTE * genTypeSize(baseType); } else if (category == HW_Category_SIMDByIndexedElement) { immUpperBound = Compiler::getSIMDVectorLength(simdSize, baseType) - 1; } else { switch (intrinsic) { case NI_AdvSimd_DuplicateSelectedScalarToVector64: case NI_AdvSimd_DuplicateSelectedScalarToVector128: case NI_AdvSimd_Extract: case NI_AdvSimd_ExtractVector128: case NI_AdvSimd_ExtractVector64: case NI_AdvSimd_Insert: case NI_AdvSimd_InsertScalar: case NI_AdvSimd_LoadAndInsertScalar: case NI_AdvSimd_StoreSelectedScalar: case NI_AdvSimd_Arm64_DuplicateSelectedScalarToVector128: case NI_AdvSimd_Arm64_InsertSelectedScalar: immUpperBound = Compiler::getSIMDVectorLength(simdSize, baseType) - 1; break; default: unreached(); } } assert(immLowerBound <= immUpperBound); *pImmLowerBound = immLowerBound; *pImmUpperBound = immUpperBound; } //------------------------------------------------------------------------ // impNonConstFallback: generate alternate code when the imm-arg is not a compile-time constant // // Arguments: // intrinsic -- intrinsic ID // simdType -- Vector type // simdBaseJitType -- base JIT type of the Vector64/128<T> // // Return Value: // return the IR of semantic alternative on non-const imm-arg // GenTree* Compiler::impNonConstFallback(NamedIntrinsic intrinsic, var_types simdType, CorInfoType simdBaseJitType) { return nullptr; } //------------------------------------------------------------------------ // impSpecialIntrinsic: Import a hardware intrinsic that requires special handling as a GT_HWINTRINSIC node if possible // // Arguments: // intrinsic -- id of the intrinsic function. // clsHnd -- class handle containing the intrinsic function. // method -- method handle of the intrinsic function. // sig -- signature of the intrinsic call. // simdBaseJitType -- generic argument of the intrinsic. // retType -- return type of the intrinsic. // // Return Value: // The GT_HWINTRINSIC node, or nullptr if not a supported intrinsic // GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType, var_types retType, unsigned simdSize) { const HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsic); const int numArgs = sig->numArgs; // The vast majority of "special" intrinsics are Vector64/Vector128 methods. // The only exception is ArmBase.Yield which should be treated differently. 
if (intrinsic == NI_ArmBase_Yield) { assert(sig->numArgs == 0); assert(JITtype2varType(sig->retType) == TYP_VOID); assert(simdSize == 0); return gtNewScalarHWIntrinsicNode(TYP_VOID, intrinsic); } assert(category != HW_Category_Scalar); assert(!HWIntrinsicInfo::isScalarIsa(HWIntrinsicInfo::lookupIsa(intrinsic))); if (!featureSIMD || !IsBaselineSimdIsaSupported()) { return nullptr; } assert(numArgs >= 0); var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); GenTree* retNode = nullptr; GenTree* op1 = nullptr; GenTree* op2 = nullptr; GenTree* op3 = nullptr; GenTree* op4 = nullptr; switch (intrinsic) { case NI_Vector64_Abs: case NI_Vector128_Abs: { assert(sig->numArgs == 1); op1 = impSIMDPopStack(retType); retNode = gtNewSimdAbsNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_Add: case NI_Vector128_Add: case NI_Vector64_op_Addition: case NI_Vector128_op_Addition: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_AndNot: case NI_Vector128_AndNot: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdBinOpNode(GT_AND_NOT, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_As: case NI_Vector64_AsByte: case NI_Vector64_AsDouble: case NI_Vector64_AsInt16: case NI_Vector64_AsInt32: case NI_Vector64_AsInt64: case NI_Vector64_AsSByte: case NI_Vector64_AsSingle: case NI_Vector64_AsUInt16: case NI_Vector64_AsUInt32: case NI_Vector64_AsUInt64: case NI_Vector128_As: case NI_Vector128_AsByte: case NI_Vector128_AsDouble: case NI_Vector128_AsInt16: case NI_Vector128_AsInt32: case NI_Vector128_AsInt64: case NI_Vector128_AsSByte: case NI_Vector128_AsSingle: case NI_Vector128_AsUInt16: case NI_Vector128_AsUInt32: case NI_Vector128_AsUInt64: case NI_Vector128_AsVector: case NI_Vector128_AsVector4: case NI_Vector128_AsVector128: { assert(!sig->hasThis()); assert(numArgs == 1); // We fold away the cast here, as it only exists to satisfy // the type system. It is safe to do this here since the retNode type // and the signature return type are both the same TYP_SIMD. 
retNode = impSIMDPopStack(retType, /* expectAddr: */ false, sig->retTypeClass); SetOpLclRelatedToSIMDIntrinsic(retNode); assert(retNode->gtType == getSIMDTypeForSize(getSIMDTypeSizeInBytes(sig->retTypeSigClass))); break; } case NI_Vector64_BitwiseAnd: case NI_Vector128_BitwiseAnd: case NI_Vector64_op_BitwiseAnd: case NI_Vector128_op_BitwiseAnd: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdBinOpNode(GT_AND, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_BitwiseOr: case NI_Vector128_BitwiseOr: case NI_Vector64_op_BitwiseOr: case NI_Vector128_op_BitwiseOr: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdBinOpNode(GT_OR, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_Ceiling: case NI_Vector128_Ceiling: { assert(sig->numArgs == 1); assert(varTypeIsFloating(simdBaseType)); op1 = impSIMDPopStack(retType); retNode = gtNewSimdCeilNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_ConditionalSelect: case NI_Vector128_ConditionalSelect: { assert(sig->numArgs == 3); op3 = impSIMDPopStack(retType); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdCndSelNode(retType, op1, op2, op3, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_ConvertToDouble: case NI_Vector128_ConvertToDouble: { assert(sig->numArgs == 1); assert((simdBaseType == TYP_LONG) || (simdBaseType == TYP_ULONG)); intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_ConvertToDoubleScalar : NI_AdvSimd_Arm64_ConvertToDouble; op1 = impSIMDPopStack(retType); retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize); break; } case NI_Vector64_ConvertToInt32: case NI_Vector128_ConvertToInt32: { assert(sig->numArgs == 1); assert(simdBaseType == TYP_FLOAT); op1 = impSIMDPopStack(retType); retNode = gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_ConvertToInt32RoundToZero, simdBaseJitType, simdSize); break; } case NI_Vector64_ConvertToInt64: case NI_Vector128_ConvertToInt64: { assert(sig->numArgs == 1); assert(simdBaseType == TYP_DOUBLE); intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_ConvertToInt64RoundToZeroScalar : NI_AdvSimd_Arm64_ConvertToInt64RoundToZero; op1 = impSIMDPopStack(retType); retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize); break; } case NI_Vector64_ConvertToSingle: case NI_Vector128_ConvertToSingle: { assert(sig->numArgs == 1); assert((simdBaseType == TYP_INT) || (simdBaseType == TYP_UINT)); op1 = impSIMDPopStack(retType); retNode = gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_ConvertToSingle, simdBaseJitType, simdSize); break; } case NI_Vector64_ConvertToUInt32: case NI_Vector128_ConvertToUInt32: { assert(sig->numArgs == 1); assert(simdBaseType == TYP_FLOAT); op1 = impSIMDPopStack(retType); retNode = gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_ConvertToUInt32RoundToZero, simdBaseJitType, simdSize); break; } case NI_Vector64_ConvertToUInt64: case NI_Vector128_ConvertToUInt64: { assert(sig->numArgs == 1); assert(simdBaseType == TYP_DOUBLE); intrinsic = (simdSize == 8) ? 
NI_AdvSimd_Arm64_ConvertToUInt64RoundToZeroScalar : NI_AdvSimd_Arm64_ConvertToUInt64RoundToZero; op1 = impSIMDPopStack(retType); retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize); break; } case NI_Vector64_Create: case NI_Vector128_Create: { // We shouldn't handle this as an intrinsic if the // respective ISAs have been disabled by the user. IntrinsicNodeBuilder nodeBuilder(getAllocator(CMK_ASTNode), sig->numArgs); for (int i = sig->numArgs - 1; i >= 0; i--) { nodeBuilder.AddOperand(i, impPopStack().val); } retNode = gtNewSimdHWIntrinsicNode(retType, std::move(nodeBuilder), intrinsic, simdBaseJitType, simdSize); break; } case NI_Vector64_get_Count: case NI_Vector128_get_Count: { assert(!sig->hasThis()); assert(numArgs == 0); GenTreeIntCon* countNode = gtNewIconNode(getSIMDVectorLength(simdSize, simdBaseType), TYP_INT); countNode->gtFlags |= GTF_ICON_SIMD_COUNT; retNode = countNode; break; } case NI_Vector64_Divide: case NI_Vector128_Divide: case NI_Vector64_op_Division: case NI_Vector128_op_Division: { assert(sig->numArgs == 2); if (varTypeIsFloating(simdBaseType)) { op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdBinOpNode(GT_DIV, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); } break; } case NI_Vector64_Dot: case NI_Vector128_Dot: { assert(sig->numArgs == 2); if (!varTypeIsLong(simdBaseType)) { var_types simdType = getSIMDTypeForSize(simdSize); op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); retNode = gtNewSimdDotProdNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); } break; } case NI_Vector64_Equals: case NI_Vector128_Equals: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdCmpOpNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_EqualsAll: case NI_Vector128_EqualsAll: case NI_Vector64_op_Equality: case NI_Vector128_op_Equality: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); retNode = gtNewSimdCmpOpAllNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_EqualsAny: case NI_Vector128_EqualsAny: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); retNode = gtNewSimdCmpOpAnyNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_ExtractMostSignificantBits: case NI_Vector128_ExtractMostSignificantBits: { assert(sig->numArgs == 1); // ARM64 doesn't have a single instruction that performs the behavior so we'll emulate it instead. // To do this, we effectively perform the following steps: // 1. tmp = input & 0x80 ; and the input to clear all but the most significant bit // 2. tmp = tmp >> index ; right shift each element by its index // 3. tmp = sum(tmp) ; sum the elements together // For byte/sbyte, we also need to handle the fact that we can only shift by up to 8 // but for Vector128, we have 16 elements to handle. 
In that scenario, we will simply // extract both scalars, and combine them via: (upper << 8) | lower var_types simdType = getSIMDTypeForSize(simdSize); op1 = impSIMDPopStack(simdType); GenTree* vectorCreateOp1 = nullptr; GenTree* vectorCreateOp2 = nullptr; CorInfoType vectorCreateType = varTypeIsUnsigned(simdBaseType) ? CORINFO_TYPE_ULONG : CORINFO_TYPE_LONG; switch (simdBaseType) { case TYP_BYTE: case TYP_UBYTE: { op2 = gtNewIconNode(0x80); simdBaseType = TYP_UBYTE; simdBaseJitType = CORINFO_TYPE_UBYTE; vectorCreateOp1 = gtNewLconNode(0x00FFFEFDFCFBFAF9); if (simdSize == 16) { vectorCreateOp2 = gtNewLconNode(0x00FFFEFDFCFBFAF9); } break; } case TYP_SHORT: case TYP_USHORT: { op2 = gtNewIconNode(0x8000); vectorCreateOp1 = gtNewLconNode(0xFFF4FFF3FFF2FFF1); if (simdSize == 16) { vectorCreateOp2 = gtNewLconNode(0xFFF8FFF7FFF6FFF5); } break; } case TYP_INT: case TYP_UINT: case TYP_FLOAT: { op2 = gtNewIconNode(0x80000000); simdBaseType = TYP_INT; simdBaseJitType = CORINFO_TYPE_INT; vectorCreateOp1 = gtNewLconNode(0xFFFFFFE2FFFFFFE1); if (simdSize == 16) { vectorCreateOp2 = gtNewLconNode(0xFFFFFFE4FFFFFFE3); } break; } case TYP_LONG: case TYP_ULONG: case TYP_DOUBLE: { op2 = gtNewLconNode(0x8000000000000000); simdBaseType = TYP_LONG; simdBaseJitType = CORINFO_TYPE_LONG; vectorCreateOp1 = gtNewLconNode(0xFFFFFFFFFFFFFFC1); if (simdSize == 16) { vectorCreateOp2 = gtNewLconNode(0xFFFFFFFFFFFFFFC2); } break; } default: { unreached(); } } if (simdSize == 16) { op3 = gtNewSimdHWIntrinsicNode(simdType, vectorCreateOp1, vectorCreateOp2, NI_Vector128_Create, vectorCreateType, simdSize); } else { op3 = gtNewSimdHWIntrinsicNode(simdType, vectorCreateOp1, NI_Vector64_Create, vectorCreateType, simdSize); } op2 = gtNewSimdCreateBroadcastNode(simdType, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); op1 = gtNewSimdHWIntrinsicNode(simdType, op1, op2, NI_AdvSimd_And, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); NamedIntrinsic shiftIntrinsic = NI_AdvSimd_ShiftLogical; if ((simdSize == 8) && varTypeIsLong(simdBaseType)) { shiftIntrinsic = NI_AdvSimd_ShiftLogicalScalar; } op1 = gtNewSimdHWIntrinsicNode(simdType, op1, op3, shiftIntrinsic, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); if (varTypeIsByte(simdBaseType) && (simdSize == 16)) { CORINFO_CLASS_HANDLE simdClsHnd = gtGetStructHandleForSIMD(simdType, simdBaseJitType); op1 = impCloneExpr(op1, &op2, simdClsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector extractmostsignificantbits")); op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_Vector128_GetLower, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 8, /* isSimdAsHWIntrinsic */ false); op1 = gtNewSimdHWIntrinsicNode(simdBaseType, op1, NI_Vector64_ToScalar, simdBaseJitType, 8, /* isSimdAsHWIntrinsic */ false); op1 = gtNewCastNode(TYP_INT, op1, /* isUnsigned */ true, TYP_INT); GenTree* zero = gtNewSimdZeroNode(simdType, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); ssize_t index = 8 / genTypeSize(simdBaseType); op2 = gtNewSimdHWIntrinsicNode(simdType, op2, zero, gtNewIconNode(index), NI_AdvSimd_ExtractVector128, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op2, NI_Vector128_GetLower, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op2, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 8, /* isSimdAsHWIntrinsic */ false); 
op2 = gtNewSimdHWIntrinsicNode(simdBaseType, op2, NI_Vector64_ToScalar, simdBaseJitType, 8, /* isSimdAsHWIntrinsic */ false); op2 = gtNewCastNode(TYP_INT, op2, /* isUnsigned */ true, TYP_INT); op2 = gtNewOperNode(GT_LSH, TYP_INT, op2, gtNewIconNode(8)); retNode = gtNewOperNode(GT_OR, TYP_INT, op1, op2); } else { if (!varTypeIsLong(simdBaseType)) { if ((simdSize == 8) && ((simdBaseType == TYP_INT) || (simdBaseType == TYP_UINT))) { CORINFO_CLASS_HANDLE simdClsHnd = gtGetStructHandleForSIMD(simdType, simdBaseJitType); op1 = impCloneExpr(op1, &op2, simdClsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector extractmostsignificantbits")); op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, op2, NI_AdvSimd_AddPairwise, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); } else { op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); } } else if (simdSize == 16) { op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); } retNode = gtNewSimdHWIntrinsicNode(simdBaseType, op1, NI_Vector64_ToScalar, simdBaseJitType, 8, /* isSimdAsHWIntrinsic */ false); if ((simdBaseType != TYP_INT) && (simdBaseType != TYP_UINT)) { retNode = gtNewCastNode(TYP_INT, retNode, /* isUnsigned */ true, TYP_INT); } } break; } case NI_Vector64_Floor: case NI_Vector128_Floor: { assert(sig->numArgs == 1); assert(varTypeIsFloating(simdBaseType)); op1 = impSIMDPopStack(retType); retNode = gtNewSimdFloorNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_get_AllBitsSet: case NI_Vector128_get_AllBitsSet: { assert(!sig->hasThis()); assert(numArgs == 0); retNode = gtNewSimdHWIntrinsicNode(retType, intrinsic, simdBaseJitType, simdSize); break; } case NI_Vector64_get_Zero: case NI_Vector128_get_Zero: { assert(sig->numArgs == 0); retNode = gtNewSimdZeroNode(retType, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_GetElement: case NI_Vector128_GetElement: { assert(!sig->hasThis()); assert(numArgs == 2); op2 = impPopStack().val; op1 = impSIMDPopStack(getSIMDTypeForSize(simdSize)); const bool isSimdAsHWIntrinsic = true; retNode = gtNewSimdGetElementNode(retType, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); break; } case NI_Vector128_GetUpper: { // Converts to equivalent managed code: // AdvSimd.ExtractVector128(vector, Vector128<T>.Zero, 8 / sizeof(T)).GetLower(); assert(numArgs == 1); op1 = impPopStack().val; GenTree* zero = gtNewSimdHWIntrinsicNode(retType, NI_Vector128_get_Zero, simdBaseJitType, simdSize); ssize_t index = 8 / genTypeSize(simdBaseType); retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, zero, gtNewIconNode(index), NI_AdvSimd_ExtractVector128, simdBaseJitType, simdSize); retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD8, retNode, NI_Vector128_GetLower, simdBaseJitType, 8); break; } case NI_Vector64_GreaterThan: case NI_Vector128_GreaterThan: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdCmpOpNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_GreaterThanAll: case NI_Vector128_GreaterThanAll: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); retNode = gtNewSimdCmpOpAllNode(GT_GT, retType, op1, op2, 
simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_GreaterThanAny: case NI_Vector128_GreaterThanAny: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); retNode = gtNewSimdCmpOpAnyNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_GreaterThanOrEqual: case NI_Vector128_GreaterThanOrEqual: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdCmpOpNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_GreaterThanOrEqualAll: case NI_Vector128_GreaterThanOrEqualAll: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); retNode = gtNewSimdCmpOpAllNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_GreaterThanOrEqualAny: case NI_Vector128_GreaterThanOrEqualAny: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); retNode = gtNewSimdCmpOpAnyNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_LessThan: case NI_Vector128_LessThan: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdCmpOpNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_LessThanAll: case NI_Vector128_LessThanAll: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); retNode = gtNewSimdCmpOpAllNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_LessThanAny: case NI_Vector128_LessThanAny: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); retNode = gtNewSimdCmpOpAnyNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_LessThanOrEqual: case NI_Vector128_LessThanOrEqual: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdCmpOpNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_LessThanOrEqualAll: case NI_Vector128_LessThanOrEqualAll: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); retNode = gtNewSimdCmpOpAllNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_LessThanOrEqualAny: case NI_Vector128_LessThanOrEqualAny: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); retNode = gtNewSimdCmpOpAnyNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_Load: case NI_Vector128_Load: { assert(sig->numArgs == 1); op1 = impPopStack().val; if (op1->OperIs(GT_CAST)) { // Although the API specifies a pointer, if what we have is a BYREF, that's what // 
we really want, so throw away the cast. if (op1->gtGetOp1()->TypeGet() == TYP_BYREF) { op1 = op1->gtGetOp1(); } } NamedIntrinsic loadIntrinsic = NI_Illegal; if (simdSize == 16) { loadIntrinsic = NI_AdvSimd_LoadVector128; } else { loadIntrinsic = NI_AdvSimd_LoadVector64; } retNode = gtNewSimdHWIntrinsicNode(retType, op1, loadIntrinsic, simdBaseJitType, simdSize); break; } case NI_Vector64_LoadAligned: case NI_Vector128_LoadAligned: { assert(sig->numArgs == 1); if (!opts.MinOpts()) { // ARM64 doesn't have aligned loads, but aligned loads are only validated to be // aligned during minopts, so only skip the intrinsic handling if we're minopts break; } op1 = impPopStack().val; if (op1->OperIs(GT_CAST)) { // Although the API specifies a pointer, if what we have is a BYREF, that's what // we really want, so throw away the cast. if (op1->gtGetOp1()->TypeGet() == TYP_BYREF) { op1 = op1->gtGetOp1(); } } NamedIntrinsic loadIntrinsic = NI_Illegal; if (simdSize == 16) { loadIntrinsic = NI_AdvSimd_LoadVector128; } else { loadIntrinsic = NI_AdvSimd_LoadVector64; } retNode = gtNewSimdHWIntrinsicNode(retType, op1, loadIntrinsic, simdBaseJitType, simdSize); break; } case NI_Vector64_LoadAlignedNonTemporal: case NI_Vector128_LoadAlignedNonTemporal: { assert(sig->numArgs == 1); if (!opts.MinOpts()) { // ARM64 doesn't have aligned loads, but aligned loads are only validated to be // aligned during minopts, so only skip the intrinsic handling if we're minopts break; } op1 = impPopStack().val; if (op1->OperIs(GT_CAST)) { // Although the API specifies a pointer, if what we have is a BYREF, that's what // we really want, so throw away the cast. if (op1->gtGetOp1()->TypeGet() == TYP_BYREF) { op1 = op1->gtGetOp1(); } } // ARM64 has non-temporal loads (LDNP) but we don't currently support them NamedIntrinsic loadIntrinsic = NI_Illegal; if (simdSize == 16) { loadIntrinsic = NI_AdvSimd_LoadVector128; } else { loadIntrinsic = NI_AdvSimd_LoadVector64; } retNode = gtNewSimdHWIntrinsicNode(retType, op1, loadIntrinsic, simdBaseJitType, simdSize); break; } case NI_Vector64_LoadUnsafe: case NI_Vector128_LoadUnsafe: { if (sig->numArgs == 2) { op2 = impPopStack().val; } else { assert(sig->numArgs == 1); } op1 = impPopStack().val; if (op1->OperIs(GT_CAST)) { // Although the API specifies a pointer, if what we have is a BYREF, that's what // we really want, so throw away the cast. 
if (op1->gtGetOp1()->TypeGet() == TYP_BYREF) { op1 = op1->gtGetOp1(); } } if (sig->numArgs == 2) { op3 = gtNewIconNode(genTypeSize(simdBaseType), op2->TypeGet()); op2 = gtNewOperNode(GT_MUL, op2->TypeGet(), op2, op3); op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1, op2); } NamedIntrinsic loadIntrinsic = NI_Illegal; if (simdSize == 16) { loadIntrinsic = NI_AdvSimd_LoadVector128; } else { loadIntrinsic = NI_AdvSimd_LoadVector64; } retNode = gtNewSimdHWIntrinsicNode(retType, op1, loadIntrinsic, simdBaseJitType, simdSize); break; } case NI_Vector64_Max: case NI_Vector128_Max: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdMaxNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_Min: case NI_Vector128_Min: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdMinNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_Multiply: case NI_Vector128_Multiply: case NI_Vector64_op_Multiply: case NI_Vector128_op_Multiply: { assert(sig->numArgs == 2); if (varTypeIsLong(simdBaseType)) { // TODO-ARM64-CQ: We should support long/ulong multiplication. break; } CORINFO_ARG_LIST_HANDLE arg1 = sig->args; CORINFO_ARG_LIST_HANDLE arg2 = info.compCompHnd->getArgNext(arg1); var_types argType = TYP_UNKNOWN; CORINFO_CLASS_HANDLE argClass = NO_CLASS_HANDLE; argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg2, &argClass))); op2 = getArgForHWIntrinsic(argType, argClass); argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg1, &argClass))); op1 = getArgForHWIntrinsic(argType, argClass); retNode = gtNewSimdBinOpNode(GT_MUL, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_Narrow: case NI_Vector128_Narrow: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdNarrowNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_Negate: case NI_Vector128_Negate: case NI_Vector64_op_UnaryNegation: case NI_Vector128_op_UnaryNegation: { assert(sig->numArgs == 1); op1 = impSIMDPopStack(retType); retNode = gtNewSimdUnOpNode(GT_NEG, retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_OnesComplement: case NI_Vector128_OnesComplement: case NI_Vector64_op_OnesComplement: case NI_Vector128_op_OnesComplement: { assert(sig->numArgs == 1); op1 = impSIMDPopStack(retType); retNode = gtNewSimdUnOpNode(GT_NOT, retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_op_Inequality: case NI_Vector128_op_Inequality: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); retNode = gtNewSimdCmpOpAnyNode(GT_NE, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_op_UnaryPlus: case NI_Vector128_op_UnaryPlus: { assert(sig->numArgs == 1); retNode = impSIMDPopStack(retType); break; } case NI_Vector64_Subtract: case NI_Vector128_Subtract: case NI_Vector64_op_Subtraction: case NI_Vector128_op_Subtraction: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdBinOpNode(GT_SUB, retType, op1, op2, simdBaseJitType, simdSize, /* 
isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_ShiftLeft: case NI_Vector128_ShiftLeft: { assert(sig->numArgs == 2); op2 = impPopStack().val; op1 = impSIMDPopStack(retType); retNode = gtNewSimdBinOpNode(GT_LSH, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_ShiftRightArithmetic: case NI_Vector128_ShiftRightArithmetic: { assert(sig->numArgs == 2); op2 = impPopStack().val; op1 = impSIMDPopStack(retType); retNode = gtNewSimdBinOpNode(GT_RSH, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_ShiftRightLogical: case NI_Vector128_ShiftRightLogical: { assert(sig->numArgs == 2); op2 = impPopStack().val; op1 = impSIMDPopStack(retType); retNode = gtNewSimdBinOpNode(GT_RSZ, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_Sqrt: case NI_Vector128_Sqrt: { assert(sig->numArgs == 1); if (varTypeIsFloating(simdBaseType)) { op1 = impSIMDPopStack(retType); retNode = gtNewSimdSqrtNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); } break; } case NI_Vector64_Store: case NI_Vector128_Store: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); op2 = impPopStack().val; op1 = impSIMDPopStack(simdType); retNode = gtNewSimdHWIntrinsicNode(retType, op2, op1, NI_AdvSimd_Store, simdBaseJitType, simdSize); break; } case NI_Vector64_StoreAligned: case NI_Vector128_StoreAligned: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); if (!opts.MinOpts()) { // ARM64 doesn't have aligned stores, but aligned stores are only validated to be // aligned during minopts, so only skip the intrinsic handling if we're minopts break; } op2 = impPopStack().val; op1 = impSIMDPopStack(simdType); retNode = gtNewSimdHWIntrinsicNode(retType, op2, op1, NI_AdvSimd_Store, simdBaseJitType, simdSize); break; } case NI_Vector64_StoreAlignedNonTemporal: case NI_Vector128_StoreAlignedNonTemporal: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); if (!opts.MinOpts()) { // ARM64 doesn't have aligned stores, but aligned stores are only validated to be // aligned during minopts, so only skip the intrinsic handling if we're minopts break; } op2 = impPopStack().val; op1 = impSIMDPopStack(simdType); // ARM64 has non-temporal stores (STNP) but we don't currently support them retNode = gtNewSimdHWIntrinsicNode(retType, op2, op1, NI_AdvSimd_Store, simdBaseJitType, simdSize); break; } case NI_Vector64_StoreUnsafe: case NI_Vector128_StoreUnsafe: { var_types simdType = getSIMDTypeForSize(simdSize); if (sig->numArgs == 3) { op3 = impPopStack().val; } else { assert(sig->numArgs == 2); } op2 = impPopStack().val; op1 = impSIMDPopStack(simdType); if (sig->numArgs == 3) { op4 = gtNewIconNode(genTypeSize(simdBaseType), op3->TypeGet()); op3 = gtNewOperNode(GT_MUL, op3->TypeGet(), op3, op4); op2 = gtNewOperNode(GT_ADD, op2->TypeGet(), op2, op3); } retNode = gtNewSimdHWIntrinsicNode(retType, op2, op1, NI_AdvSimd_Store, simdBaseJitType, simdSize); break; } case NI_Vector64_Sum: case NI_Vector128_Sum: { assert(sig->numArgs == 1); var_types simdType = getSIMDTypeForSize(simdSize); op1 = impSIMDPopStack(simdType); retNode = gtNewSimdSumNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_WidenLower: case NI_Vector128_WidenLower: { assert(sig->numArgs == 1); op1 = impSIMDPopStack(retType); retNode = gtNewSimdWidenLowerNode(retType, op1, 
simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_WidenUpper: case NI_Vector128_WidenUpper: { assert(sig->numArgs == 1); op1 = impSIMDPopStack(retType); retNode = gtNewSimdWidenUpperNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_WithElement: case NI_Vector128_WithElement: { assert(numArgs == 3); GenTree* indexOp = impStackTop(1).val; if (!indexOp->OperIsConst()) { // TODO-XARCH-CQ: We should always import these like we do with GetElement // If index is not constant use software fallback. return nullptr; } ssize_t imm8 = indexOp->AsIntCon()->IconValue(); ssize_t count = simdSize / genTypeSize(simdBaseType); if (imm8 >= count || imm8 < 0) { // Using software fallback if index is out of range (throw exeception) return nullptr; } GenTree* valueOp = impPopStack().val; impPopStack(); // pop the indexOp that we already have. GenTree* vectorOp = impSIMDPopStack(getSIMDTypeForSize(simdSize)); retNode = gtNewSimdWithElementNode(retType, vectorOp, indexOp, valueOp, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); break; } case NI_Vector64_Xor: case NI_Vector128_Xor: case NI_Vector64_op_ExclusiveOr: case NI_Vector128_op_ExclusiveOr: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdBinOpNode(GT_XOR, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_AdvSimd_Arm64_LoadPairScalarVector64: case NI_AdvSimd_Arm64_LoadPairScalarVector64NonTemporal: case NI_AdvSimd_Arm64_LoadPairVector128: case NI_AdvSimd_Arm64_LoadPairVector128NonTemporal: case NI_AdvSimd_Arm64_LoadPairVector64: case NI_AdvSimd_Arm64_LoadPairVector64NonTemporal: { op1 = impPopStack().val; if (op1->OperIs(GT_CAST)) { // Although the API specifies a pointer, if what we have is a BYREF, that's what // we really want, so throw away the cast. if (op1->gtGetOp1()->TypeGet() == TYP_BYREF) { op1 = op1->gtGetOp1(); } } GenTree* loadIntrinsic = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize); // This operation contains an implicit indirection // it could point into the global heap or // it could throw a null reference exception. // loadIntrinsic->gtFlags |= (GTF_GLOB_REF | GTF_EXCEPT); assert(HWIntrinsicInfo::IsMultiReg(intrinsic)); const unsigned lclNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg intrinsic")); impAssignTempGen(lclNum, loadIntrinsic, sig->retTypeSigClass, (unsigned)CHECK_SPILL_ALL); LclVarDsc* varDsc = lvaGetDesc(lclNum); // The following is to exclude the fields of the local to have SSA. varDsc->lvIsMultiRegRet = true; GenTreeLclVar* lclVar = gtNewLclvNode(lclNum, varDsc->lvType); lclVar->SetDoNotCSE(); lclVar->SetMultiReg(); retNode = lclVar; break; } default: { return nullptr; } } return retNode; } #endif // FEATURE_HW_INTRINSICS
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "jitpch.h" #include "hwintrinsic.h" #ifdef FEATURE_HW_INTRINSICS //------------------------------------------------------------------------ // Arm64VersionOfIsa: Gets the corresponding 64-bit only InstructionSet for a given InstructionSet // // Arguments: // isa -- The InstructionSet ID // // Return Value: // The 64-bit only InstructionSet associated with isa static CORINFO_InstructionSet Arm64VersionOfIsa(CORINFO_InstructionSet isa) { switch (isa) { case InstructionSet_AdvSimd: return InstructionSet_AdvSimd_Arm64; case InstructionSet_Aes: return InstructionSet_Aes_Arm64; case InstructionSet_ArmBase: return InstructionSet_ArmBase_Arm64; case InstructionSet_Crc32: return InstructionSet_Crc32_Arm64; case InstructionSet_Dp: return InstructionSet_Dp_Arm64; case InstructionSet_Sha1: return InstructionSet_Sha1_Arm64; case InstructionSet_Sha256: return InstructionSet_Sha256_Arm64; case InstructionSet_Rdm: return InstructionSet_Rdm_Arm64; default: return InstructionSet_NONE; } } //------------------------------------------------------------------------ // lookupInstructionSet: Gets the InstructionSet for a given class name // // Arguments: // className -- The name of the class associated with the InstructionSet to lookup // // Return Value: // The InstructionSet associated with className static CORINFO_InstructionSet lookupInstructionSet(const char* className) { assert(className != nullptr); if (className[0] == 'A') { if (strcmp(className, "AdvSimd") == 0) { return InstructionSet_AdvSimd; } if (strcmp(className, "Aes") == 0) { return InstructionSet_Aes; } if (strcmp(className, "ArmBase") == 0) { return InstructionSet_ArmBase; } } else if (className[0] == 'C') { if (strcmp(className, "Crc32") == 0) { return InstructionSet_Crc32; } } else if (className[0] == 'D') { if (strcmp(className, "Dp") == 0) { return InstructionSet_Dp; } } else if (className[0] == 'R') { if (strcmp(className, "Rdm") == 0) { return InstructionSet_Rdm; } } else if (className[0] == 'S') { if (strcmp(className, "Sha1") == 0) { return InstructionSet_Sha1; } if (strcmp(className, "Sha256") == 0) { return InstructionSet_Sha256; } } else if (className[0] == 'V') { if (strncmp(className, "Vector64", 8) == 0) { return InstructionSet_Vector64; } else if (strncmp(className, "Vector128", 9) == 0) { return InstructionSet_Vector128; } } return InstructionSet_ILLEGAL; } //------------------------------------------------------------------------ // lookupIsa: Gets the InstructionSet for a given class name and enclsoing class name // // Arguments: // className -- The name of the class associated with the InstructionSet to lookup // enclosingClassName -- The name of the enclosing class or nullptr if one doesn't exist // // Return Value: // The InstructionSet associated with className and enclosingClassName CORINFO_InstructionSet HWIntrinsicInfo::lookupIsa(const char* className, const char* enclosingClassName) { assert(className != nullptr); if (strcmp(className, "Arm64") == 0) { assert(enclosingClassName != nullptr); return Arm64VersionOfIsa(lookupInstructionSet(enclosingClassName)); } else { return lookupInstructionSet(className); } } //------------------------------------------------------------------------ // isFullyImplementedIsa: Gets a value that indicates whether the InstructionSet is fully implemented // // Arguments: // isa - The InstructionSet to check // // Return Value: // true if isa is 
supported; otherwise, false bool HWIntrinsicInfo::isFullyImplementedIsa(CORINFO_InstructionSet isa) { switch (isa) { // These ISAs are fully implemented case InstructionSet_AdvSimd: case InstructionSet_AdvSimd_Arm64: case InstructionSet_Aes: case InstructionSet_Aes_Arm64: case InstructionSet_ArmBase: case InstructionSet_ArmBase_Arm64: case InstructionSet_Crc32: case InstructionSet_Crc32_Arm64: case InstructionSet_Dp: case InstructionSet_Dp_Arm64: case InstructionSet_Rdm: case InstructionSet_Rdm_Arm64: case InstructionSet_Sha1: case InstructionSet_Sha1_Arm64: case InstructionSet_Sha256: case InstructionSet_Sha256_Arm64: case InstructionSet_Vector64: case InstructionSet_Vector128: return true; default: return false; } } //------------------------------------------------------------------------ // isScalarIsa: Gets a value that indicates whether the InstructionSet is scalar // // Arguments: // isa - The InstructionSet to check // // Return Value: // true if isa is scalar; otherwise, false bool HWIntrinsicInfo::isScalarIsa(CORINFO_InstructionSet isa) { switch (isa) { case InstructionSet_ArmBase: case InstructionSet_ArmBase_Arm64: case InstructionSet_Crc32: case InstructionSet_Crc32_Arm64: { return true; } default: { return false; } } } //------------------------------------------------------------------------ // lookupImmBounds: Gets the lower and upper bounds for the imm-value of a given NamedIntrinsic // // Arguments: // intrinsic -- NamedIntrinsic associated with the HWIntrinsic to lookup // simdType -- vector size // baseType -- base type of the Vector64/128<T> // pImmLowerBound [OUT] - The lower incl. bound for a value of the intrinsic immediate operand // pImmUpperBound [OUT] - The upper incl. bound for a value of the intrinsic immediate operand // void HWIntrinsicInfo::lookupImmBounds( NamedIntrinsic intrinsic, int simdSize, var_types baseType, int* pImmLowerBound, int* pImmUpperBound) { HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsic); bool hasImmediateOperand = HasImmediateOperand(intrinsic); assert(hasImmediateOperand); assert(pImmLowerBound != nullptr); assert(pImmUpperBound != nullptr); int immLowerBound = 0; int immUpperBound = 0; if (category == HW_Category_ShiftLeftByImmediate) { // The left shift amount is in the range 0 to the element width in bits minus 1. immUpperBound = BITS_PER_BYTE * genTypeSize(baseType) - 1; } else if (category == HW_Category_ShiftRightByImmediate) { // The right shift amount, in the range 1 to the element width in bits. 
immLowerBound = 1; immUpperBound = BITS_PER_BYTE * genTypeSize(baseType); } else if (category == HW_Category_SIMDByIndexedElement) { immUpperBound = Compiler::getSIMDVectorLength(simdSize, baseType) - 1; } else { switch (intrinsic) { case NI_AdvSimd_DuplicateSelectedScalarToVector64: case NI_AdvSimd_DuplicateSelectedScalarToVector128: case NI_AdvSimd_Extract: case NI_AdvSimd_ExtractVector128: case NI_AdvSimd_ExtractVector64: case NI_AdvSimd_Insert: case NI_AdvSimd_InsertScalar: case NI_AdvSimd_LoadAndInsertScalar: case NI_AdvSimd_StoreSelectedScalar: case NI_AdvSimd_Arm64_DuplicateSelectedScalarToVector128: case NI_AdvSimd_Arm64_InsertSelectedScalar: immUpperBound = Compiler::getSIMDVectorLength(simdSize, baseType) - 1; break; default: unreached(); } } assert(immLowerBound <= immUpperBound); *pImmLowerBound = immLowerBound; *pImmUpperBound = immUpperBound; } //------------------------------------------------------------------------ // impNonConstFallback: generate alternate code when the imm-arg is not a compile-time constant // // Arguments: // intrinsic -- intrinsic ID // simdType -- Vector type // simdBaseJitType -- base JIT type of the Vector64/128<T> // // Return Value: // return the IR of semantic alternative on non-const imm-arg // GenTree* Compiler::impNonConstFallback(NamedIntrinsic intrinsic, var_types simdType, CorInfoType simdBaseJitType) { return nullptr; } //------------------------------------------------------------------------ // impSpecialIntrinsic: Import a hardware intrinsic that requires special handling as a GT_HWINTRINSIC node if possible // // Arguments: // intrinsic -- id of the intrinsic function. // clsHnd -- class handle containing the intrinsic function. // method -- method handle of the intrinsic function. // sig -- signature of the intrinsic call. // simdBaseJitType -- generic argument of the intrinsic. // retType -- return type of the intrinsic. // // Return Value: // The GT_HWINTRINSIC node, or nullptr if not a supported intrinsic // GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType, var_types retType, unsigned simdSize) { const HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsic); const int numArgs = sig->numArgs; // The vast majority of "special" intrinsics are Vector64/Vector128 methods. // The only exception is ArmBase.Yield which should be treated differently. 
if (intrinsic == NI_ArmBase_Yield) { assert(sig->numArgs == 0); assert(JITtype2varType(sig->retType) == TYP_VOID); assert(simdSize == 0); return gtNewScalarHWIntrinsicNode(TYP_VOID, intrinsic); } assert(category != HW_Category_Scalar); assert(!HWIntrinsicInfo::isScalarIsa(HWIntrinsicInfo::lookupIsa(intrinsic))); if (!supportSIMDTypes() || !IsBaselineSimdIsaSupported()) { return nullptr; } assert(numArgs >= 0); var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); GenTree* retNode = nullptr; GenTree* op1 = nullptr; GenTree* op2 = nullptr; GenTree* op3 = nullptr; GenTree* op4 = nullptr; switch (intrinsic) { case NI_Vector64_Abs: case NI_Vector128_Abs: { assert(sig->numArgs == 1); op1 = impSIMDPopStack(retType); retNode = gtNewSimdAbsNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_Add: case NI_Vector128_Add: case NI_Vector64_op_Addition: case NI_Vector128_op_Addition: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_AndNot: case NI_Vector128_AndNot: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdBinOpNode(GT_AND_NOT, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_As: case NI_Vector64_AsByte: case NI_Vector64_AsDouble: case NI_Vector64_AsInt16: case NI_Vector64_AsInt32: case NI_Vector64_AsInt64: case NI_Vector64_AsSByte: case NI_Vector64_AsSingle: case NI_Vector64_AsUInt16: case NI_Vector64_AsUInt32: case NI_Vector64_AsUInt64: case NI_Vector128_As: case NI_Vector128_AsByte: case NI_Vector128_AsDouble: case NI_Vector128_AsInt16: case NI_Vector128_AsInt32: case NI_Vector128_AsInt64: case NI_Vector128_AsSByte: case NI_Vector128_AsSingle: case NI_Vector128_AsUInt16: case NI_Vector128_AsUInt32: case NI_Vector128_AsUInt64: case NI_Vector128_AsVector: case NI_Vector128_AsVector4: case NI_Vector128_AsVector128: { assert(!sig->hasThis()); assert(numArgs == 1); // We fold away the cast here, as it only exists to satisfy // the type system. It is safe to do this here since the retNode type // and the signature return type are both the same TYP_SIMD. 
retNode = impSIMDPopStack(retType, /* expectAddr: */ false, sig->retTypeClass); SetOpLclRelatedToSIMDIntrinsic(retNode); assert(retNode->gtType == getSIMDTypeForSize(getSIMDTypeSizeInBytes(sig->retTypeSigClass))); break; } case NI_Vector64_BitwiseAnd: case NI_Vector128_BitwiseAnd: case NI_Vector64_op_BitwiseAnd: case NI_Vector128_op_BitwiseAnd: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdBinOpNode(GT_AND, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_BitwiseOr: case NI_Vector128_BitwiseOr: case NI_Vector64_op_BitwiseOr: case NI_Vector128_op_BitwiseOr: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdBinOpNode(GT_OR, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_Ceiling: case NI_Vector128_Ceiling: { assert(sig->numArgs == 1); assert(varTypeIsFloating(simdBaseType)); op1 = impSIMDPopStack(retType); retNode = gtNewSimdCeilNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_ConditionalSelect: case NI_Vector128_ConditionalSelect: { assert(sig->numArgs == 3); op3 = impSIMDPopStack(retType); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdCndSelNode(retType, op1, op2, op3, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_ConvertToDouble: case NI_Vector128_ConvertToDouble: { assert(sig->numArgs == 1); assert((simdBaseType == TYP_LONG) || (simdBaseType == TYP_ULONG)); intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_ConvertToDoubleScalar : NI_AdvSimd_Arm64_ConvertToDouble; op1 = impSIMDPopStack(retType); retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize); break; } case NI_Vector64_ConvertToInt32: case NI_Vector128_ConvertToInt32: { assert(sig->numArgs == 1); assert(simdBaseType == TYP_FLOAT); op1 = impSIMDPopStack(retType); retNode = gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_ConvertToInt32RoundToZero, simdBaseJitType, simdSize); break; } case NI_Vector64_ConvertToInt64: case NI_Vector128_ConvertToInt64: { assert(sig->numArgs == 1); assert(simdBaseType == TYP_DOUBLE); intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_ConvertToInt64RoundToZeroScalar : NI_AdvSimd_Arm64_ConvertToInt64RoundToZero; op1 = impSIMDPopStack(retType); retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize); break; } case NI_Vector64_ConvertToSingle: case NI_Vector128_ConvertToSingle: { assert(sig->numArgs == 1); assert((simdBaseType == TYP_INT) || (simdBaseType == TYP_UINT)); op1 = impSIMDPopStack(retType); retNode = gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_ConvertToSingle, simdBaseJitType, simdSize); break; } case NI_Vector64_ConvertToUInt32: case NI_Vector128_ConvertToUInt32: { assert(sig->numArgs == 1); assert(simdBaseType == TYP_FLOAT); op1 = impSIMDPopStack(retType); retNode = gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_ConvertToUInt32RoundToZero, simdBaseJitType, simdSize); break; } case NI_Vector64_ConvertToUInt64: case NI_Vector128_ConvertToUInt64: { assert(sig->numArgs == 1); assert(simdBaseType == TYP_DOUBLE); intrinsic = (simdSize == 8) ? 
NI_AdvSimd_Arm64_ConvertToUInt64RoundToZeroScalar : NI_AdvSimd_Arm64_ConvertToUInt64RoundToZero; op1 = impSIMDPopStack(retType); retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize); break; } case NI_Vector64_Create: case NI_Vector128_Create: { // We shouldn't handle this as an intrinsic if the // respective ISAs have been disabled by the user. IntrinsicNodeBuilder nodeBuilder(getAllocator(CMK_ASTNode), sig->numArgs); for (int i = sig->numArgs - 1; i >= 0; i--) { nodeBuilder.AddOperand(i, impPopStack().val); } retNode = gtNewSimdHWIntrinsicNode(retType, std::move(nodeBuilder), intrinsic, simdBaseJitType, simdSize); break; } case NI_Vector64_get_Count: case NI_Vector128_get_Count: { assert(!sig->hasThis()); assert(numArgs == 0); GenTreeIntCon* countNode = gtNewIconNode(getSIMDVectorLength(simdSize, simdBaseType), TYP_INT); countNode->gtFlags |= GTF_ICON_SIMD_COUNT; retNode = countNode; break; } case NI_Vector64_Divide: case NI_Vector128_Divide: case NI_Vector64_op_Division: case NI_Vector128_op_Division: { assert(sig->numArgs == 2); if (varTypeIsFloating(simdBaseType)) { op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdBinOpNode(GT_DIV, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); } break; } case NI_Vector64_Dot: case NI_Vector128_Dot: { assert(sig->numArgs == 2); if (!varTypeIsLong(simdBaseType)) { var_types simdType = getSIMDTypeForSize(simdSize); op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); retNode = gtNewSimdDotProdNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); } break; } case NI_Vector64_Equals: case NI_Vector128_Equals: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdCmpOpNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_EqualsAll: case NI_Vector128_EqualsAll: case NI_Vector64_op_Equality: case NI_Vector128_op_Equality: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); retNode = gtNewSimdCmpOpAllNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_EqualsAny: case NI_Vector128_EqualsAny: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); retNode = gtNewSimdCmpOpAnyNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_ExtractMostSignificantBits: case NI_Vector128_ExtractMostSignificantBits: { assert(sig->numArgs == 1); // ARM64 doesn't have a single instruction that performs the behavior so we'll emulate it instead. // To do this, we effectively perform the following steps: // 1. tmp = input & 0x80 ; and the input to clear all but the most significant bit // 2. tmp = tmp >> index ; right shift each element by its index // 3. tmp = sum(tmp) ; sum the elements together // For byte/sbyte, we also need to handle the fact that we can only shift by up to 8 // but for Vector128, we have 16 elements to handle. 
In that scenario, we will simply // extract both scalars, and combine them via: (upper << 8) | lower var_types simdType = getSIMDTypeForSize(simdSize); op1 = impSIMDPopStack(simdType); GenTree* vectorCreateOp1 = nullptr; GenTree* vectorCreateOp2 = nullptr; CorInfoType vectorCreateType = varTypeIsUnsigned(simdBaseType) ? CORINFO_TYPE_ULONG : CORINFO_TYPE_LONG; switch (simdBaseType) { case TYP_BYTE: case TYP_UBYTE: { op2 = gtNewIconNode(0x80); simdBaseType = TYP_UBYTE; simdBaseJitType = CORINFO_TYPE_UBYTE; vectorCreateOp1 = gtNewLconNode(0x00FFFEFDFCFBFAF9); if (simdSize == 16) { vectorCreateOp2 = gtNewLconNode(0x00FFFEFDFCFBFAF9); } break; } case TYP_SHORT: case TYP_USHORT: { op2 = gtNewIconNode(0x8000); vectorCreateOp1 = gtNewLconNode(0xFFF4FFF3FFF2FFF1); if (simdSize == 16) { vectorCreateOp2 = gtNewLconNode(0xFFF8FFF7FFF6FFF5); } break; } case TYP_INT: case TYP_UINT: case TYP_FLOAT: { op2 = gtNewIconNode(0x80000000); simdBaseType = TYP_INT; simdBaseJitType = CORINFO_TYPE_INT; vectorCreateOp1 = gtNewLconNode(0xFFFFFFE2FFFFFFE1); if (simdSize == 16) { vectorCreateOp2 = gtNewLconNode(0xFFFFFFE4FFFFFFE3); } break; } case TYP_LONG: case TYP_ULONG: case TYP_DOUBLE: { op2 = gtNewLconNode(0x8000000000000000); simdBaseType = TYP_LONG; simdBaseJitType = CORINFO_TYPE_LONG; vectorCreateOp1 = gtNewLconNode(0xFFFFFFFFFFFFFFC1); if (simdSize == 16) { vectorCreateOp2 = gtNewLconNode(0xFFFFFFFFFFFFFFC2); } break; } default: { unreached(); } } if (simdSize == 16) { op3 = gtNewSimdHWIntrinsicNode(simdType, vectorCreateOp1, vectorCreateOp2, NI_Vector128_Create, vectorCreateType, simdSize); } else { op3 = gtNewSimdHWIntrinsicNode(simdType, vectorCreateOp1, NI_Vector64_Create, vectorCreateType, simdSize); } op2 = gtNewSimdCreateBroadcastNode(simdType, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); op1 = gtNewSimdHWIntrinsicNode(simdType, op1, op2, NI_AdvSimd_And, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); NamedIntrinsic shiftIntrinsic = NI_AdvSimd_ShiftLogical; if ((simdSize == 8) && varTypeIsLong(simdBaseType)) { shiftIntrinsic = NI_AdvSimd_ShiftLogicalScalar; } op1 = gtNewSimdHWIntrinsicNode(simdType, op1, op3, shiftIntrinsic, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); if (varTypeIsByte(simdBaseType) && (simdSize == 16)) { CORINFO_CLASS_HANDLE simdClsHnd = gtGetStructHandleForSIMD(simdType, simdBaseJitType); op1 = impCloneExpr(op1, &op2, simdClsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector extractmostsignificantbits")); op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_Vector128_GetLower, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 8, /* isSimdAsHWIntrinsic */ false); op1 = gtNewSimdHWIntrinsicNode(simdBaseType, op1, NI_Vector64_ToScalar, simdBaseJitType, 8, /* isSimdAsHWIntrinsic */ false); op1 = gtNewCastNode(TYP_INT, op1, /* isUnsigned */ true, TYP_INT); GenTree* zero = gtNewSimdZeroNode(simdType, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); ssize_t index = 8 / genTypeSize(simdBaseType); op2 = gtNewSimdHWIntrinsicNode(simdType, op2, zero, gtNewIconNode(index), NI_AdvSimd_ExtractVector128, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op2, NI_Vector128_GetLower, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op2, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 8, /* isSimdAsHWIntrinsic */ false); 
op2 = gtNewSimdHWIntrinsicNode(simdBaseType, op2, NI_Vector64_ToScalar, simdBaseJitType, 8, /* isSimdAsHWIntrinsic */ false); op2 = gtNewCastNode(TYP_INT, op2, /* isUnsigned */ true, TYP_INT); op2 = gtNewOperNode(GT_LSH, TYP_INT, op2, gtNewIconNode(8)); retNode = gtNewOperNode(GT_OR, TYP_INT, op1, op2); } else { if (!varTypeIsLong(simdBaseType)) { if ((simdSize == 8) && ((simdBaseType == TYP_INT) || (simdBaseType == TYP_UINT))) { CORINFO_CLASS_HANDLE simdClsHnd = gtGetStructHandleForSIMD(simdType, simdBaseJitType); op1 = impCloneExpr(op1, &op2, simdClsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector extractmostsignificantbits")); op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, op2, NI_AdvSimd_AddPairwise, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); } else { op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); } } else if (simdSize == 16) { op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); } retNode = gtNewSimdHWIntrinsicNode(simdBaseType, op1, NI_Vector64_ToScalar, simdBaseJitType, 8, /* isSimdAsHWIntrinsic */ false); if ((simdBaseType != TYP_INT) && (simdBaseType != TYP_UINT)) { retNode = gtNewCastNode(TYP_INT, retNode, /* isUnsigned */ true, TYP_INT); } } break; } case NI_Vector64_Floor: case NI_Vector128_Floor: { assert(sig->numArgs == 1); assert(varTypeIsFloating(simdBaseType)); op1 = impSIMDPopStack(retType); retNode = gtNewSimdFloorNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_get_AllBitsSet: case NI_Vector128_get_AllBitsSet: { assert(!sig->hasThis()); assert(numArgs == 0); retNode = gtNewSimdHWIntrinsicNode(retType, intrinsic, simdBaseJitType, simdSize); break; } case NI_Vector64_get_Zero: case NI_Vector128_get_Zero: { assert(sig->numArgs == 0); retNode = gtNewSimdZeroNode(retType, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_GetElement: case NI_Vector128_GetElement: { assert(!sig->hasThis()); assert(numArgs == 2); op2 = impPopStack().val; op1 = impSIMDPopStack(getSIMDTypeForSize(simdSize)); const bool isSimdAsHWIntrinsic = true; retNode = gtNewSimdGetElementNode(retType, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); break; } case NI_Vector128_GetUpper: { // Converts to equivalent managed code: // AdvSimd.ExtractVector128(vector, Vector128<T>.Zero, 8 / sizeof(T)).GetLower(); assert(numArgs == 1); op1 = impPopStack().val; GenTree* zero = gtNewSimdHWIntrinsicNode(retType, NI_Vector128_get_Zero, simdBaseJitType, simdSize); ssize_t index = 8 / genTypeSize(simdBaseType); retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, zero, gtNewIconNode(index), NI_AdvSimd_ExtractVector128, simdBaseJitType, simdSize); retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD8, retNode, NI_Vector128_GetLower, simdBaseJitType, 8); break; } case NI_Vector64_GreaterThan: case NI_Vector128_GreaterThan: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdCmpOpNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_GreaterThanAll: case NI_Vector128_GreaterThanAll: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); retNode = gtNewSimdCmpOpAllNode(GT_GT, retType, op1, op2, 
simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_GreaterThanAny: case NI_Vector128_GreaterThanAny: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); retNode = gtNewSimdCmpOpAnyNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_GreaterThanOrEqual: case NI_Vector128_GreaterThanOrEqual: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdCmpOpNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_GreaterThanOrEqualAll: case NI_Vector128_GreaterThanOrEqualAll: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); retNode = gtNewSimdCmpOpAllNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_GreaterThanOrEqualAny: case NI_Vector128_GreaterThanOrEqualAny: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); retNode = gtNewSimdCmpOpAnyNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_LessThan: case NI_Vector128_LessThan: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdCmpOpNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_LessThanAll: case NI_Vector128_LessThanAll: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); retNode = gtNewSimdCmpOpAllNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_LessThanAny: case NI_Vector128_LessThanAny: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); retNode = gtNewSimdCmpOpAnyNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_LessThanOrEqual: case NI_Vector128_LessThanOrEqual: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdCmpOpNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_LessThanOrEqualAll: case NI_Vector128_LessThanOrEqualAll: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); retNode = gtNewSimdCmpOpAllNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_LessThanOrEqualAny: case NI_Vector128_LessThanOrEqualAny: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); retNode = gtNewSimdCmpOpAnyNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_Load: case NI_Vector128_Load: { assert(sig->numArgs == 1); op1 = impPopStack().val; if (op1->OperIs(GT_CAST)) { // Although the API specifies a pointer, if what we have is a BYREF, that's what // 
we really want, so throw away the cast. if (op1->gtGetOp1()->TypeGet() == TYP_BYREF) { op1 = op1->gtGetOp1(); } } NamedIntrinsic loadIntrinsic = NI_Illegal; if (simdSize == 16) { loadIntrinsic = NI_AdvSimd_LoadVector128; } else { loadIntrinsic = NI_AdvSimd_LoadVector64; } retNode = gtNewSimdHWIntrinsicNode(retType, op1, loadIntrinsic, simdBaseJitType, simdSize); break; } case NI_Vector64_LoadAligned: case NI_Vector128_LoadAligned: { assert(sig->numArgs == 1); if (!opts.MinOpts()) { // ARM64 doesn't have aligned loads, but aligned loads are only validated to be // aligned during minopts, so only skip the intrinsic handling if we're minopts break; } op1 = impPopStack().val; if (op1->OperIs(GT_CAST)) { // Although the API specifies a pointer, if what we have is a BYREF, that's what // we really want, so throw away the cast. if (op1->gtGetOp1()->TypeGet() == TYP_BYREF) { op1 = op1->gtGetOp1(); } } NamedIntrinsic loadIntrinsic = NI_Illegal; if (simdSize == 16) { loadIntrinsic = NI_AdvSimd_LoadVector128; } else { loadIntrinsic = NI_AdvSimd_LoadVector64; } retNode = gtNewSimdHWIntrinsicNode(retType, op1, loadIntrinsic, simdBaseJitType, simdSize); break; } case NI_Vector64_LoadAlignedNonTemporal: case NI_Vector128_LoadAlignedNonTemporal: { assert(sig->numArgs == 1); if (!opts.MinOpts()) { // ARM64 doesn't have aligned loads, but aligned loads are only validated to be // aligned during minopts, so only skip the intrinsic handling if we're minopts break; } op1 = impPopStack().val; if (op1->OperIs(GT_CAST)) { // Although the API specifies a pointer, if what we have is a BYREF, that's what // we really want, so throw away the cast. if (op1->gtGetOp1()->TypeGet() == TYP_BYREF) { op1 = op1->gtGetOp1(); } } // ARM64 has non-temporal loads (LDNP) but we don't currently support them NamedIntrinsic loadIntrinsic = NI_Illegal; if (simdSize == 16) { loadIntrinsic = NI_AdvSimd_LoadVector128; } else { loadIntrinsic = NI_AdvSimd_LoadVector64; } retNode = gtNewSimdHWIntrinsicNode(retType, op1, loadIntrinsic, simdBaseJitType, simdSize); break; } case NI_Vector64_LoadUnsafe: case NI_Vector128_LoadUnsafe: { if (sig->numArgs == 2) { op2 = impPopStack().val; } else { assert(sig->numArgs == 1); } op1 = impPopStack().val; if (op1->OperIs(GT_CAST)) { // Although the API specifies a pointer, if what we have is a BYREF, that's what // we really want, so throw away the cast. 
if (op1->gtGetOp1()->TypeGet() == TYP_BYREF) { op1 = op1->gtGetOp1(); } } if (sig->numArgs == 2) { op3 = gtNewIconNode(genTypeSize(simdBaseType), op2->TypeGet()); op2 = gtNewOperNode(GT_MUL, op2->TypeGet(), op2, op3); op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1, op2); } NamedIntrinsic loadIntrinsic = NI_Illegal; if (simdSize == 16) { loadIntrinsic = NI_AdvSimd_LoadVector128; } else { loadIntrinsic = NI_AdvSimd_LoadVector64; } retNode = gtNewSimdHWIntrinsicNode(retType, op1, loadIntrinsic, simdBaseJitType, simdSize); break; } case NI_Vector64_Max: case NI_Vector128_Max: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdMaxNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_Min: case NI_Vector128_Min: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdMinNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_Multiply: case NI_Vector128_Multiply: case NI_Vector64_op_Multiply: case NI_Vector128_op_Multiply: { assert(sig->numArgs == 2); if (varTypeIsLong(simdBaseType)) { // TODO-ARM64-CQ: We should support long/ulong multiplication. break; } CORINFO_ARG_LIST_HANDLE arg1 = sig->args; CORINFO_ARG_LIST_HANDLE arg2 = info.compCompHnd->getArgNext(arg1); var_types argType = TYP_UNKNOWN; CORINFO_CLASS_HANDLE argClass = NO_CLASS_HANDLE; argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg2, &argClass))); op2 = getArgForHWIntrinsic(argType, argClass); argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg1, &argClass))); op1 = getArgForHWIntrinsic(argType, argClass); retNode = gtNewSimdBinOpNode(GT_MUL, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_Narrow: case NI_Vector128_Narrow: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdNarrowNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_Negate: case NI_Vector128_Negate: case NI_Vector64_op_UnaryNegation: case NI_Vector128_op_UnaryNegation: { assert(sig->numArgs == 1); op1 = impSIMDPopStack(retType); retNode = gtNewSimdUnOpNode(GT_NEG, retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_OnesComplement: case NI_Vector128_OnesComplement: case NI_Vector64_op_OnesComplement: case NI_Vector128_op_OnesComplement: { assert(sig->numArgs == 1); op1 = impSIMDPopStack(retType); retNode = gtNewSimdUnOpNode(GT_NOT, retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_op_Inequality: case NI_Vector128_op_Inequality: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); retNode = gtNewSimdCmpOpAnyNode(GT_NE, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_op_UnaryPlus: case NI_Vector128_op_UnaryPlus: { assert(sig->numArgs == 1); retNode = impSIMDPopStack(retType); break; } case NI_Vector64_Subtract: case NI_Vector128_Subtract: case NI_Vector64_op_Subtraction: case NI_Vector128_op_Subtraction: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdBinOpNode(GT_SUB, retType, op1, op2, simdBaseJitType, simdSize, /* 
isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_ShiftLeft: case NI_Vector128_ShiftLeft: { assert(sig->numArgs == 2); op2 = impPopStack().val; op1 = impSIMDPopStack(retType); retNode = gtNewSimdBinOpNode(GT_LSH, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_ShiftRightArithmetic: case NI_Vector128_ShiftRightArithmetic: { assert(sig->numArgs == 2); op2 = impPopStack().val; op1 = impSIMDPopStack(retType); retNode = gtNewSimdBinOpNode(GT_RSH, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_ShiftRightLogical: case NI_Vector128_ShiftRightLogical: { assert(sig->numArgs == 2); op2 = impPopStack().val; op1 = impSIMDPopStack(retType); retNode = gtNewSimdBinOpNode(GT_RSZ, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_Sqrt: case NI_Vector128_Sqrt: { assert(sig->numArgs == 1); if (varTypeIsFloating(simdBaseType)) { op1 = impSIMDPopStack(retType); retNode = gtNewSimdSqrtNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); } break; } case NI_Vector64_Store: case NI_Vector128_Store: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); op2 = impPopStack().val; op1 = impSIMDPopStack(simdType); retNode = gtNewSimdHWIntrinsicNode(retType, op2, op1, NI_AdvSimd_Store, simdBaseJitType, simdSize); break; } case NI_Vector64_StoreAligned: case NI_Vector128_StoreAligned: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); if (!opts.MinOpts()) { // ARM64 doesn't have aligned stores, but aligned stores are only validated to be // aligned during minopts, so only skip the intrinsic handling if we're minopts break; } op2 = impPopStack().val; op1 = impSIMDPopStack(simdType); retNode = gtNewSimdHWIntrinsicNode(retType, op2, op1, NI_AdvSimd_Store, simdBaseJitType, simdSize); break; } case NI_Vector64_StoreAlignedNonTemporal: case NI_Vector128_StoreAlignedNonTemporal: { assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); if (!opts.MinOpts()) { // ARM64 doesn't have aligned stores, but aligned stores are only validated to be // aligned during minopts, so only skip the intrinsic handling if we're minopts break; } op2 = impPopStack().val; op1 = impSIMDPopStack(simdType); // ARM64 has non-temporal stores (STNP) but we don't currently support them retNode = gtNewSimdHWIntrinsicNode(retType, op2, op1, NI_AdvSimd_Store, simdBaseJitType, simdSize); break; } case NI_Vector64_StoreUnsafe: case NI_Vector128_StoreUnsafe: { var_types simdType = getSIMDTypeForSize(simdSize); if (sig->numArgs == 3) { op3 = impPopStack().val; } else { assert(sig->numArgs == 2); } op2 = impPopStack().val; op1 = impSIMDPopStack(simdType); if (sig->numArgs == 3) { op4 = gtNewIconNode(genTypeSize(simdBaseType), op3->TypeGet()); op3 = gtNewOperNode(GT_MUL, op3->TypeGet(), op3, op4); op2 = gtNewOperNode(GT_ADD, op2->TypeGet(), op2, op3); } retNode = gtNewSimdHWIntrinsicNode(retType, op2, op1, NI_AdvSimd_Store, simdBaseJitType, simdSize); break; } case NI_Vector64_Sum: case NI_Vector128_Sum: { assert(sig->numArgs == 1); var_types simdType = getSIMDTypeForSize(simdSize); op1 = impSIMDPopStack(simdType); retNode = gtNewSimdSumNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_WidenLower: case NI_Vector128_WidenLower: { assert(sig->numArgs == 1); op1 = impSIMDPopStack(retType); retNode = gtNewSimdWidenLowerNode(retType, op1, 
simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_WidenUpper: case NI_Vector128_WidenUpper: { assert(sig->numArgs == 1); op1 = impSIMDPopStack(retType); retNode = gtNewSimdWidenUpperNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_Vector64_WithElement: case NI_Vector128_WithElement: { assert(numArgs == 3); GenTree* indexOp = impStackTop(1).val; if (!indexOp->OperIsConst()) { // TODO-XARCH-CQ: We should always import these like we do with GetElement // If index is not constant use software fallback. return nullptr; } ssize_t imm8 = indexOp->AsIntCon()->IconValue(); ssize_t count = simdSize / genTypeSize(simdBaseType); if (imm8 >= count || imm8 < 0) { // Using software fallback if index is out of range (throw exeception) return nullptr; } GenTree* valueOp = impPopStack().val; impPopStack(); // pop the indexOp that we already have. GenTree* vectorOp = impSIMDPopStack(getSIMDTypeForSize(simdSize)); retNode = gtNewSimdWithElementNode(retType, vectorOp, indexOp, valueOp, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); break; } case NI_Vector64_Xor: case NI_Vector128_Xor: case NI_Vector64_op_ExclusiveOr: case NI_Vector128_op_ExclusiveOr: { assert(sig->numArgs == 2); op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); retNode = gtNewSimdBinOpNode(GT_XOR, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); break; } case NI_AdvSimd_Arm64_LoadPairScalarVector64: case NI_AdvSimd_Arm64_LoadPairScalarVector64NonTemporal: case NI_AdvSimd_Arm64_LoadPairVector128: case NI_AdvSimd_Arm64_LoadPairVector128NonTemporal: case NI_AdvSimd_Arm64_LoadPairVector64: case NI_AdvSimd_Arm64_LoadPairVector64NonTemporal: { op1 = impPopStack().val; if (op1->OperIs(GT_CAST)) { // Although the API specifies a pointer, if what we have is a BYREF, that's what // we really want, so throw away the cast. if (op1->gtGetOp1()->TypeGet() == TYP_BYREF) { op1 = op1->gtGetOp1(); } } GenTree* loadIntrinsic = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize); // This operation contains an implicit indirection // it could point into the global heap or // it could throw a null reference exception. // loadIntrinsic->gtFlags |= (GTF_GLOB_REF | GTF_EXCEPT); assert(HWIntrinsicInfo::IsMultiReg(intrinsic)); const unsigned lclNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg intrinsic")); impAssignTempGen(lclNum, loadIntrinsic, sig->retTypeSigClass, (unsigned)CHECK_SPILL_ALL); LclVarDsc* varDsc = lvaGetDesc(lclNum); // The following is to exclude the fields of the local to have SSA. varDsc->lvIsMultiRegRet = true; GenTreeLclVar* lclVar = gtNewLclvNode(lclNum, varDsc->lvType); lclVar->SetDoNotCSE(); lclVar->SetMultiReg(); retNode = lclVar; break; } default: { return nullptr; } } return retNode; } #endif // FEATURE_HW_INTRINSICS
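The ExtractMostSignificantBits import above emulates a missing instruction by masking each lane's sign bit, shifting it to the lane's index, and summing across lanes (splitting the upper and lower halves when there are 16 byte-sized lanes). Below is a standalone scalar model of the value those steps produce for a 16-byte vector; it is illustrative only, uses no JIT types, and all names in it are made up.

```cpp
// Standalone scalar model (illustrative only, not JIT code) of the
// ExtractMostSignificantBits emulation described in the import above:
// 1. keep only each lane's most significant bit,
// 2. move that bit to the lane's index position,
// 3. accumulate the bits into a single integer mask.
#include <cstdint>
#include <cstdio>

static uint32_t MostSignificantBitsModel(const uint8_t lanes[16])
{
    uint32_t mask = 0;
    for (int i = 0; i < 16; i++)
    {
        uint32_t msb = (lanes[i] & 0x80u) >> 7; // step 1: isolate bit 7 of the lane
        mask |= msb << i;                       // steps 2-3: place it at bit i and sum
    }
    return mask;
}

int main()
{
    uint8_t lanes[16] = {0x80, 0x01, 0xFF, 0x00, 0x80, 0x7F, 0xC0, 0x10,
                         0x00, 0x90, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80};
    std::printf("0x%04X\n", MostSignificantBitsModel(lanes)); // prints 0x9255
}
```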
1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore doesn't work on Arm64. On Arm64 we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
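A minimal sketch of the substitution this description talks about, under stated assumptions: `featureSIMD` and `supportSIMDTypes()` are the identifiers named above, while the struct, helper names, and values are hypothetical and exist only to show why the old check misbehaves when `COMPlus_FeatureSIMD=0` is set on Arm64.

```cpp
// Illustrative model only -- not code from this PR. It compiles on its own and shows
// the shape of the change: the user-controllable featureSIMD flag can be cleared by
// COMPlus_FeatureSIMD=0, so Arm64 checks are redirected to supportSIMDTypes().
#include <cstdio>

struct CompilerModel
{
    bool featureSIMD;                              // old gate: false when COMPlus_FeatureSIMD=0
    bool supportSIMDTypes() const { return true; } // Arm64 always supports SIMD types (ABI needs them)

    bool canImportSimdIntrinsicBefore() const { return featureSIMD; }        // old check
    bool canImportSimdIntrinsicAfter() const { return supportSIMDTypes(); }  // new check
};

int main()
{
    CompilerModel arm64{/* featureSIMD */ false}; // COMPlus_FeatureSIMD=0 on Arm64
    std::printf("before: %d, after: %d\n", arm64.canImportSimdIntrinsicBefore(),
                arm64.canImportSimdIntrinsicAfter()); // before: 0, after: 1
}
```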
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore doesn't work on Arm64. On Arm64 we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/jit/importer.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Importer XX XX XX XX Imports the given method and converts it to semantic trees XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "corexcep.h" #define Verify(cond, msg) \ do \ { \ if (!(cond)) \ { \ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \ } \ } while (0) #define VerifyOrReturn(cond, msg) \ do \ { \ if (!(cond)) \ { \ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \ return; \ } \ } while (0) #define VerifyOrReturnSpeculative(cond, msg, speculative) \ do \ { \ if (speculative) \ { \ if (!(cond)) \ { \ return false; \ } \ } \ else \ { \ if (!(cond)) \ { \ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \ return false; \ } \ } \ } while (0) /*****************************************************************************/ void Compiler::impInit() { impStmtList = impLastStmt = nullptr; #ifdef DEBUG impInlinedCodeSize = 0; #endif // DEBUG } /***************************************************************************** * * Pushes the given tree on the stack. */ void Compiler::impPushOnStack(GenTree* tree, typeInfo ti) { /* Check for overflow. If inlining, we may be using a bigger stack */ if ((verCurrentState.esStackDepth >= info.compMaxStack) && (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0))) { BADCODE("stack overflow"); } #ifdef DEBUG // If we are pushing a struct, make certain we know the precise type! 
if (tree->TypeGet() == TYP_STRUCT) { assert(ti.IsType(TI_STRUCT)); CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle(); assert(clsHnd != NO_CLASS_HANDLE); } #endif // DEBUG verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti; verCurrentState.esStack[verCurrentState.esStackDepth++].val = tree; if ((tree->gtType == TYP_LONG) && (compLongUsed == false)) { compLongUsed = true; } else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false)) { compFloatingPointUsed = true; } } inline void Compiler::impPushNullObjRefOnStack() { impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL)); } // This method gets called when we run into unverifiable code // (and we are verifying the method) inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file) DEBUGARG(unsigned line)) { #ifdef DEBUG const char* tail = strrchr(file, '\\'); if (tail) { file = tail + 1; } if (JitConfig.JitBreakOnUnsafeCode()) { assert(!"Unsafe code detected"); } #endif JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs)); if (compIsForImportOnly()) { JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs)); verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line)); } } inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file) DEBUGARG(unsigned line)) { JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs)); #ifdef DEBUG // BreakIfDebuggerPresent(); if (getBreakOnBadCode()) { assert(!"Typechecking error"); } #endif RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr); UNREACHABLE(); } // helper function that will tell us if the IL instruction at the addr passed // by param consumes an address at the top of the stack. We use it to save // us lvAddrTaken bool Compiler::impILConsumesAddr(const BYTE* codeAddr) { assert(!compIsForInlining()); OPCODE opcode; opcode = (OPCODE)getU1LittleEndian(codeAddr); switch (opcode) { // case CEE_LDFLDA: We're taking this one out as if you have a sequence // like // // ldloca.0 // ldflda whatever // // of a primitivelike struct, you end up after morphing with addr of a local // that's not marked as addrtaken, which is wrong. Also ldflda is usually used // for structs that contain other structs, which isnt a case we handle very // well now for other reasons. case CEE_LDFLD: { // We won't collapse small fields. This is probably not the right place to have this // check, but we're only using the function for this purpose, and is easy to factor // out if we need to do so. 
CORINFO_RESOLVED_TOKEN resolvedToken; impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field); var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField)); // Preserve 'small' int types if (!varTypeIsSmall(lclTyp)) { lclTyp = genActualType(lclTyp); } if (varTypeIsSmall(lclTyp)) { return false; } return true; } default: break; } return false; } void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind) { pResolvedToken->tokenContext = impTokenLookupContextHandle; pResolvedToken->tokenScope = info.compScopeHnd; pResolvedToken->token = getU4LittleEndian(addr); pResolvedToken->tokenType = kind; info.compCompHnd->resolveToken(pResolvedToken); } /***************************************************************************** * * Pop one tree from the stack. */ StackEntry Compiler::impPopStack() { if (verCurrentState.esStackDepth == 0) { BADCODE("stack underflow"); } return verCurrentState.esStack[--verCurrentState.esStackDepth]; } /***************************************************************************** * * Peep at n'th (0-based) tree on the top of the stack. */ StackEntry& Compiler::impStackTop(unsigned n) { if (verCurrentState.esStackDepth <= n) { BADCODE("stack underflow"); } return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1]; } unsigned Compiler::impStackHeight() { return verCurrentState.esStackDepth; } /***************************************************************************** * Some of the trees are spilled specially. While unspilling them, or * making a copy, these need to be handled specially. The function * enumerates the operators possible after spilling. */ #ifdef DEBUG // only used in asserts static bool impValidSpilledStackEntry(GenTree* tree) { if (tree->gtOper == GT_LCL_VAR) { return true; } if (tree->OperIsConst()) { return true; } return false; } #endif /***************************************************************************** * * The following logic is used to save/restore stack contents. * If 'copy' is true, then we make a copy of the trees on the stack. These * have to all be cloneable/spilled values. 
*/ void Compiler::impSaveStackState(SavedStack* savePtr, bool copy) { savePtr->ssDepth = verCurrentState.esStackDepth; if (verCurrentState.esStackDepth) { savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth]; size_t saveSize = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees); if (copy) { StackEntry* table = savePtr->ssTrees; /* Make a fresh copy of all the stack entries */ for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++) { table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo; GenTree* tree = verCurrentState.esStack[level].val; assert(impValidSpilledStackEntry(tree)); switch (tree->gtOper) { case GT_CNS_INT: case GT_CNS_LNG: case GT_CNS_DBL: case GT_CNS_STR: case GT_LCL_VAR: table->val = gtCloneExpr(tree); break; default: assert(!"Bad oper - Not covered by impValidSpilledStackEntry()"); break; } } } else { memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize); } } } void Compiler::impRestoreStackState(SavedStack* savePtr) { verCurrentState.esStackDepth = savePtr->ssDepth; if (verCurrentState.esStackDepth) { memcpy(verCurrentState.esStack, savePtr->ssTrees, verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack)); } } //------------------------------------------------------------------------ // impBeginTreeList: Get the tree list started for a new basic block. // inline void Compiler::impBeginTreeList() { assert(impStmtList == nullptr && impLastStmt == nullptr); } /***************************************************************************** * * Store the given start and end stmt in the given basic block. This is * mostly called by impEndTreeList(BasicBlock *block). It is called * directly only for handling CEE_LEAVEs out of finally-protected try's. */ inline void Compiler::impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt) { /* Make the list circular, so that we can easily walk it backwards */ firstStmt->SetPrevStmt(lastStmt); /* Store the tree list in the basic block */ block->bbStmtList = firstStmt; /* The block should not already be marked as imported */ assert((block->bbFlags & BBF_IMPORTED) == 0); block->bbFlags |= BBF_IMPORTED; } inline void Compiler::impEndTreeList(BasicBlock* block) { if (impStmtList == nullptr) { // The block should not already be marked as imported. assert((block->bbFlags & BBF_IMPORTED) == 0); // Empty block. Just mark it as imported. block->bbFlags |= BBF_IMPORTED; } else { impEndTreeList(block, impStmtList, impLastStmt); } #ifdef DEBUG if (impLastILoffsStmt != nullptr) { impLastILoffsStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs); impLastILoffsStmt = nullptr; } #endif impStmtList = impLastStmt = nullptr; } /***************************************************************************** * * Check that storing the given tree doesnt mess up the semantic order. Note * that this has only limited value as we can only check [0..chkLevel). 
*/ inline void Compiler::impAppendStmtCheck(Statement* stmt, unsigned chkLevel) { #ifndef DEBUG return; #else if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE) { return; } GenTree* tree = stmt->GetRootNode(); // Calls can only be appended if there are no GTF_GLOB_EFFECT on the stack if (tree->gtFlags & GTF_CALL) { for (unsigned level = 0; level < chkLevel; level++) { assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0); } } if (tree->gtOper == GT_ASG) { // For an assignment to a local variable, all references of that // variable have to be spilled. If it is aliased, all calls and // indirect accesses have to be spilled if (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) { unsigned lclNum = tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); for (unsigned level = 0; level < chkLevel; level++) { assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum)); assert(!lvaTable[lclNum].IsAddressExposed() || (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0); } } // If the access may be to global memory, all side effects have to be spilled. else if (tree->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF) { for (unsigned level = 0; level < chkLevel; level++) { assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0); } } } #endif } //------------------------------------------------------------------------ // impAppendStmt: Append the given statement to the current block's tree list. // // // Arguments: // stmt - The statement to add. // chkLevel - [0..chkLevel) is the portion of the stack which we will check // for interference with stmt and spill if needed. // checkConsumedDebugInfo - Whether to check for consumption of impCurStmtDI. impCurStmtDI // marks the debug info of the current boundary and is set when we // start importing IL at that boundary. If this parameter is true, // then the function checks if 'stmt' has been associated with the // current boundary, and if so, clears it so that we do not attach // it to more upcoming statements. // void Compiler::impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo) { if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } if ((chkLevel != 0) && (chkLevel != (unsigned)CHECK_SPILL_NONE)) { assert(chkLevel <= verCurrentState.esStackDepth); /* If the statement being appended has any side-effects, check the stack to see if anything needs to be spilled to preserve correct ordering. */ GenTree* expr = stmt->GetRootNode(); GenTreeFlags flags = expr->gtFlags & GTF_GLOB_EFFECT; // Assignment to (unaliased) locals don't count as a side-effect as // we handle them specially using impSpillLclRefs(). Temp locals should // be fine too. 
if ((expr->gtOper == GT_ASG) && (expr->AsOp()->gtOp1->gtOper == GT_LCL_VAR) && ((expr->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF) == 0) && !gtHasLocalsWithAddrOp(expr->AsOp()->gtOp2)) { GenTreeFlags op2Flags = expr->AsOp()->gtOp2->gtFlags & GTF_GLOB_EFFECT; assert(flags == (op2Flags | GTF_ASG)); flags = op2Flags; } if (flags != 0) { bool spillGlobEffects = false; if ((flags & GTF_CALL) != 0) { // If there is a call, we have to spill global refs spillGlobEffects = true; } else if (!expr->OperIs(GT_ASG)) { if ((flags & GTF_ASG) != 0) { // The expression is not an assignment node but it has an assignment side effect, it // must be an atomic op, HW intrinsic or some other kind of node that stores to memory. // Since we don't know what it assigns to, we need to spill global refs. spillGlobEffects = true; } } else { GenTree* lhs = expr->gtGetOp1(); GenTree* rhs = expr->gtGetOp2(); if (((rhs->gtFlags | lhs->gtFlags) & GTF_ASG) != 0) { // Either side of the assignment node has an assignment side effect. // Since we don't know what it assigns to, we need to spill global refs. spillGlobEffects = true; } else if ((lhs->gtFlags & GTF_GLOB_REF) != 0) { spillGlobEffects = true; } } impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt")); } else { impSpillSpecialSideEff(); } } impAppendStmtCheck(stmt, chkLevel); impAppendStmt(stmt); #ifdef FEATURE_SIMD impMarkContiguousSIMDFieldAssignments(stmt); #endif // Once we set the current offset as debug info in an appended tree, we are // ready to report the following offsets. Note that we need to compare // offsets here instead of debug info, since we do not set the "is call" // bit in impCurStmtDI. if (checkConsumedDebugInfo && (impLastStmt->GetDebugInfo().GetLocation().GetOffset() == impCurStmtDI.GetLocation().GetOffset())) { impCurStmtOffsSet(BAD_IL_OFFSET); } #ifdef DEBUG if (impLastILoffsStmt == nullptr) { impLastILoffsStmt = stmt; } if (verbose) { printf("\n\n"); gtDispStmt(stmt); } #endif } //------------------------------------------------------------------------ // impAppendStmt: Add the statement to the current stmts list. // // Arguments: // stmt - the statement to add. // inline void Compiler::impAppendStmt(Statement* stmt) { if (impStmtList == nullptr) { // The stmt is the first in the list. impStmtList = stmt; } else { // Append the expression statement to the existing list. impLastStmt->SetNextStmt(stmt); stmt->SetPrevStmt(impLastStmt); } impLastStmt = stmt; } //------------------------------------------------------------------------ // impExtractLastStmt: Extract the last statement from the current stmts list. // // Return Value: // The extracted statement. // // Notes: // It assumes that the stmt will be reinserted later. // Statement* Compiler::impExtractLastStmt() { assert(impLastStmt != nullptr); Statement* stmt = impLastStmt; impLastStmt = impLastStmt->GetPrevStmt(); if (impLastStmt == nullptr) { impStmtList = nullptr; } return stmt; } //------------------------------------------------------------------------- // impInsertStmtBefore: Insert the given "stmt" before "stmtBefore". // // Arguments: // stmt - a statement to insert; // stmtBefore - an insertion point to insert "stmt" before. 
// inline void Compiler::impInsertStmtBefore(Statement* stmt, Statement* stmtBefore) { assert(stmt != nullptr); assert(stmtBefore != nullptr); if (stmtBefore == impStmtList) { impStmtList = stmt; } else { Statement* stmtPrev = stmtBefore->GetPrevStmt(); stmt->SetPrevStmt(stmtPrev); stmtPrev->SetNextStmt(stmt); } stmt->SetNextStmt(stmtBefore); stmtBefore->SetPrevStmt(stmt); } //------------------------------------------------------------------------ // impAppendTree: Append the given expression tree to the current block's tree list. // // // Arguments: // tree - The tree that will be the root of the newly created statement. // chkLevel - [0..chkLevel) is the portion of the stack which we will check // for interference with stmt and spill if needed. // di - Debug information to associate with the statement. // checkConsumedDebugInfo - Whether to check for consumption of impCurStmtDI. impCurStmtDI // marks the debug info of the current boundary and is set when we // start importing IL at that boundary. If this parameter is true, // then the function checks if 'stmt' has been associated with the // current boundary, and if so, clears it so that we do not attach // it to more upcoming statements. // // Return value: // The newly created statement. // Statement* Compiler::impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo) { assert(tree); /* Allocate an 'expression statement' node */ Statement* stmt = gtNewStmt(tree, di); /* Append the statement to the current block's stmt list */ impAppendStmt(stmt, chkLevel, checkConsumedDebugInfo); return stmt; } /***************************************************************************** * * Insert the given expression tree before "stmtBefore" */ void Compiler::impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore) { /* Allocate an 'expression statement' node */ Statement* stmt = gtNewStmt(tree, di); /* Append the statement to the current block's stmt list */ impInsertStmtBefore(stmt, stmtBefore); } /***************************************************************************** * * Append an assignment of the given value to a temp to the current tree list. * curLevel is the stack level for which the spill to the temp is being done. */ void Compiler::impAssignTempGen(unsigned tmp, GenTree* val, unsigned curLevel, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ ) { GenTree* asg = gtNewTempAssign(tmp, val); if (!asg->IsNothingNode()) { if (pAfterStmt) { Statement* asgStmt = gtNewStmt(asg, di); fgInsertStmtAfter(block, *pAfterStmt, asgStmt); *pAfterStmt = asgStmt; } else { impAppendTree(asg, curLevel, impCurStmtDI); } } } /***************************************************************************** * same as above, but handle the valueclass case too */ void Compiler::impAssignTempGen(unsigned tmpNum, GenTree* val, CORINFO_CLASS_HANDLE structType, unsigned curLevel, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ ) { GenTree* asg; assert(val->TypeGet() != TYP_STRUCT || structType != NO_CLASS_HANDLE); if (varTypeIsStruct(val) && (structType != NO_CLASS_HANDLE)) { assert(tmpNum < lvaCount); assert(structType != NO_CLASS_HANDLE); // if the method is non-verifiable the assert is not true // so at least ignore it in the case when verification is turned on // since any block that tries to use the temp would have failed verification. 
var_types varType = lvaTable[tmpNum].lvType; assert(varType == TYP_UNDEF || varTypeIsStruct(varType)); lvaSetStruct(tmpNum, structType, false); varType = lvaTable[tmpNum].lvType; // Now, set the type of the struct value. Note that lvaSetStruct may modify the type // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType) // that has been passed in for the value being assigned to the temp, in which case we // need to set 'val' to that same type. // Note also that if we always normalized the types of any node that might be a struct // type, this would not be necessary - but that requires additional JIT/EE interface // calls that may not actually be required - e.g. if we only access a field of a struct. GenTree* dst = gtNewLclvNode(tmpNum, varType); asg = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, di, block); } else { asg = gtNewTempAssign(tmpNum, val); } if (!asg->IsNothingNode()) { if (pAfterStmt) { Statement* asgStmt = gtNewStmt(asg, di); fgInsertStmtAfter(block, *pAfterStmt, asgStmt); *pAfterStmt = asgStmt; } else { impAppendTree(asg, curLevel, impCurStmtDI); } } } /***************************************************************************** * * Pop the given number of values from the stack and return a list node with * their values. * The 'prefixTree' argument may optionally contain an argument * list that is prepended to the list returned from this function. * * The notion of prepended is a bit misleading in that the list is backwards * from the way I would expect: The first element popped is at the end of * the returned list, and prefixTree is 'before' that, meaning closer to * the end of the list. To get to prefixTree, you have to walk to the * end of the list. * * For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as * such we reverse its meaning such that returnValue has a reversed * prefixTree at the head of the list. */ GenTreeCall::Use* Compiler::impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig, GenTreeCall::Use* prefixArgs) { assert(sig == nullptr || count == sig->numArgs); CORINFO_CLASS_HANDLE structType; GenTreeCall::Use* argList; if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) { argList = nullptr; } else { // ARG_ORDER_L2R argList = prefixArgs; } while (count--) { StackEntry se = impPopStack(); typeInfo ti = se.seTypeInfo; GenTree* temp = se.val; if (varTypeIsStruct(temp)) { // Morph trees that aren't already OBJs or MKREFANY to be OBJs assert(ti.IsType(TI_STRUCT)); structType = ti.GetClassHandleForValueClass(); bool forceNormalization = false; if (varTypeIsSIMD(temp)) { // We need to ensure that fgMorphArgs will use the correct struct handle to ensure proper // ABI handling of this argument. // Note that this can happen, for example, if we have a SIMD intrinsic that returns a SIMD type // with a different baseType than we've seen. // We also need to ensure an OBJ node if we have a FIELD node that might be transformed to LCL_FLD // or a plain GT_IND. // TODO-Cleanup: Consider whether we can eliminate all of these cases. 
if ((gtGetStructHandleIfPresent(temp) != structType) || temp->OperIs(GT_FIELD)) { forceNormalization = true; } } #ifdef DEBUG if (verbose) { printf("Calling impNormStructVal on:\n"); gtDispTree(temp); } #endif temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL, forceNormalization); #ifdef DEBUG if (verbose) { printf("resulting tree:\n"); gtDispTree(temp); } #endif } /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */ argList = gtPrependNewCallArg(temp, argList); } if (sig != nullptr) { if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS && sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR) { // Make sure that all valuetypes (including enums) that we push are loaded. // This is to guarantee that if a GC is triggerred from the prestub of this methods, // all valuetypes in the method signature are already loaded. // We need to be able to find the size of the valuetypes, but we cannot // do a class-load from within GC. info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass); } CORINFO_ARG_LIST_HANDLE sigArgs = sig->args; GenTreeCall::Use* arg; for (arg = argList, count = sig->numArgs; count > 0; arg = arg->GetNext(), count--) { PREFIX_ASSUME(arg != nullptr); CORINFO_CLASS_HANDLE classHnd; CorInfoType corType = strip(info.compCompHnd->getArgType(sig, sigArgs, &classHnd)); var_types jitSigType = JITtype2varType(corType); if (!impCheckImplicitArgumentCoercion(jitSigType, arg->GetNode()->TypeGet())) { BADCODE("the call argument has a type that can't be implicitly converted to the signature type"); } // insert implied casts (from float to double or double to float) if ((jitSigType == TYP_DOUBLE) && (arg->GetNode()->TypeGet() == TYP_FLOAT)) { arg->SetNode(gtNewCastNode(TYP_DOUBLE, arg->GetNode(), false, TYP_DOUBLE)); } else if ((jitSigType == TYP_FLOAT) && (arg->GetNode()->TypeGet() == TYP_DOUBLE)) { arg->SetNode(gtNewCastNode(TYP_FLOAT, arg->GetNode(), false, TYP_FLOAT)); } // insert any widening or narrowing casts for backwards compatibility arg->SetNode(impImplicitIorI4Cast(arg->GetNode(), jitSigType)); if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR && corType != CORINFO_TYPE_VAR) { CORINFO_CLASS_HANDLE argRealClass = info.compCompHnd->getArgClass(sig, sigArgs); if (argRealClass != nullptr) { // Make sure that all valuetypes (including enums) that we push are loaded. // This is to guarantee that if a GC is triggered from the prestub of this methods, // all valuetypes in the method signature are already loaded. // We need to be able to find the size of the valuetypes, but we cannot // do a class-load from within GC. info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass); } } const var_types nodeArgType = arg->GetNode()->TypeGet(); if (!varTypeIsStruct(jitSigType) && genTypeSize(nodeArgType) != genTypeSize(jitSigType)) { assert(!varTypeIsStruct(nodeArgType)); // Some ABI require precise size information for call arguments less than target pointer size, // for example arm64 OSX. Create a special node to keep this information until morph // consumes it into `fgArgInfo`. 
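// Illustrative shape only: for a signature type of TYP_UBYTE carried by an INT-typed
// node, the argument below becomes PUTARG_TYPE(ubyte, <original arg>) so that morph
// still sees the precise sub-register size required by ABIs such as arm64 OSX.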
GenTree* putArgType = gtNewOperNode(GT_PUTARG_TYPE, jitSigType, arg->GetNode()); arg->SetNode(putArgType); } sigArgs = info.compCompHnd->getArgNext(sigArgs); } } if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) { // Prepend the prefixTree // Simple in-place reversal to place treeList // at the end of a reversed prefixTree while (prefixArgs != nullptr) { GenTreeCall::Use* next = prefixArgs->GetNext(); prefixArgs->SetNext(argList); argList = prefixArgs; prefixArgs = next; } } return argList; } static bool TypeIs(var_types type1, var_types type2) { return type1 == type2; } // Check if type1 matches any type from the list. template <typename... T> static bool TypeIs(var_types type1, var_types type2, T... rest) { return TypeIs(type1, type2) || TypeIs(type1, rest...); } //------------------------------------------------------------------------ // impCheckImplicitArgumentCoercion: check that the node's type is compatible with // the signature's type using ECMA implicit argument coercion table. // // Arguments: // sigType - the type in the call signature; // nodeType - the node type. // // Return Value: // true if they are compatible, false otherwise. // // Notes: // - it is currently allowing byref->long passing, should be fixed in VM; // - it can't check long -> native int case on 64-bit platforms, // so the behavior is different depending on the target bitness. // bool Compiler::impCheckImplicitArgumentCoercion(var_types sigType, var_types nodeType) const { if (sigType == nodeType) { return true; } if (TypeIs(sigType, TYP_BOOL, TYP_UBYTE, TYP_BYTE, TYP_USHORT, TYP_SHORT, TYP_UINT, TYP_INT)) { if (TypeIs(nodeType, TYP_BOOL, TYP_UBYTE, TYP_BYTE, TYP_USHORT, TYP_SHORT, TYP_UINT, TYP_INT, TYP_I_IMPL)) { return true; } } else if (TypeIs(sigType, TYP_ULONG, TYP_LONG)) { if (TypeIs(nodeType, TYP_LONG)) { return true; } } else if (TypeIs(sigType, TYP_FLOAT, TYP_DOUBLE)) { if (TypeIs(nodeType, TYP_FLOAT, TYP_DOUBLE)) { return true; } } else if (TypeIs(sigType, TYP_BYREF)) { if (TypeIs(nodeType, TYP_I_IMPL)) { return true; } // This condition tolerates such IL: // ; V00 this ref this class-hnd // ldarg.0 // call(byref) if (TypeIs(nodeType, TYP_REF)) { return true; } } else if (varTypeIsStruct(sigType)) { if (varTypeIsStruct(nodeType)) { return true; } } // This condition should not be under `else` because `TYP_I_IMPL` // intersects with `TYP_LONG` or `TYP_INT`. if (TypeIs(sigType, TYP_I_IMPL, TYP_U_IMPL)) { // Note that it allows `ldc.i8 1; call(nint)` on 64-bit platforms, // but we can't distinguish `nint` from `long` there. if (TypeIs(nodeType, TYP_I_IMPL, TYP_U_IMPL, TYP_INT, TYP_UINT)) { return true; } // It tolerates IL that ECMA does not allow but that is commonly used. // Example: // V02 loc1 struct <RTL_OSVERSIONINFOEX, 32> // ldloca.s 0x2 // call(native int) if (TypeIs(nodeType, TYP_BYREF)) { return true; } } return false; } /***************************************************************************** * * Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.) * The first "skipReverseCount" items are not reversed. 
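 * For example (illustrative): with count == 3 and skipReverseCount == 1, the first
 * popped entry keeps its position in the returned list and only the remaining two
 * entries have their order reversed.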
*/ GenTreeCall::Use* Compiler::impPopReverseCallArgs(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount) { assert(skipReverseCount <= count); GenTreeCall::Use* list = impPopCallArgs(count, sig); // reverse the list if (list == nullptr || skipReverseCount == count) { return list; } GenTreeCall::Use* ptr = nullptr; // Initialized to the first node that needs to be reversed GenTreeCall::Use* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed if (skipReverseCount == 0) { ptr = list; } else { lastSkipNode = list; // Get to the first node that needs to be reversed for (unsigned i = 0; i < skipReverseCount - 1; i++) { lastSkipNode = lastSkipNode->GetNext(); } PREFIX_ASSUME(lastSkipNode != nullptr); ptr = lastSkipNode->GetNext(); } GenTreeCall::Use* reversedList = nullptr; do { GenTreeCall::Use* tmp = ptr->GetNext(); ptr->SetNext(reversedList); reversedList = ptr; ptr = tmp; } while (ptr != nullptr); if (skipReverseCount) { lastSkipNode->SetNext(reversedList); return list; } else { return reversedList; } } //------------------------------------------------------------------------ // impAssignStruct: Create a struct assignment // // Arguments: // dest - the destination of the assignment // src - the value to be assigned // structHnd - handle representing the struct type // curLevel - stack level for which a spill may be being done // pAfterStmt - statement to insert any additional statements after // ilOffset - il offset for new statements // block - block to insert any additional statements in // // Return Value: // The tree that should be appended to the statement list that represents the assignment. // // Notes: // Temp assignments may be appended to impStmtList if spilling is necessary. GenTree* Compiler::impAssignStruct(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt, /* = nullptr */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = nullptr */ ) { assert(varTypeIsStruct(dest)); DebugInfo usedDI = di; if (!usedDI.IsValid()) { usedDI = impCurStmtDI; } while (dest->gtOper == GT_COMMA) { // Second thing is the struct. assert(varTypeIsStruct(dest->AsOp()->gtOp2)); // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree. if (pAfterStmt) { Statement* newStmt = gtNewStmt(dest->AsOp()->gtOp1, usedDI); fgInsertStmtAfter(block, *pAfterStmt, newStmt); *pAfterStmt = newStmt; } else { impAppendTree(dest->AsOp()->gtOp1, curLevel, usedDI); // do the side effect } // set dest to the second thing dest = dest->AsOp()->gtOp2; } assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD || dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX); // Return a NOP if this is a self-assignment. if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR && src->AsLclVarCommon()->GetLclNum() == dest->AsLclVarCommon()->GetLclNum()) { return gtNewNothingNode(); } // TODO-1stClassStructs: Avoid creating an address if it is not needed, // or re-creating a Blk node if it is. GenTree* destAddr; if (dest->gtOper == GT_IND || dest->OperIsBlk()) { destAddr = dest->AsOp()->gtOp1; } else { destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest); } return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, usedDI, block)); } //------------------------------------------------------------------------ // impAssignStructPtr: Assign (copy) the structure from 'src' to 'destAddr'. 
// // Arguments: // destAddr - address of the destination of the assignment // src - source of the assignment // structHnd - handle representing the struct type // curLevel - stack level for which a spill may be being done // pAfterStmt - statement to insert any additional statements after // di - debug info for new statements // block - block to insert any additional statements in // // Return Value: // The tree that should be appended to the statement list that represents the assignment. // // Notes: // Temp assignments may be appended to impStmtList if spilling is necessary. GenTree* Compiler::impAssignStructPtr(GenTree* destAddr, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ ) { GenTree* dest = nullptr; GenTreeFlags destFlags = GTF_EMPTY; DebugInfo usedDI = di; if (!usedDI.IsValid()) { usedDI = impCurStmtDI; } #ifdef DEBUG #ifdef FEATURE_HW_INTRINSICS if (src->OperIs(GT_HWINTRINSIC)) { const GenTreeHWIntrinsic* intrinsic = src->AsHWIntrinsic(); if (HWIntrinsicInfo::IsMultiReg(intrinsic->GetHWIntrinsicId())) { assert(src->TypeGet() == TYP_STRUCT); } else { assert(varTypeIsSIMD(src)); } } else #endif // FEATURE_HW_INTRINSICS { assert(src->OperIs(GT_LCL_VAR, GT_LCL_FLD, GT_FIELD, GT_IND, GT_OBJ, GT_CALL, GT_MKREFANY, GT_RET_EXPR, GT_COMMA) || ((src->TypeGet() != TYP_STRUCT) && src->OperIsSIMD())); } #endif // DEBUG var_types asgType = src->TypeGet(); if (src->gtOper == GT_CALL) { GenTreeCall* srcCall = src->AsCall(); if (srcCall->TreatAsHasRetBufArg(this)) { // Case of call returning a struct via hidden retbuf arg CLANG_FORMAT_COMMENT_ANCHOR; #if !defined(TARGET_ARM) // Unmanaged instance methods on Windows or Unix X86 need the retbuf arg after the first (this) parameter if ((TargetOS::IsWindows || compUnixX86Abi()) && srcCall->IsUnmanaged()) { if (callConvIsInstanceMethodCallConv(srcCall->GetUnmanagedCallConv())) { #ifdef TARGET_X86 // The argument list has already been reversed. // Insert the return buffer as the second-to-last node // so it will be pushed on to the stack after the user args but before the native this arg // as required by the native ABI. GenTreeCall::Use* lastArg = srcCall->gtCallArgs; if (lastArg == nullptr) { srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); } else if (srcCall->GetUnmanagedCallConv() == CorInfoCallConvExtension::Thiscall) { // For thiscall, the "this" parameter is not included in the argument list reversal, // so we need to put the return buffer as the last parameter. for (; lastArg->GetNext() != nullptr; lastArg = lastArg->GetNext()) ; gtInsertNewCallArgAfter(destAddr, lastArg); } else if (lastArg->GetNext() == nullptr) { srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, lastArg); } else { assert(lastArg != nullptr && lastArg->GetNext() != nullptr); GenTreeCall::Use* secondLastArg = lastArg; lastArg = lastArg->GetNext(); for (; lastArg->GetNext() != nullptr; secondLastArg = lastArg, lastArg = lastArg->GetNext()) ; assert(secondLastArg->GetNext() != nullptr); gtInsertNewCallArgAfter(destAddr, secondLastArg); } #else GenTreeCall::Use* thisArg = gtInsertNewCallArgAfter(destAddr, srcCall->gtCallArgs); #endif } else { #ifdef TARGET_X86 // The argument list has already been reversed. // Insert the return buffer as the last node so it will be pushed on to the stack last // as required by the native ABI. 
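// Roughly (illustrative only): the return buffer node is appended after the current
// last entry of gtCallArgs (or becomes the only entry when the list is empty), which
// per the comment above makes it the last value pushed.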
GenTreeCall::Use* lastArg = srcCall->gtCallArgs; if (lastArg == nullptr) { srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); } else { for (; lastArg->GetNext() != nullptr; lastArg = lastArg->GetNext()) ; gtInsertNewCallArgAfter(destAddr, lastArg); } #else // insert the return value buffer into the argument list as first byref parameter srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); #endif } } else #endif // !defined(TARGET_ARM) { // insert the return value buffer into the argument list as first byref parameter srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); } // now returns void, not a struct src->gtType = TYP_VOID; // return the morphed call node return src; } else { // Case of call returning a struct in one or more registers. var_types returnType = (var_types)srcCall->gtReturnType; // First we try to change this to "LclVar/LclFld = call" // if ((destAddr->gtOper == GT_ADDR) && (destAddr->AsOp()->gtOp1->gtOper == GT_LCL_VAR)) { // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD. // That is, the IR will be of the form lclVar = call for multi-reg return // GenTreeLclVar* lcl = destAddr->AsOp()->gtOp1->AsLclVar(); unsigned lclNum = lcl->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (src->AsCall()->HasMultiRegRetVal()) { // Mark the struct LclVar as used in a MultiReg return context // which currently makes it non promotable. // TODO-1stClassStructs: Eliminate this pessimization when we can more generally // handle multireg returns. lcl->gtFlags |= GTF_DONT_CSE; varDsc->lvIsMultiRegRet = true; } dest = lcl; #if defined(TARGET_ARM) // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case, // but that method has not been updadted to include ARM. impMarkLclDstNotPromotable(lclNum, src, structHnd); lcl->gtFlags |= GTF_DONT_CSE; #elif defined(UNIX_AMD64_ABI) // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs. assert(!src->AsCall()->IsVarargs() && "varargs not allowed for System V OSs."); // Make the struct non promotable. The eightbytes could contain multiple fields. // TODO-1stClassStructs: Eliminate this pessimization when we can more generally // handle multireg returns. // TODO-Cleanup: Why is this needed here? This seems that it will set this even for // non-multireg returns. lcl->gtFlags |= GTF_DONT_CSE; varDsc->lvIsMultiRegRet = true; #endif } else // we don't have a GT_ADDR of a GT_LCL_VAR { // !!! The destination could be on stack. !!! // This flag will let us choose the correct write barrier. asgType = returnType; destFlags = GTF_IND_TGTANYWHERE; } } } else if (src->gtOper == GT_RET_EXPR) { GenTreeCall* call = src->AsRetExpr()->gtInlineCandidate->AsCall(); noway_assert(call->gtOper == GT_CALL); if (call->HasRetBufArg()) { // insert the return value buffer into the argument list as first byref parameter call->gtCallArgs = gtPrependNewCallArg(destAddr, call->gtCallArgs); // now returns void, not a struct src->gtType = TYP_VOID; call->gtType = TYP_VOID; // We already have appended the write to 'dest' GT_CALL's args // So now we just return an empty node (pruning the GT_RET_EXPR) return src; } else { // Case of inline method returning a struct in one or more registers. // We won't need a return buffer asgType = src->gtType; if ((destAddr->gtOper != GT_ADDR) || (destAddr->AsOp()->gtOp1->gtOper != GT_LCL_VAR)) { // !!! The destination could be on stack. !!! // This flag will let us choose the correct write barrier. 
destFlags = GTF_IND_TGTANYWHERE; } } } else if (src->OperIsBlk()) { asgType = impNormStructType(structHnd); if (src->gtOper == GT_OBJ) { assert(src->AsObj()->GetLayout()->GetClassHandle() == structHnd); } } else if (src->gtOper == GT_INDEX) { asgType = impNormStructType(structHnd); assert(src->AsIndex()->gtStructElemClass == structHnd); } else if (src->gtOper == GT_MKREFANY) { // Since we are assigning the result of a GT_MKREFANY, // "destAddr" must point to a refany. GenTree* destAddrClone; destAddr = impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment")); assert(OFFSETOF__CORINFO_TypedReference__dataPtr == 0); assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF); fgAddFieldSeqForZeroOffset(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField())); GenTree* ptrSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr); GenTreeIntCon* typeFieldOffset = gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL); typeFieldOffset->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField()); GenTree* typeSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset)); // append the assign of the pointer value GenTree* asg = gtNewAssignNode(ptrSlot, src->AsOp()->gtOp1); if (pAfterStmt) { Statement* newStmt = gtNewStmt(asg, usedDI); fgInsertStmtAfter(block, *pAfterStmt, newStmt); *pAfterStmt = newStmt; } else { impAppendTree(asg, curLevel, usedDI); } // return the assign of the type value, to be appended return gtNewAssignNode(typeSlot, src->AsOp()->gtOp2); } else if (src->gtOper == GT_COMMA) { // The second thing is the struct or its address. assert(varTypeIsStruct(src->AsOp()->gtOp2) || src->AsOp()->gtOp2->gtType == TYP_BYREF); if (pAfterStmt) { // Insert op1 after '*pAfterStmt' Statement* newStmt = gtNewStmt(src->AsOp()->gtOp1, usedDI); fgInsertStmtAfter(block, *pAfterStmt, newStmt); *pAfterStmt = newStmt; } else if (impLastStmt != nullptr) { // Do the side-effect as a separate statement. impAppendTree(src->AsOp()->gtOp1, curLevel, usedDI); } else { // In this case we have neither been given a statement to insert after, nor are we // in the importer where we can append the side effect. // Instead, we're going to sink the assignment below the COMMA. src->AsOp()->gtOp2 = impAssignStructPtr(destAddr, src->AsOp()->gtOp2, structHnd, curLevel, pAfterStmt, usedDI, block); return src; } // Evaluate the second thing using recursion. return impAssignStructPtr(destAddr, src->AsOp()->gtOp2, structHnd, curLevel, pAfterStmt, usedDI, block); } else if (src->IsLocal()) { asgType = src->TypeGet(); } else if (asgType == TYP_STRUCT) { // It should already have the appropriate type. assert(asgType == impNormStructType(structHnd)); } if ((dest == nullptr) && (destAddr->OperGet() == GT_ADDR)) { GenTree* destNode = destAddr->gtGetOp1(); // If the actual destination is a local, a GT_INDEX or a block node, or is a node that // will be morphed, don't insert an OBJ(ADDR) if it already has the right type. if (destNode->OperIs(GT_LCL_VAR, GT_INDEX) || destNode->OperIsBlk()) { var_types destType = destNode->TypeGet(); // If one or both types are TYP_STRUCT (one may not yet be normalized), they are compatible // iff their handles are the same. // Otherwise, they are compatible if their types are the same. bool typesAreCompatible = ((destType == TYP_STRUCT) || (asgType == TYP_STRUCT)) ? 
((gtGetStructHandleIfPresent(destNode) == structHnd) && varTypeIsStruct(asgType)) : (destType == asgType); if (typesAreCompatible) { dest = destNode; if (destType != TYP_STRUCT) { // Use a normalized type if available. We know from above that they're equivalent. asgType = destType; } } } } if (dest == nullptr) { if (asgType == TYP_STRUCT) { dest = gtNewObjNode(structHnd, destAddr); gtSetObjGcInfo(dest->AsObj()); // Although an obj as a call argument was always assumed to be a globRef // (which is itself overly conservative), that is not true of the operands // of a block assignment. dest->gtFlags &= ~GTF_GLOB_REF; dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF); } else { dest = gtNewOperNode(GT_IND, asgType, destAddr); } } if (dest->OperIs(GT_LCL_VAR) && (src->IsMultiRegNode() || (src->OperIs(GT_RET_EXPR) && src->AsRetExpr()->gtInlineCandidate->AsCall()->HasMultiRegRetVal()))) { if (lvaEnregMultiRegVars && varTypeIsStruct(dest)) { dest->AsLclVar()->SetMultiReg(); } if (src->OperIs(GT_CALL)) { lvaGetDesc(dest->AsLclVar())->lvIsMultiRegRet = true; } } dest->gtFlags |= destFlags; destFlags = dest->gtFlags; // return an assignment node, to be appended GenTree* asgNode = gtNewAssignNode(dest, src); gtBlockOpInit(asgNode, dest, src, false); // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs // of assignments. if ((destFlags & GTF_DONT_CSE) == 0) { dest->gtFlags &= ~(GTF_DONT_CSE); } return asgNode; } /***************************************************************************** Given a struct value, and the class handle for that structure, return the expression for the address for that structure value. willDeref - does the caller guarantee to dereference the pointer. */ GenTree* Compiler::impGetStructAddr(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool willDeref) { assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd)); var_types type = structVal->TypeGet(); genTreeOps oper = structVal->gtOper; if (oper == GT_OBJ && willDeref) { assert(structVal->AsObj()->GetLayout()->GetClassHandle() == structHnd); return (structVal->AsObj()->Addr()); } else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY || structVal->OperIsSimdOrHWintrinsic()) { unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj")); impAssignTempGen(tmpNum, structVal, structHnd, curLevel); // The 'return value' is now the temp itself type = genActualType(lvaTable[tmpNum].TypeGet()); GenTree* temp = gtNewLclvNode(tmpNum, type); temp = gtNewOperNode(GT_ADDR, TYP_BYREF, temp); return temp; } else if (oper == GT_COMMA) { assert(structVal->AsOp()->gtOp2->gtType == type); // Second thing is the struct Statement* oldLastStmt = impLastStmt; structVal->AsOp()->gtOp2 = impGetStructAddr(structVal->AsOp()->gtOp2, structHnd, curLevel, willDeref); structVal->gtType = TYP_BYREF; if (oldLastStmt != impLastStmt) { // Some temp assignment statement was placed on the statement list // for Op2, but that would be out of order with op1, so we need to // spill op1 onto the statement list after whatever was last // before we recursed on Op2 (i.e. before whatever Op2 appended). Statement* beforeStmt; if (oldLastStmt == nullptr) { // The op1 stmt should be the first in the list. beforeStmt = impStmtList; } else { // Insert after the oldLastStmt before the first inserted for op2. 
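// Illustrative example: if the list was S1, S2 (== oldLastStmt) and the recursion on
// op2 appended S3, then op1's side effect is inserted before S3, giving the order
// S1, S2, op1, S3 and preserving op1-before-op2 evaluation.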
beforeStmt = oldLastStmt->GetNextStmt(); } impInsertTreeBefore(structVal->AsOp()->gtOp1, impCurStmtDI, beforeStmt); structVal->AsOp()->gtOp1 = gtNewNothingNode(); } return (structVal); } return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal)); } //------------------------------------------------------------------------ // impNormStructType: Normalize the type of a (known to be) struct class handle. // // Arguments: // structHnd - The class handle for the struct type of interest. // pSimdBaseJitType - (optional, default nullptr) - if non-null, and the struct is a SIMD // type, set to the SIMD base JIT type // // Return Value: // The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*). // It may also modify the compFloatingPointUsed flag if the type is a SIMD type. // // Notes: // Normalizing the type involves examining the struct type to determine if it should // be modified to one that is handled specially by the JIT, possibly being a candidate // for full enregistration, e.g. TYP_SIMD16. If the size of the struct is already known // call structSizeMightRepresentSIMDType to determine if this api needs to be called. var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* pSimdBaseJitType) { assert(structHnd != NO_CLASS_HANDLE); var_types structType = TYP_STRUCT; #ifdef FEATURE_SIMD if (supportSIMDTypes()) { const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd); // Don't bother if the struct contains GC references of byrefs, it can't be a SIMD type. if ((structFlags & (CORINFO_FLG_CONTAINS_GC_PTR | CORINFO_FLG_BYREF_LIKE)) == 0) { unsigned originalSize = info.compCompHnd->getClassSize(structHnd); if (structSizeMightRepresentSIMDType(originalSize)) { unsigned int sizeBytes; CorInfoType simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(structHnd, &sizeBytes); if (simdBaseJitType != CORINFO_TYPE_UNDEF) { assert(sizeBytes == originalSize); structType = getSIMDTypeForSize(sizeBytes); if (pSimdBaseJitType != nullptr) { *pSimdBaseJitType = simdBaseJitType; } // Also indicate that we use floating point registers. compFloatingPointUsed = true; } } } } #endif // FEATURE_SIMD return structType; } //------------------------------------------------------------------------ // Compiler::impNormStructVal: Normalize a struct value // // Arguments: // structVal - the node we are going to normalize // structHnd - the class handle for the node // curLevel - the current stack level // forceNormalization - Force the creation of an OBJ node (default is false). // // Notes: // Given struct value 'structVal', make sure it is 'canonical', that is // it is either: // - a known struct type (non-TYP_STRUCT, e.g. TYP_SIMD8) // - an OBJ or a MKREFANY node, or // - a node (e.g. GT_INDEX) that will be morphed. // If the node is a CALL or RET_EXPR, a copy will be made to a new temp. // GenTree* Compiler::impNormStructVal(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool forceNormalization /*=false*/) { assert(forceNormalization || varTypeIsStruct(structVal)); assert(structHnd != NO_CLASS_HANDLE); var_types structType = structVal->TypeGet(); bool makeTemp = false; if (structType == TYP_STRUCT) { structType = impNormStructType(structHnd); } bool alreadyNormalized = false; GenTreeLclVarCommon* structLcl = nullptr; genTreeOps oper = structVal->OperGet(); switch (oper) { // GT_RETURN and GT_MKREFANY don't capture the handle. 
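// A rough summary of the shapes produced below (illustrative only):
//   LCL_VAR/LCL_FLD V01  =>  OBJ(ADDR(LCL_VAR/LCL_FLD V01))
//   FIELD (TYP_STRUCT)   =>  OBJ(ADDR(FIELD))
//   CALL / RET_EXPR      =>  spilled to a new temp first, then wrapped in an OBJ if still required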
case GT_RETURN: break; case GT_MKREFANY: alreadyNormalized = true; break; case GT_CALL: structVal->AsCall()->gtRetClsHnd = structHnd; makeTemp = true; break; case GT_RET_EXPR: structVal->AsRetExpr()->gtRetClsHnd = structHnd; makeTemp = true; break; case GT_ARGPLACE: structVal->AsArgPlace()->gtArgPlaceClsHnd = structHnd; break; case GT_INDEX: // This will be transformed to an OBJ later. alreadyNormalized = true; structVal->AsIndex()->gtStructElemClass = structHnd; structVal->AsIndex()->gtIndElemSize = info.compCompHnd->getClassSize(structHnd); break; case GT_FIELD: // Wrap it in a GT_OBJ, if needed. structVal->gtType = structType; if ((structType == TYP_STRUCT) || forceNormalization) { structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal)); } break; case GT_LCL_VAR: case GT_LCL_FLD: structLcl = structVal->AsLclVarCommon(); // Wrap it in a GT_OBJ. structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal)); FALLTHROUGH; case GT_OBJ: case GT_BLK: case GT_ASG: // These should already have the appropriate type. assert(structVal->gtType == structType); alreadyNormalized = true; break; case GT_IND: assert(structVal->gtType == structType); structVal = gtNewObjNode(structHnd, structVal->gtGetOp1()); alreadyNormalized = true; break; #ifdef FEATURE_SIMD case GT_SIMD: assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType)); break; #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS case GT_HWINTRINSIC: assert(structVal->gtType == structType); assert(varTypeIsSIMD(structVal) || HWIntrinsicInfo::IsMultiReg(structVal->AsHWIntrinsic()->GetHWIntrinsicId())); break; #endif case GT_COMMA: { // The second thing could either be a block node or a GT_FIELD or a GT_SIMD or a GT_COMMA node. GenTree* blockNode = structVal->AsOp()->gtOp2; assert(blockNode->gtType == structType); // Is this GT_COMMA(op1, GT_COMMA())? GenTree* parent = structVal; if (blockNode->OperGet() == GT_COMMA) { // Find the last node in the comma chain. do { assert(blockNode->gtType == structType); parent = blockNode; blockNode = blockNode->AsOp()->gtOp2; } while (blockNode->OperGet() == GT_COMMA); } if (blockNode->OperGet() == GT_FIELD) { // If we have a GT_FIELD then wrap it in a GT_OBJ. blockNode = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, blockNode)); } #ifdef FEATURE_SIMD if (blockNode->OperIsSimdOrHWintrinsic()) { parent->AsOp()->gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization); alreadyNormalized = true; } else #endif { noway_assert(blockNode->OperIsBlk()); // Sink the GT_COMMA below the blockNode addr. // That is GT_COMMA(op1, op2=blockNode) is tranformed into // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)). // // In case of a chained GT_COMMA case, we sink the last // GT_COMMA below the blockNode addr. 
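// Illustrative single-level example: COMMA(sideEffect, OBJ(addr)) becomes
// OBJ(COMMA<byref>(sideEffect, addr)), i.e. the comma is re-typed to TYP_BYREF and
// sunk below the OBJ so a block node still produces the struct value.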
GenTree* blockNodeAddr = blockNode->AsOp()->gtOp1; assert(blockNodeAddr->gtType == TYP_BYREF); GenTree* commaNode = parent; commaNode->gtType = TYP_BYREF; commaNode->AsOp()->gtOp2 = blockNodeAddr; blockNode->AsOp()->gtOp1 = commaNode; if (parent == structVal) { structVal = blockNode; } alreadyNormalized = true; } } break; default: noway_assert(!"Unexpected node in impNormStructVal()"); break; } structVal->gtType = structType; if (!alreadyNormalized || forceNormalization) { if (makeTemp) { unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj")); impAssignTempGen(tmpNum, structVal, structHnd, curLevel); // The structVal is now the temp itself structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon(); structVal = structLcl; } if ((forceNormalization || (structType == TYP_STRUCT)) && !structVal->OperIsBlk()) { // Wrap it in a GT_OBJ structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal)); } } if (structLcl != nullptr) { // A OBJ on a ADDR(LCL_VAR) can never raise an exception // so we don't set GTF_EXCEPT here. if (!lvaIsImplicitByRefLocal(structLcl->GetLclNum())) { structVal->gtFlags &= ~GTF_GLOB_REF; } } else if (structVal->OperIsBlk()) { // In general a OBJ is an indirection and could raise an exception. structVal->gtFlags |= GTF_EXCEPT; } return structVal; } /******************************************************************************/ // Given a type token, generate code that will evaluate to the correct // handle representation of that token (type handle, field handle, or method handle) // // For most cases, the handle is determined at compile-time, and the code // generated is simply an embedded handle. // // Run-time lookup is required if the enclosing method is shared between instantiations // and the token refers to formal type parameters whose instantiation is not known // at compile-time. // GenTree* Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool* pRuntimeLookup /* = NULL */, bool mustRestoreHandle /* = false */, bool importParent /* = false */) { assert(!fgGlobalMorph); CORINFO_GENERICHANDLE_RESULT embedInfo; info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo); if (pRuntimeLookup) { *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup; } if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup) { switch (embedInfo.handleType) { case CORINFO_HANDLETYPE_CLASS: info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle); break; case CORINFO_HANDLETYPE_METHOD: info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle); break; case CORINFO_HANDLETYPE_FIELD: info.compCompHnd->classMustBeLoadedBeforeCodeIsRun( info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle)); break; default: break; } } // Generate the full lookup tree. May be null if we're abandoning an inline attempt. GenTree* result = impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token), embedInfo.compileTimeHandle); // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node. 
if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup) { result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result); } return result; } GenTree* Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle) { if (!pLookup->lookupKind.needsRuntimeLookup) { // No runtime lookup is required. // Access is direct or memory-indirect (of a fixed address) reference CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE); if (pLookup->constLookup.accessType == IAT_VALUE) { handle = pLookup->constLookup.handle; } else if (pLookup->constLookup.accessType == IAT_PVALUE) { pIndirection = pLookup->constLookup.addr; } GenTree* addr = gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle); #ifdef DEBUG size_t handleToTrack; if (handleFlags == GTF_ICON_TOKEN_HDL) { handleToTrack = 0; } else { handleToTrack = (size_t)compileTimeHandle; } if (handle != nullptr) { addr->AsIntCon()->gtTargetHandle = handleToTrack; } else { addr->gtGetOp1()->AsIntCon()->gtTargetHandle = handleToTrack; } #endif return addr; } if (pLookup->lookupKind.runtimeLookupKind == CORINFO_LOOKUP_NOT_SUPPORTED) { // Runtime does not support inlining of all shapes of runtime lookups // Inlining has to be aborted in such a case assert(compIsForInlining()); compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP); return nullptr; } // Need to use dictionary-based access which depends on the typeContext // which is only available at runtime, not at compile-time. return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle); } #ifdef FEATURE_READYTORUN GenTree* Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle) { CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(pLookup->accessType != IAT_PPVALUE && pLookup->accessType != IAT_RELPVALUE); if (pLookup->accessType == IAT_VALUE) { handle = pLookup->handle; } else if (pLookup->accessType == IAT_PVALUE) { pIndirection = pLookup->addr; } GenTree* addr = gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle); #ifdef DEBUG assert((handleFlags == GTF_ICON_CLASS_HDL) || (handleFlags == GTF_ICON_METHOD_HDL)); if (handle != nullptr) { addr->AsIntCon()->gtTargetHandle = (size_t)compileTimeHandle; } else { addr->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)compileTimeHandle; } #endif // DEBUG return addr; } //------------------------------------------------------------------------ // impIsCastHelperEligibleForClassProbe: Checks whether a tree is a cast helper eligible to // to be profiled and then optimized with PGO data // // Arguments: // tree - the tree object to check // // Returns: // true if the tree is a cast helper eligible to be profiled // bool Compiler::impIsCastHelperEligibleForClassProbe(GenTree* tree) { if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) || (JitConfig.JitCastProfiling() != 1)) { return false; } if (tree->IsCall() && tree->AsCall()->gtCallType == CT_HELPER) { const CorInfoHelpFunc helper = eeGetHelperNum(tree->AsCall()->gtCallMethHnd); if ((helper == CORINFO_HELP_ISINSTANCEOFINTERFACE) || (helper == CORINFO_HELP_ISINSTANCEOFCLASS) || (helper == CORINFO_HELP_CHKCASTCLASS) || (helper == CORINFO_HELP_CHKCASTINTERFACE)) { return true; } } return false; } 
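// A hedged usage sketch (illustrative only; the call-site shape is hypothetical and
// not taken from this file): an importer call site would typically guard class-probe
// instrumentation on this predicate, e.g.
//
//     if (impIsCastHelperEligibleForClassProbe(op1))
//     {
//         // ... record a class profile candidate for this cast helper call ...
//     }
//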
//------------------------------------------------------------------------ // impIsCastHelperMayHaveProfileData: Checks whether a tree is a cast helper that might // have profile data // // Arguments: // tree - the tree object to check // // Returns: // true if the tree is a cast helper with potential profile data // bool Compiler::impIsCastHelperMayHaveProfileData(GenTree* tree) { if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT) || (JitConfig.JitCastProfiling() != 1)) { return false; } if (tree->IsCall() && tree->AsCall()->gtCallType == CT_HELPER) { const CorInfoHelpFunc helper = eeGetHelperNum(tree->AsCall()->gtCallMethHnd); if ((helper == CORINFO_HELP_ISINSTANCEOFINTERFACE) || (helper == CORINFO_HELP_ISINSTANCEOFCLASS) || (helper == CORINFO_HELP_CHKCASTCLASS) || (helper == CORINFO_HELP_CHKCASTINTERFACE)) { return true; } } return false; } GenTreeCall* Compiler::impReadyToRunHelperToTree( CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoHelpFunc helper, var_types type, GenTreeCall::Use* args /* = nullptr */, CORINFO_LOOKUP_KIND* pGenericLookupKind /* =NULL. Only used with generics */) { CORINFO_CONST_LOOKUP lookup; if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup)) { return nullptr; } GenTreeCall* op1 = gtNewHelperCallNode(helper, type, args); op1->setEntryPoint(lookup); return op1; } #endif GenTree* Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { GenTree* op1 = nullptr; switch (pCallInfo->kind) { case CORINFO_CALL: op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod); #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { op1->AsFptrVal()->gtEntryPoint = pCallInfo->codePointerLookup.constLookup; } #endif break; case CORINFO_CALL_CODE_POINTER: op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod); break; default: noway_assert(!"unknown call kind"); break; } return op1; } //------------------------------------------------------------------------ // getRuntimeContextTree: find pointer to context for runtime lookup. // // Arguments: // kind - lookup kind. // // Return Value: // Return GenTree pointer to generic shared context. // // Notes: // Reports about generic context using. GenTree* Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind) { GenTree* ctxTree = nullptr; // Collectible types requires that for shared generic code, if we use the generic context parameter // that we report it. (This is a conservative approach, we could detect some cases particularly when the // context parameter is this that we don't need the eager reporting logic.) lvaGenericsContextInUse = true; Compiler* pRoot = impInlineRoot(); if (kind == CORINFO_LOOKUP_THISOBJ) { // this Object ctxTree = gtNewLclvNode(pRoot->info.compThisArg, TYP_REF); ctxTree->gtFlags |= GTF_VAR_CONTEXT; // context is the method table pointer of the this object ctxTree = gtNewMethodTableLookup(ctxTree); } else { assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM); // Exact method descriptor as passed in ctxTree = gtNewLclvNode(pRoot->info.compTypeCtxtArg, TYP_I_IMPL); ctxTree->gtFlags |= GTF_VAR_CONTEXT; } return ctxTree; } /*****************************************************************************/ /* Import a dictionary lookup to access a handle in code shared between generic instantiations. The lookup depends on the typeContext which is only available at runtime, and not at compile-time. 
pLookup->token1 and pLookup->token2 specify the handle that is needed. The cases are: 1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the instantiation-specific handle, and the tokens to lookup the handle. 2. pLookup->indirections != CORINFO_USEHELPER : 2a. pLookup->testForNull == false : Dereference the instantiation-specific handle to get the handle. 2b. pLookup->testForNull == true : Dereference the instantiation-specific handle. If it is non-NULL, it is the handle required. Else, call a helper to lookup the handle. */ GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle) { GenTree* ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind); CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup; // It's available only via the run-time helper function if (pRuntimeLookup->indirections == CORINFO_USEHELPER) { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL, gtNewCallArgs(ctxTree), &pLookup->lookupKind); } #endif return gtNewRuntimeLookupHelperCallNode(pRuntimeLookup, ctxTree, compileTimeHandle); } // Slot pointer GenTree* slotPtrTree = ctxTree; if (pRuntimeLookup->testForNull) { slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup slot")); } GenTree* indOffTree = nullptr; GenTree* lastIndOfTree = nullptr; // Applied repeated indirections for (WORD i = 0; i < pRuntimeLookup->indirections; i++) { if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset)) { indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup indirectOffset")); } // The last indirection could be subject to a size check (dynamic dictionary expansion) bool isLastIndirectionWithSizeCheck = ((i == pRuntimeLookup->indirections - 1) && (pRuntimeLookup->sizeOffset != CORINFO_NO_SIZE_CHECK)); if (i != 0) { slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree); slotPtrTree->gtFlags |= GTF_IND_NONFAULTING; if (!isLastIndirectionWithSizeCheck) { slotPtrTree->gtFlags |= GTF_IND_INVARIANT; } } if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset)) { slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree); } if (pRuntimeLookup->offsets[i] != 0) { if (isLastIndirectionWithSizeCheck) { lastIndOfTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup indirectOffset")); } slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL)); } } // No null test required if (!pRuntimeLookup->testForNull) { if (pRuntimeLookup->indirections == 0) { return slotPtrTree; } slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree); slotPtrTree->gtFlags |= GTF_IND_NONFAULTING; if (!pRuntimeLookup->testForFixup) { return slotPtrTree; } impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0")); unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test")); impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtDI); GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL); // downcast the pointer to a TYP_INT on 64-bit targets slot = impImplicitIorI4Cast(slot, TYP_INT); // 
Use a GT_AND to check for the lowest bit and indirect if it is set GenTree* test = gtNewOperNode(GT_AND, TYP_INT, slot, gtNewIconNode(1)); GenTree* relop = gtNewOperNode(GT_EQ, TYP_INT, test, gtNewIconNode(0)); // slot = GT_IND(slot - 1) slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL); GenTree* add = gtNewOperNode(GT_ADD, TYP_I_IMPL, slot, gtNewIconNode(-1, TYP_I_IMPL)); GenTree* indir = gtNewOperNode(GT_IND, TYP_I_IMPL, add); indir->gtFlags |= GTF_IND_NONFAULTING; indir->gtFlags |= GTF_IND_INVARIANT; slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL); GenTree* asg = gtNewAssignNode(slot, indir); GenTreeColon* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg); GenTreeQmark* qmark = gtNewQmarkNode(TYP_VOID, relop, colon); impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); return gtNewLclvNode(slotLclNum, TYP_I_IMPL); } assert(pRuntimeLookup->indirections != 0); impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1")); // Extract the handle GenTree* handleForNullCheck = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree); handleForNullCheck->gtFlags |= GTF_IND_NONFAULTING; // Call the helper // - Setup argNode with the pointer to the signature returned by the lookup GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_GLOBAL_PTR, compileTimeHandle); GenTreeCall::Use* helperArgs = gtNewCallArgs(ctxTree, argNode); GenTreeCall* helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs); // Check for null and possibly call helper GenTree* nullCheck = gtNewOperNode(GT_NE, TYP_INT, handleForNullCheck, gtNewIconNode(0, TYP_I_IMPL)); GenTree* handleForResult = gtCloneExpr(handleForNullCheck); GenTree* result = nullptr; if (pRuntimeLookup->sizeOffset != CORINFO_NO_SIZE_CHECK) { // Dynamic dictionary expansion support assert((lastIndOfTree != nullptr) && (pRuntimeLookup->indirections > 0)); // sizeValue = dictionary[pRuntimeLookup->sizeOffset] GenTreeIntCon* sizeOffset = gtNewIconNode(pRuntimeLookup->sizeOffset, TYP_I_IMPL); GenTree* sizeValueOffset = gtNewOperNode(GT_ADD, TYP_I_IMPL, lastIndOfTree, sizeOffset); GenTree* sizeValue = gtNewOperNode(GT_IND, TYP_I_IMPL, sizeValueOffset); sizeValue->gtFlags |= GTF_IND_NONFAULTING; // sizeCheck fails if sizeValue < pRuntimeLookup->offsets[i] GenTree* offsetValue = gtNewIconNode(pRuntimeLookup->offsets[pRuntimeLookup->indirections - 1], TYP_I_IMPL); GenTree* sizeCheck = gtNewOperNode(GT_LE, TYP_INT, sizeValue, offsetValue); // revert null check condition. nullCheck->ChangeOperUnchecked(GT_EQ); // ((sizeCheck fails || nullCheck fails))) ? (helperCall : handle). // Add checks and the handle as call arguments, indirect call transformer will handle this. helperCall->gtCallArgs = gtPrependNewCallArg(handleForResult, helperCall->gtCallArgs); helperCall->gtCallArgs = gtPrependNewCallArg(sizeCheck, helperCall->gtCallArgs); helperCall->gtCallArgs = gtPrependNewCallArg(nullCheck, helperCall->gtCallArgs); result = helperCall; addExpRuntimeLookupCandidate(helperCall); } else { GenTreeColon* colonNullCheck = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, handleForResult, helperCall); result = gtNewQmarkNode(TYP_I_IMPL, nullCheck, colonNullCheck); } unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling Runtime Lookup tree")); impAssignTempGen(tmp, result, (unsigned)CHECK_SPILL_NONE); return gtNewLclvNode(tmp, TYP_I_IMPL); } /****************************************************************************** * Spills the stack at verCurrentState.esStack[level] and replaces it with a temp. 
* If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum, * else, grab a new temp. * For structs (which can be pushed on the stack using obj, etc), * special handling is needed */ struct RecursiveGuard { public: RecursiveGuard() { m_pAddress = nullptr; } ~RecursiveGuard() { if (m_pAddress) { *m_pAddress = false; } } void Init(bool* pAddress, bool bInitialize) { assert(pAddress && *pAddress == false && "Recursive guard violation"); m_pAddress = pAddress; if (bInitialize) { *m_pAddress = true; } } protected: bool* m_pAddress; }; bool Compiler::impSpillStackEntry(unsigned level, unsigned tnum #ifdef DEBUG , bool bAssertOnRecursion, const char* reason #endif ) { #ifdef DEBUG RecursiveGuard guard; guard.Init(&impNestedStackSpill, bAssertOnRecursion); #endif GenTree* tree = verCurrentState.esStack[level].val; /* Allocate a temp if we haven't been asked to use a particular one */ if (tnum != BAD_VAR_NUM && (tnum >= lvaCount)) { return false; } bool isNewTemp = false; if (tnum == BAD_VAR_NUM) { tnum = lvaGrabTemp(true DEBUGARG(reason)); isNewTemp = true; } /* Assign the spilled entry to the temp */ impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level); // If temp is newly introduced and a ref type, grab what type info we can. if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF)) { assert(lvaTable[tnum].lvSingleDef == 0); lvaTable[tnum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tnum); CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle(); lvaSetClass(tnum, tree, stkHnd); // If we're assigning a GT_RET_EXPR, note the temp over on the call, // so the inliner can use it in case it needs a return spill temp. if (tree->OperGet() == GT_RET_EXPR) { JITDUMP("\n*** see V%02u = GT_RET_EXPR, noting temp\n", tnum); GenTree* call = tree->AsRetExpr()->gtInlineCandidate; InlineCandidateInfo* ici = call->AsCall()->gtInlineCandidateInfo; ici->preexistingSpillTemp = tnum; } } // The tree type may be modified by impAssignTempGen, so use the type of the lclVar. var_types type = genActualType(lvaTable[tnum].TypeGet()); GenTree* temp = gtNewLclvNode(tnum, type); verCurrentState.esStack[level].val = temp; return true; } /***************************************************************************** * * Ensure that the stack has only spilled values */ void Compiler::impSpillStackEnsure(bool spillLeaves) { assert(!spillLeaves || opts.compDbgCode); for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; if (!spillLeaves && tree->OperIsLeaf()) { continue; } // Temps introduced by the importer itself don't need to be spilled bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->AsLclVarCommon()->GetLclNum() >= info.compLocalsCount); if (isTempLcl) { continue; } impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure")); } } void Compiler::impSpillEvalStack() { for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack")); } } /***************************************************************************** * * If the stack contains any trees with side effects in them, assign those * trees to temps and append the assignments to the statement list. * On return the stack is guaranteed to be empty. 
*/ inline void Compiler::impEvalSideEffects() { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects")); verCurrentState.esStackDepth = 0; } /***************************************************************************** * * If the stack contains any trees with side effects in them, assign those * trees to temps and replace them on the stack with refs to their temps. * [0..chkLevel) is the portion of the stack which will be checked and spilled. */ inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason)) { assert(chkLevel != (unsigned)CHECK_SPILL_NONE); /* Before we make any appends to the tree list we must spill the * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */ impSpillSpecialSideEff(); if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } assert(chkLevel <= verCurrentState.esStackDepth); GenTreeFlags spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT; for (unsigned i = 0; i < chkLevel; i++) { GenTree* tree = verCurrentState.esStack[i].val; if ((tree->gtFlags & spillFlags) != 0 || (spillGlobEffects && // Only consider the following when spillGlobEffects == true !impIsAddressInLocal(tree) && // No need to spill the GT_ADDR node on a local. gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or // lvAddrTaken flag. { impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason)); } } } /***************************************************************************** * * If the stack contains any trees with special side effects in them, assign * those trees to temps and replace them on the stack with refs to their temps. */ inline void Compiler::impSpillSpecialSideEff() { // Only exception objects need to be carefully handled if (!compCurBB->bbCatchTyp) { return; } for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; // Make sure if we have an exception object in the sub tree we spill ourselves. if (gtHasCatchArg(tree)) { impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff")); } } } /***************************************************************************** * * Spill all stack references to value classes (TYP_STRUCT nodes) */ void Compiler::impSpillValueClasses() { for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT) { // Tree walk was aborted, which means that we found a // value class on the stack. Need to spill that // stack entry. impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses")); } } } /***************************************************************************** * * Callback that checks if a tree node is TYP_STRUCT */ Compiler::fgWalkResult Compiler::impFindValueClasses(GenTree** pTree, fgWalkData* data) { fgWalkResult walkResult = WALK_CONTINUE; if ((*pTree)->gtType == TYP_STRUCT) { // Abort the walk and indicate that we found a value class walkResult = WALK_ABORT; } return walkResult; } /***************************************************************************** * * If the stack contains any trees with references to local #lclNum, assign * those trees to temps and replace their place on the stack with refs to * their temps. 
*/ void Compiler::impSpillLclRefs(ssize_t lclNum) { /* Before we make any appends to the tree list we must spill the * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */ impSpillSpecialSideEff(); for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; /* If the tree may throw an exception, and the block has a handler, then we need to spill assignments to the local if the local is live on entry to the handler. Just spill 'em all without considering the liveness */ bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT)); /* Skip the tree if it doesn't have an affected reference, unless xcptnCaught */ if (xcptnCaught || gtHasRef(tree, lclNum)) { impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs")); } } } /***************************************************************************** * * Push catch arg onto the stack. * If there are jumps to the beginning of the handler, insert basic block * and spill catch arg to a temp. Update the handler block if necessary. * * Returns the basic block of the actual handler. */ BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter) { // Do not inject the basic block twice on reimport. This should be // hit only under JIT stress. See if the block is the one we injected. // Note that EH canonicalization can inject internal blocks here. We might // be able to re-use such a block (but we don't, right now). if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE)) == (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE)) { Statement* stmt = hndBlk->firstStmt(); if (stmt != nullptr) { GenTree* tree = stmt->GetRootNode(); assert(tree != nullptr); if ((tree->gtOper == GT_ASG) && (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) && (tree->AsOp()->gtOp2->gtOper == GT_CATCH_ARG)) { tree = gtNewLclvNode(tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(), TYP_REF); impPushOnStack(tree, typeInfo(TI_REF, clsHnd)); return hndBlk->bbNext; } } // If we get here, it must have been some other kind of internal block. It's possible that // someone prepended something to our injected block, but that's unlikely. } /* Push the exception address value on the stack */ GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF); /* Mark the node as having a side-effect - i.e. cannot be * moved around since it is tied to a fixed location (EAX) */ arg->gtFlags |= GTF_ORDER_SIDEEFF; #if defined(JIT32_GCENCODER) const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5); #else const bool forceInsertNewBlock = compStressCompile(STRESS_CATCH_ARG, 5); #endif // defined(JIT32_GCENCODER) /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */ if (hndBlk->bbRefs > 1 || forceInsertNewBlock) { if (hndBlk->bbRefs == 1) { hndBlk->bbRefs++; } /* Create extra basic block for the spill */ BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true); newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE; newBlk->inheritWeight(hndBlk); newBlk->bbCodeOffs = hndBlk->bbCodeOffs; /* Account for the new link we are about to create */ hndBlk->bbRefs++; // Spill into a temp. 
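// Roughly the IR produced here (illustrative): the new block receives the statement
// "tN = CATCH_ARG", and the handler then consumes the exception object as
// "LCL_VAR tN" rather than referencing CATCH_ARG directly.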
unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg")); lvaTable[tempNum].lvType = TYP_REF; GenTree* argAsg = gtNewTempAssign(tempNum, arg); arg = gtNewLclvNode(tempNum, TYP_REF); hndBlk->bbStkTempsIn = tempNum; Statement* argStmt; if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) { // Report the debug info. impImportBlockCode won't treat the actual handler as exception block and thus // won't do it for us. // TODO-DEBUGINFO: Previous code always set stack as non-empty // here. Can we not just use impCurStmtOffsSet? Are we out of sync // here with the stack? impCurStmtDI = DebugInfo(compInlineContext, ILLocation(newBlk->bbCodeOffs, false, false)); argStmt = gtNewStmt(argAsg, impCurStmtDI); } else { argStmt = gtNewStmt(argAsg); } fgInsertStmtAtEnd(newBlk, argStmt); } impPushOnStack(arg, typeInfo(TI_REF, clsHnd)); return hndBlk; } /***************************************************************************** * * Given a tree, clone it. *pClone is set to the cloned tree. * Returns the original tree if the cloning was easy, * else returns the temp to which the tree had to be spilled to. * If the tree has side-effects, it will be spilled to a temp. */ GenTree* Compiler::impCloneExpr(GenTree* tree, GenTree** pClone, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt DEBUGARG(const char* reason)) { if (!(tree->gtFlags & GTF_GLOB_EFFECT)) { GenTree* clone = gtClone(tree, true); if (clone) { *pClone = clone; return tree; } } /* Store the operand in a temp and return the temp */ unsigned temp = lvaGrabTemp(true DEBUGARG(reason)); // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which // return a struct type. It also may modify the struct type to a more // specialized type (e.g. a SIMD type). So we will get the type from // the lclVar AFTER calling impAssignTempGen(). impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtDI); var_types type = genActualType(lvaTable[temp].TypeGet()); *pClone = gtNewLclvNode(temp, type); return gtNewLclvNode(temp, type); } //------------------------------------------------------------------------ // impCreateDIWithCurrentStackInfo: Create a DebugInfo instance with the // specified IL offset and 'is call' bit, using the current stack to determine // whether to set the 'stack empty' bit. // // Arguments: // offs - the IL offset for the DebugInfo // isCall - whether the created DebugInfo should have the IsCall bit set // // Return Value: // The DebugInfo instance. // DebugInfo Compiler::impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall) { assert(offs != BAD_IL_OFFSET); bool isStackEmpty = verCurrentState.esStackDepth <= 0; return DebugInfo(compInlineContext, ILLocation(offs, isStackEmpty, isCall)); } //------------------------------------------------------------------------ // impCurStmtOffsSet: Set the "current debug info" to attach to statements that // we are generating next. // // Arguments: // offs - the IL offset // // Remarks: // This function will be called in the main IL processing loop when it is // determined that we have reached a location in the IL stream for which we // want to report debug information. This is the main way we determine which // statements to report debug info for to the EE: for other statements, they // will have no debug information attached. 
// inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs) { if (offs == BAD_IL_OFFSET) { impCurStmtDI = DebugInfo(compInlineContext, ILLocation()); } else { impCurStmtDI = impCreateDIWithCurrentStackInfo(offs, false); } } //------------------------------------------------------------------------ // impCanSpillNow: check is it possible to spill all values from eeStack to local variables. // // Arguments: // prevOpcode - last importer opcode // // Return Value: // true if it is legal, false if it could be a sequence that we do not want to divide. bool Compiler::impCanSpillNow(OPCODE prevOpcode) { // Don't spill after ldtoken, newarr and newobj, because it could be a part of the InitializeArray sequence. // Avoid breaking up to guarantee that impInitializeArrayIntrinsic can succeed. return (prevOpcode != CEE_LDTOKEN) && (prevOpcode != CEE_NEWARR) && (prevOpcode != CEE_NEWOBJ); } /***************************************************************************** * * Remember the instr offset for the statements * * When we do impAppendTree(tree), we can't set stmt->SetLastILOffset(impCurOpcOffs), * if the append was done because of a partial stack spill, * as some of the trees corresponding to code up to impCurOpcOffs might * still be sitting on the stack. * So we delay calling of SetLastILOffset() until impNoteLastILoffs(). * This should be called when an opcode finally/explicitly causes * impAppendTree(tree) to be called (as opposed to being called because of * a spill caused by the opcode) */ #ifdef DEBUG void Compiler::impNoteLastILoffs() { if (impLastILoffsStmt == nullptr) { // We should have added a statement for the current basic block // Is this assert correct ? assert(impLastStmt); impLastStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs); } else { impLastILoffsStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs); impLastILoffsStmt = nullptr; } } #endif // DEBUG /***************************************************************************** * We don't create any GenTree (excluding spills) for a branch. * For debugging info, we need a placeholder so that we can note * the IL offset in gtStmt.gtStmtOffs. So append an empty statement. */ void Compiler::impNoteBranchOffs() { if (opts.compDbgCode) { impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } } /***************************************************************************** * Locate the next stmt boundary for which we need to record info. * We will have to spill the stack at such boundaries if it is not * already empty. * Returns the next stmt boundary (after the start of the block) */ unsigned Compiler::impInitBlockLineInfo() { /* Assume the block does not correspond with any IL offset. This prevents us from reporting extra offsets. Extra mappings can cause confusing stepping, especially if the extra mapping is a jump-target, and the debugger does not ignore extra mappings, but instead rewinds to the nearest known offset */ impCurStmtOffsSet(BAD_IL_OFFSET); IL_OFFSET blockOffs = compCurBB->bbCodeOffs; if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES)) { impCurStmtOffsSet(blockOffs); } /* Always report IL offset 0 or some tests get confused. 
Probably a good idea anyways */ if (blockOffs == 0) { impCurStmtOffsSet(blockOffs); } if (!info.compStmtOffsetsCount) { return ~0; } /* Find the lowest explicit stmt boundary within the block */ /* Start looking at an entry that is based on our instr offset */ unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize; if (index >= info.compStmtOffsetsCount) { index = info.compStmtOffsetsCount - 1; } /* If we've guessed too far, back up */ while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs) { index--; } /* If we guessed short, advance ahead */ while (info.compStmtOffsets[index] < blockOffs) { index++; if (index == info.compStmtOffsetsCount) { return info.compStmtOffsetsCount; } } assert(index < info.compStmtOffsetsCount); if (info.compStmtOffsets[index] == blockOffs) { /* There is an explicit boundary for the start of this basic block. So we will start with bbCodeOffs. Else we will wait until we get to the next explicit boundary */ impCurStmtOffsSet(blockOffs); index++; } return index; } /*****************************************************************************/ bool Compiler::impOpcodeIsCallOpcode(OPCODE opcode) { switch (opcode) { case CEE_CALL: case CEE_CALLI: case CEE_CALLVIRT: return true; default: return false; } } /*****************************************************************************/ static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode) { switch (opcode) { case CEE_CALL: case CEE_CALLI: case CEE_CALLVIRT: case CEE_JMP: case CEE_NEWOBJ: case CEE_NEWARR: return true; default: return false; } } /*****************************************************************************/ // One might think it is worth caching these values, but results indicate // that it isn't. // In addition, caching them causes SuperPMI to be unable to completely // encapsulate an individual method context. CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass() { CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr); return refAnyClass; } CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass() { CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE); assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr); return typeHandleClass; } CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle() { CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE); assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr); return argIteratorClass; } CORINFO_CLASS_HANDLE Compiler::impGetStringClass() { CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING); assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr); return stringClass; } CORINFO_CLASS_HANDLE Compiler::impGetObjectClass() { CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT); assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr); return objectClass; } /***************************************************************************** * "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we * set its type to TYP_BYREF when we create it. 
We know if it can be * changed to TYP_I_IMPL only at the point where we use it */ /* static */ void Compiler::impBashVarAddrsToI(GenTree* tree1, GenTree* tree2) { if (tree1->IsLocalAddrExpr() != nullptr) { tree1->gtType = TYP_I_IMPL; } if (tree2 && (tree2->IsLocalAddrExpr() != nullptr)) { tree2->gtType = TYP_I_IMPL; } } /***************************************************************************** * TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want * to make that an explicit cast in our trees, so any implicit casts that * exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are * turned into explicit casts here. * We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0) */ GenTree* Compiler::impImplicitIorI4Cast(GenTree* tree, var_types dstTyp) { var_types currType = genActualType(tree->gtType); var_types wantedType = genActualType(dstTyp); if (wantedType != currType) { // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp)) { if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->AsIntCon()->gtIconVal == 0))) { tree->gtType = TYP_I_IMPL; } } #ifdef TARGET_64BIT else if (varTypeIsI(wantedType) && (currType == TYP_INT)) { // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF tree = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); } else if ((wantedType == TYP_INT) && varTypeIsI(currType)) { // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT tree = gtNewCastNode(TYP_INT, tree, false, TYP_INT); } #endif // TARGET_64BIT } return tree; } /***************************************************************************** * TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases, * but we want to make that an explicit cast in our trees, so any implicit casts * that exist in the IL are turned into explicit casts here. */ GenTree* Compiler::impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp) { if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType)) { tree = gtNewCastNode(dstTyp, tree, false, dstTyp); } return tree; } //------------------------------------------------------------------------ // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray // with a GT_COPYBLK node. // // Arguments: // sig - The InitializeArray signature. // // Return Value: // A pointer to the newly created GT_COPYBLK node if the replacement succeeds or // nullptr otherwise. // // Notes: // The function recognizes the following IL pattern: // ldc <length> or a list of ldc <lower bound>/<length> // newarr or newobj // dup // ldtoken <field handle> // call InitializeArray // The lower bounds need not be constant except when the array rank is 1. // The function recognizes all kinds of arrays thus enabling a small runtime // such as CoreRT to skip providing an implementation for InitializeArray. GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig) { assert(sig->numArgs == 2); GenTree* fieldTokenNode = impStackTop(0).val; GenTree* arrayLocalNode = impStackTop(1).val; // // Verify that the field token is known and valid. Note that It's also // possible for the token to come from reflection, in which case we cannot do // the optimization and must therefore revert to calling the helper. You can // see an example of this in bvt\DynIL\initarray2.exe (in Main). // // Check to see if the ldtoken helper call is what we see here. 
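    // Illustrative (hypothetical) C# source that typically produces this pattern:
    //     static readonly int[] s_values = new int[] { 1, 2, 3 };
    // C# compilers usually emit newarr/dup/ldtoken <data field>/call InitializeArray for such constant initializers.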
if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->AsCall()->gtCallType != CT_HELPER) || (fieldTokenNode->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD))) { return nullptr; } // Strip helper call away fieldTokenNode = fieldTokenNode->AsCall()->gtCallArgs->GetNode(); if (fieldTokenNode->gtOper == GT_IND) { fieldTokenNode = fieldTokenNode->AsOp()->gtOp1; } // Check for constant if (fieldTokenNode->gtOper != GT_CNS_INT) { return nullptr; } CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->AsIntCon()->gtCompileTimeHandle; if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr)) { return nullptr; } // // We need to get the number of elements in the array and the size of each element. // We verify that the newarr statement is exactly what we expect it to be. // If it's not then we just return NULL and we don't optimize this call // // It is possible the we don't have any statements in the block yet. if (impLastStmt == nullptr) { return nullptr; } // // We start by looking at the last statement, making sure it's an assignment, and // that the target of the assignment is the array passed to InitializeArray. // GenTree* arrayAssignment = impLastStmt->GetRootNode(); if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->AsOp()->gtOp1->gtOper != GT_LCL_VAR) || (arrayLocalNode->gtOper != GT_LCL_VAR) || (arrayAssignment->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum() != arrayLocalNode->AsLclVarCommon()->GetLclNum())) { return nullptr; } // // Make sure that the object being assigned is a helper call. // GenTree* newArrayCall = arrayAssignment->AsOp()->gtOp2; if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->AsCall()->gtCallType != CT_HELPER)) { return nullptr; } // // Verify that it is one of the new array helpers. // bool isMDArray = false; if (newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8) #ifdef FEATURE_READYTORUN && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1) #endif ) { if (newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR)) { return nullptr; } isMDArray = true; } CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->AsCall()->compileTimeHelperArgumentHandle; // // Make sure we found a compile time handle to the array // if (!arrayClsHnd) { return nullptr; } unsigned rank = 0; S_UINT32 numElements; if (isMDArray) { rank = info.compCompHnd->getArrayRank(arrayClsHnd); if (rank == 0) { return nullptr; } GenTreeCall::Use* tokenArg = newArrayCall->AsCall()->gtCallArgs; assert(tokenArg != nullptr); GenTreeCall::Use* numArgsArg = tokenArg->GetNext(); assert(numArgsArg != nullptr); GenTreeCall::Use* argsArg = numArgsArg->GetNext(); assert(argsArg != nullptr); // // The number of arguments should be a constant between 1 and 64. The rank can't be 0 // so at least one length must be present and the rank can't exceed 32 so there can // be at most 64 arguments - 32 lengths and 32 lower bounds. 
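        // numArgs == rank means only lengths were pushed; numArgs == rank * 2 means every dimension
        // also carries an explicit lower bound (see the checks below).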
// if ((!numArgsArg->GetNode()->IsCnsIntOrI()) || (numArgsArg->GetNode()->AsIntCon()->IconValue() < 1) || (numArgsArg->GetNode()->AsIntCon()->IconValue() > 64)) { return nullptr; } unsigned numArgs = static_cast<unsigned>(numArgsArg->GetNode()->AsIntCon()->IconValue()); bool lowerBoundsSpecified; if (numArgs == rank * 2) { lowerBoundsSpecified = true; } else if (numArgs == rank) { lowerBoundsSpecified = false; // // If the rank is 1 and a lower bound isn't specified then the runtime creates // a SDArray. Note that even if a lower bound is specified it can be 0 and then // we get a SDArray as well, see the for loop below. // if (rank == 1) { isMDArray = false; } } else { return nullptr; } // // The rank is known to be at least 1 so we can start with numElements being 1 // to avoid the need to special case the first dimension. // numElements = S_UINT32(1); struct Match { static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs) { return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) && IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs); } static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs) { return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) && (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) && IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs); } static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs) { return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) && (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs); } static bool IsComma(GenTree* tree) { return (tree != nullptr) && (tree->OperGet() == GT_COMMA); } }; unsigned argIndex = 0; GenTree* comma; for (comma = argsArg->GetNode(); Match::IsComma(comma); comma = comma->gtGetOp2()) { if (lowerBoundsSpecified) { // // In general lower bounds can be ignored because they're not needed to // calculate the total number of elements. But for single dimensional arrays // we need to know if the lower bound is 0 because in this case the runtime // creates a SDArray and this affects the way the array data offset is calculated. // if (rank == 1) { GenTree* lowerBoundAssign = comma->gtGetOp1(); assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs)); GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2(); if (lowerBoundNode->IsIntegralConst(0)) { isMDArray = false; } } comma = comma->gtGetOp2(); argIndex++; } GenTree* lengthNodeAssign = comma->gtGetOp1(); assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs)); GenTree* lengthNode = lengthNodeAssign->gtGetOp2(); if (!lengthNode->IsCnsIntOrI()) { return nullptr; } numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue()); argIndex++; } assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs)); if (argIndex != numArgs) { return nullptr; } } else { // // Make sure there are exactly two arguments: the array class and // the number of elements. 
// GenTree* arrayLengthNode; GenTreeCall::Use* args = newArrayCall->AsCall()->gtCallArgs; #ifdef FEATURE_READYTORUN if (newArrayCall->AsCall()->gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)) { // Array length is 1st argument for readytorun helper arrayLengthNode = args->GetNode(); } else #endif { // Array length is 2nd argument for regular helper arrayLengthNode = args->GetNext()->GetNode(); } // // This optimization is only valid for a constant array size. // if (arrayLengthNode->gtOper != GT_CNS_INT) { return nullptr; } numElements = S_SIZE_T(arrayLengthNode->AsIntCon()->gtIconVal); if (!info.compCompHnd->isSDArray(arrayClsHnd)) { return nullptr; } } CORINFO_CLASS_HANDLE elemClsHnd; var_types elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd)); // // Note that genTypeSize will return zero for non primitive types, which is exactly // what we want (size will then be 0, and we will catch this in the conditional below). // Note that we don't expect this to fail for valid binaries, so we assert in the // non-verification case (the verification case should not assert but rather correctly // handle bad binaries). This assert is not guarding any specific invariant, but rather // saying that we don't expect this to happen, and if it is hit, we need to investigate // why. // S_UINT32 elemSize(genTypeSize(elementType)); S_UINT32 size = elemSize * S_UINT32(numElements); if (size.IsOverflow()) { return nullptr; } if ((size.Value() == 0) || (varTypeIsGC(elementType))) { return nullptr; } void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value()); if (!initData) { return nullptr; } // // At this point we are ready to commit to implementing the InitializeArray // intrinsic using a struct assignment. Pop the arguments from the stack and // return the struct assignment node. // impPopStack(); impPopStack(); const unsigned blkSize = size.Value(); unsigned dataOffset; if (isMDArray) { dataOffset = eeGetMDArrayDataOffset(rank); } else { dataOffset = eeGetArrayDataOffset(); } GenTree* dstAddr = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL)); GenTree* dst = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, dstAddr, typGetBlkLayout(blkSize)); GenTree* src = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_CONST_PTR, true); #ifdef DEBUG src->gtGetOp1()->AsIntCon()->gtTargetHandle = THT_IntializeArrayIntrinsics; #endif return gtNewBlkOpNode(dst, // dst src, // src false, // volatile true); // copyBlock } GenTree* Compiler::impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig) { assert(sig->numArgs == 1); assert(sig->sigInst.methInstCount == 1); GenTree* fieldTokenNode = impStackTop(0).val; // // Verify that the field token is known and valid. Note that it's also // possible for the token to come from reflection, in which case we cannot do // the optimization and must therefore revert to calling the helper. You can // see an example of this in bvt\DynIL\initarray2.exe (in Main). // // Check to see if the ldtoken helper call is what we see here. 
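    // Illustrative (hypothetical) C# source that recent compilers can lower to this pattern:
    //     ReadOnlySpan<int> s = new int[] { 1, 2, 3 };
    // i.e. ldtoken <data field> followed by a call to RuntimeHelpers.CreateSpan<int>.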
if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->AsCall()->gtCallType != CT_HELPER) || (fieldTokenNode->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD))) { return nullptr; } // Strip helper call away fieldTokenNode = fieldTokenNode->AsCall()->gtCallArgs->GetNode(); if (fieldTokenNode->gtOper == GT_IND) { fieldTokenNode = fieldTokenNode->AsOp()->gtOp1; } // Check for constant if (fieldTokenNode->gtOper != GT_CNS_INT) { return nullptr; } CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->AsIntCon()->gtCompileTimeHandle; if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr)) { return nullptr; } CORINFO_CLASS_HANDLE fieldOwnerHnd = info.compCompHnd->getFieldClass(fieldToken); CORINFO_CLASS_HANDLE fieldClsHnd; var_types fieldElementType = JITtype2varType(info.compCompHnd->getFieldType(fieldToken, &fieldClsHnd, fieldOwnerHnd)); unsigned totalFieldSize; // Most static initialization data fields are of some structure, but it is possible for them to be of various // primitive types as well if (fieldElementType == var_types::TYP_STRUCT) { totalFieldSize = info.compCompHnd->getClassSize(fieldClsHnd); } else { totalFieldSize = genTypeSize(fieldElementType); } // Limit to primitive or enum type - see ArrayNative::GetSpanDataFrom() CORINFO_CLASS_HANDLE targetElemHnd = sig->sigInst.methInst[0]; if (info.compCompHnd->getTypeForPrimitiveValueClass(targetElemHnd) == CORINFO_TYPE_UNDEF) { return nullptr; } const unsigned targetElemSize = info.compCompHnd->getClassSize(targetElemHnd); assert(targetElemSize != 0); const unsigned count = totalFieldSize / targetElemSize; if (count == 0) { return nullptr; } void* data = info.compCompHnd->getArrayInitializationData(fieldToken, totalFieldSize); if (!data) { return nullptr; } // // Ready to commit to the work // impPopStack(); // Turn count and pointer value into constants. GenTree* lengthValue = gtNewIconNode(count, TYP_INT); GenTree* pointerValue = gtNewIconHandleNode((size_t)data, GTF_ICON_CONST_PTR); // Construct ReadOnlySpan<T> to return. CORINFO_CLASS_HANDLE spanHnd = sig->retTypeClass; unsigned spanTempNum = lvaGrabTemp(true DEBUGARG("ReadOnlySpan<T> for CreateSpan<T>")); lvaSetStruct(spanTempNum, spanHnd, false); CORINFO_FIELD_HANDLE pointerFieldHnd = info.compCompHnd->getFieldInClass(spanHnd, 0); CORINFO_FIELD_HANDLE lengthFieldHnd = info.compCompHnd->getFieldInClass(spanHnd, 1); GenTreeLclFld* pointerField = gtNewLclFldNode(spanTempNum, TYP_BYREF, 0); pointerField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(pointerFieldHnd)); GenTree* pointerFieldAsg = gtNewAssignNode(pointerField, pointerValue); GenTreeLclFld* lengthField = gtNewLclFldNode(spanTempNum, TYP_INT, TARGET_POINTER_SIZE); lengthField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(lengthFieldHnd)); GenTree* lengthFieldAsg = gtNewAssignNode(lengthField, lengthValue); // Now append a few statements the initialize the span impAppendTree(lengthFieldAsg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); impAppendTree(pointerFieldAsg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // And finally create a tree that points at the span. 
    return impCreateLocalNode(spanTempNum DEBUGARG(0));
}

//------------------------------------------------------------------------
// impIntrinsic: possibly expand intrinsic call into alternate IR sequence
//
// Arguments:
//    newobjThis - for constructor calls, the tree for the newly allocated object
//    clsHnd - handle for the intrinsic method's class
//    method - handle for the intrinsic method
//    sig - signature of the intrinsic method
//    methodFlags - CORINFO_FLG_XXX flags of the intrinsic method
//    memberRef - the token for the intrinsic method
//    readonlyCall - true if call has a readonly prefix
//    tailCall - true if call is in tail position
//    pConstrainedResolvedToken -- resolved token for constrained call, or nullptr
//       if call is not constrained
//    constraintCallThisTransform -- this transform to apply for a constrained call
//    pIntrinsicName [OUT] -- intrinsic name (see enumeration in namedintrinsiclist.h)
//       for "traditional" jit intrinsics
//    isSpecialIntrinsic [OUT] -- set true if intrinsic expansion is a call
//       that is amenable to special downstream optimization opportunities
//
// Returns:
//    IR tree to use in place of the call, or nullptr if the jit should treat
//    the intrinsic call like a normal call.
//
//    pIntrinsicName set to non-illegal value if the call is recognized as a
//    traditional jit intrinsic, even if the intrinsic is not expanded.
//
//    isSpecial set true if the expansion is subject to special
//    optimizations later in the jit processing
//
// Notes:
//    On success the IR tree may be a call to a different method or an inline
//    sequence. If it is a call, then the intrinsic processing here is responsible
//    for handling all the special cases, as upon return to impImportCall
//    expanded intrinsics bypass most of the normal call processing.
//
//    Intrinsics are generally not recognized in minopts and debug codegen.
//
//    However, certain traditional intrinsics are identified as "must expand"
//    if there is no fallback implementation to invoke; these must be handled
//    in all codegen modes.
//
//    New style intrinsics (where the fallback implementation is in IL) are
//    identified as "must expand" if they are invoked from within their
//    own method bodies.
//
GenTree* Compiler::impIntrinsic(GenTree*                newobjThis,
                                CORINFO_CLASS_HANDLE    clsHnd,
                                CORINFO_METHOD_HANDLE   method,
                                CORINFO_SIG_INFO*       sig,
                                unsigned                methodFlags,
                                int                     memberRef,
                                bool                    readonlyCall,
                                bool                    tailCall,
                                CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
                                CORINFO_THIS_TRANSFORM  constraintCallThisTransform,
                                NamedIntrinsic*         pIntrinsicName,
                                bool*                   isSpecialIntrinsic)
{
    assert((methodFlags & CORINFO_FLG_INTRINSIC) != 0);

    bool           mustExpand = false;
    bool           isSpecial  = false;
    NamedIntrinsic ni         = NI_Illegal;

    if ((methodFlags & CORINFO_FLG_INTRINSIC) != 0)
    {
        // The recursive non-virtual calls to Jit intrinsics are must-expand by convention.
        mustExpand = mustExpand || (gtIsRecursiveCall(method) && !(methodFlags & CORINFO_FLG_VIRTUAL));

        ni = lookupNamedIntrinsic(method);

        // We specially support the following on all platforms to allow for dead
        // code optimization and to more generally support recursive intrinsics.
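        // For example, an IsSupported query that is known to be false for the current target folds
        // to a constant here, making the guarded code dead so it can be dropped.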
if (ni == NI_IsSupported_True) { assert(sig->numArgs == 0); return gtNewIconNode(true); } if (ni == NI_IsSupported_False) { assert(sig->numArgs == 0); return gtNewIconNode(false); } if (ni == NI_Throw_PlatformNotSupportedException) { return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand); } #ifdef FEATURE_HW_INTRINSICS if ((ni > NI_HW_INTRINSIC_START) && (ni < NI_HW_INTRINSIC_END)) { GenTree* hwintrinsic = impHWIntrinsic(ni, clsHnd, method, sig, mustExpand); if (mustExpand && (hwintrinsic == nullptr)) { return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_NOT_IMPLEMENTED, method, sig, mustExpand); } return hwintrinsic; } if ((ni > NI_SIMD_AS_HWINTRINSIC_START) && (ni < NI_SIMD_AS_HWINTRINSIC_END)) { // These intrinsics aren't defined recursively and so they will never be mustExpand // Instead, they provide software fallbacks that will be executed instead. assert(!mustExpand); return impSimdAsHWIntrinsic(ni, clsHnd, method, sig, newobjThis); } #endif // FEATURE_HW_INTRINSICS } *pIntrinsicName = ni; if (ni == NI_System_StubHelpers_GetStubContext) { // must be done regardless of DbgCode and MinOpts return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL); } if (ni == NI_System_StubHelpers_NextCallReturnAddress) { // For now we just avoid inlining anything into these methods since // this intrinsic is only rarely used. We could do this better if we // wanted to by trying to match which call is the one we need to get // the return address of. info.compHasNextCallRetAddr = true; return new (this, GT_LABEL) GenTree(GT_LABEL, TYP_I_IMPL); } switch (ni) { // CreateSpan must be expanded for NativeAOT case NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan: case NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray: mustExpand |= IsTargetAbi(CORINFO_CORERT_ABI); break; case NI_System_ByReference_ctor: case NI_System_ByReference_get_Value: case NI_System_Activator_AllocatorOf: case NI_System_Activator_DefaultConstructorOf: case NI_System_Object_MethodTableOf: case NI_System_EETypePtr_EETypePtrOf: mustExpand = true; break; default: break; } GenTree* retNode = nullptr; // Under debug and minopts, only expand what is required. // NextCallReturnAddress intrinsic returns the return address of the next call. // If that call is an intrinsic and is expanded, codegen for NextCallReturnAddress will fail. // To avoid that we conservatively expand only required intrinsics in methods that call // the NextCallReturnAddress intrinsic. 
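    // In those modes we bail out below and let the intrinsic compile as an ordinary call unless
    // expansion is mandatory.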
if (!mustExpand && (opts.OptimizationDisabled() || info.compHasNextCallRetAddr)) { *pIntrinsicName = NI_Illegal; return retNode; } CorInfoType callJitType = sig->retType; var_types callType = JITtype2varType(callJitType); /* First do the intrinsics which are always smaller than a call */ if (ni != NI_Illegal) { assert(retNode == nullptr); switch (ni) { case NI_Array_Address: case NI_Array_Get: case NI_Array_Set: retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, ni); break; case NI_System_String_Equals: { retNode = impStringEqualsOrStartsWith(/*startsWith:*/ false, sig, methodFlags); break; } case NI_System_MemoryExtensions_Equals: case NI_System_MemoryExtensions_SequenceEqual: { retNode = impSpanEqualsOrStartsWith(/*startsWith:*/ false, sig, methodFlags); break; } case NI_System_String_StartsWith: { retNode = impStringEqualsOrStartsWith(/*startsWith:*/ true, sig, methodFlags); break; } case NI_System_MemoryExtensions_StartsWith: { retNode = impSpanEqualsOrStartsWith(/*startsWith:*/ true, sig, methodFlags); break; } case NI_System_MemoryExtensions_AsSpan: case NI_System_String_op_Implicit: { assert(sig->numArgs == 1); isSpecial = impStackTop().val->OperIs(GT_CNS_STR); break; } case NI_System_String_get_Chars: { GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; retNode = gtNewIndexRef(TYP_USHORT, op1, op2); retNode->gtFlags |= GTF_INX_STRING_LAYOUT; break; } case NI_System_String_get_Length: { GenTree* op1 = impPopStack().val; if (op1->OperIs(GT_CNS_STR)) { // Optimize `ldstr + String::get_Length()` to CNS_INT // e.g. "Hello".Length => 5 GenTreeIntCon* iconNode = gtNewStringLiteralLength(op1->AsStrCon()); if (iconNode != nullptr) { retNode = iconNode; break; } } GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_String__stringLen, compCurBB); op1 = arrLen; // Getting the length of a null string should throw op1->gtFlags |= GTF_EXCEPT; retNode = op1; break; } // Implement ByReference Ctor. This wraps the assignment of the ref into a byref-like field // in a value type. The canonical example of this is Span<T>. In effect this is just a // substitution. The parameter byref will be assigned into the newly allocated object. case NI_System_ByReference_ctor: { // Remove call to constructor and directly assign the byref passed // to the call to the first slot of the ByReference struct. GenTree* op1 = impPopStack().val; GenTree* thisptr = newobjThis; CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0); GenTree* assign = gtNewAssignNode(field, op1); GenTree* byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1()); assert(byReferenceStruct != nullptr); impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd)); retNode = assign; break; } // Implement ptr value getter for ByReference struct. case NI_System_ByReference_get_Value: { GenTree* op1 = impPopStack().val; CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0); retNode = field; break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan: { retNode = impCreateSpanIntrinsic(sig); break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray: { retNode = impInitializeArrayIntrinsic(sig); break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant: { GenTree* op1 = impPopStack().val; if (op1->OperIsConst()) { // op1 is a known constant, replace with 'true'. 
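                    // e.g. RuntimeHelpers.IsKnownConstant(42) folds to the constant 1 (true) right here in the importer.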
retNode = gtNewIconNode(1); JITDUMP("\nExpanding RuntimeHelpers.IsKnownConstant to true early\n"); // We can also consider FTN_ADDR and typeof(T) here } else { // op1 is not a known constant, we'll do the expansion in morph retNode = new (this, GT_INTRINSIC) GenTreeIntrinsic(TYP_INT, op1, ni, method); JITDUMP("\nConverting RuntimeHelpers.IsKnownConstant to:\n"); DISPTREE(retNode); } break; } case NI_System_Activator_AllocatorOf: case NI_System_Activator_DefaultConstructorOf: case NI_System_Object_MethodTableOf: case NI_System_EETypePtr_EETypePtrOf: { assert(IsTargetAbi(CORINFO_CORERT_ABI)); // Only CoreRT supports it. CORINFO_RESOLVED_TOKEN resolvedToken; resolvedToken.tokenContext = impTokenLookupContextHandle; resolvedToken.tokenScope = info.compScopeHnd; resolvedToken.token = memberRef; resolvedToken.tokenType = CORINFO_TOKENKIND_Method; CORINFO_GENERICHANDLE_RESULT embedInfo; info.compCompHnd->expandRawHandleIntrinsic(&resolvedToken, &embedInfo); GenTree* rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef), embedInfo.compileTimeHandle); if (rawHandle == nullptr) { return nullptr; } noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL)); unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle")); impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE); GenTree* lclVar = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL); GenTree* lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar); var_types resultType = JITtype2varType(sig->retType); retNode = gtNewOperNode(GT_IND, resultType, lclVarAddr); break; } case NI_System_Span_get_Item: case NI_System_ReadOnlySpan_get_Item: { // Have index, stack pointer-to Span<T> s on the stack. Expand to: // // For Span<T> // Comma // BoundsCheck(index, s->_length) // s->_pointer + index * sizeof(T) // // For ReadOnlySpan<T> -- same expansion, as it now returns a readonly ref // // Signature should show one class type parameter, which // we need to examine. assert(sig->sigInst.classInstCount == 1); assert(sig->numArgs == 1); CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0]; const unsigned elemSize = info.compCompHnd->getClassSize(spanElemHnd); assert(elemSize > 0); const bool isReadOnly = (ni == NI_System_ReadOnlySpan_get_Item); JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n", isReadOnly ? "ReadOnly" : "", info.compCompHnd->getClassName(spanElemHnd), elemSize); GenTree* index = impPopStack().val; GenTree* ptrToSpan = impPopStack().val; GenTree* indexClone = nullptr; GenTree* ptrToSpanClone = nullptr; assert(genActualType(index) == TYP_INT); assert(ptrToSpan->TypeGet() == TYP_BYREF); #if defined(DEBUG) if (verbose) { printf("with ptr-to-span\n"); gtDispTree(ptrToSpan); printf("and index\n"); gtDispTree(index); } #endif // defined(DEBUG) // We need to use both index and ptr-to-span twice, so clone or spill. 
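                // (the index feeds both the bounds check and the address computation; the span
                // pointer feeds both the length field read and the data field read)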
index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Span.get_Item index")); ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Span.get_Item ptrToSpan")); // Bounds check CORINFO_FIELD_HANDLE lengthHnd = info.compCompHnd->getFieldInClass(clsHnd, 1); const unsigned lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd); GenTree* length = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset); GenTree* boundsCheck = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(index, length, SCK_RNGCHK_FAIL); // Element access index = indexClone; #ifdef TARGET_64BIT if (index->OperGet() == GT_CNS_INT) { index->gtType = TYP_I_IMPL; } else { index = gtNewCastNode(TYP_I_IMPL, index, true, TYP_I_IMPL); } #endif if (elemSize != 1) { GenTree* sizeofNode = gtNewIconNode(static_cast<ssize_t>(elemSize), TYP_I_IMPL); index = gtNewOperNode(GT_MUL, TYP_I_IMPL, index, sizeofNode); } CORINFO_FIELD_HANDLE ptrHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); const unsigned ptrOffset = info.compCompHnd->getFieldOffset(ptrHnd); GenTree* data = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset); GenTree* result = gtNewOperNode(GT_ADD, TYP_BYREF, data, index); // Prepare result var_types resultType = JITtype2varType(sig->retType); assert(resultType == result->TypeGet()); retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result); break; } case NI_System_RuntimeTypeHandle_GetValueInternal: { GenTree* op1 = impStackTop(0).val; if (op1->gtOper == GT_CALL && (op1->AsCall()->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall())) { // Old tree // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle // // New tree // TreeToGetNativeTypeHandle // Remove call to helper and return the native TypeHandle pointer that was the parameter // to that helper. op1 = impPopStack().val; // Get native TypeHandle argument to old helper GenTreeCall::Use* arg = op1->AsCall()->gtCallArgs; assert(arg->GetNext() == nullptr); op1 = arg->GetNode(); retNode = op1; } // Call the regular function. break; } case NI_System_Type_GetTypeFromHandle: { GenTree* op1 = impStackTop(0).val; CorInfoHelpFunc typeHandleHelper; if (op1->gtOper == GT_CALL && (op1->AsCall()->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall(), &typeHandleHelper)) { op1 = impPopStack().val; // Replace helper with a more specialized helper that returns RuntimeType if (typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE) { typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE; } else { assert(typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL); typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL; } assert(op1->AsCall()->gtCallArgs->GetNext() == nullptr); op1 = gtNewHelperCallNode(typeHandleHelper, TYP_REF, op1->AsCall()->gtCallArgs); op1->gtType = TYP_REF; retNode = op1; } break; } case NI_System_Type_op_Equality: case NI_System_Type_op_Inequality: { JITDUMP("Importing Type.op_*Equality intrinsic\n"); GenTree* op1 = impStackTop(1).val; GenTree* op2 = impStackTop(0).val; GenTree* optTree = gtFoldTypeEqualityCall(ni == NI_System_Type_op_Equality, op1, op2); if (optTree != nullptr) { // Success, clean up the evaluation stack. impPopStack(); impPopStack(); // See if we can optimize even further, to a handle compare. optTree = gtFoldTypeCompare(optTree); // See if we can now fold a handle compare to a constant. 
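                    // e.g. typeof(int) == typeof(string) can fold all the way down to a constant false here.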
optTree = gtFoldExpr(optTree); retNode = optTree; } else { // Retry optimizing these later isSpecial = true; } break; } case NI_System_Enum_HasFlag: { GenTree* thisOp = impStackTop(1).val; GenTree* flagOp = impStackTop(0).val; GenTree* optTree = gtOptimizeEnumHasFlag(thisOp, flagOp); if (optTree != nullptr) { // Optimization successful. Pop the stack for real. impPopStack(); impPopStack(); retNode = optTree; } else { // Retry optimizing this during morph. isSpecial = true; } break; } case NI_System_Type_IsAssignableFrom: { GenTree* typeTo = impStackTop(1).val; GenTree* typeFrom = impStackTop(0).val; retNode = impTypeIsAssignable(typeTo, typeFrom); break; } case NI_System_Type_IsAssignableTo: { GenTree* typeTo = impStackTop(0).val; GenTree* typeFrom = impStackTop(1).val; retNode = impTypeIsAssignable(typeTo, typeFrom); break; } case NI_System_Type_get_IsValueType: { // Optimize // // call Type.GetTypeFromHandle (which is replaced with CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE) // call Type.IsValueType // // to `true` or `false` // e.g. `typeof(int).IsValueType` => `true` if (impStackTop().val->IsCall()) { GenTreeCall* call = impStackTop().val->AsCall(); if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE)) { CORINFO_CLASS_HANDLE hClass = gtGetHelperArgClassHandle(call->gtCallArgs->GetNode()); if (hClass != NO_CLASS_HANDLE) { retNode = gtNewIconNode((eeIsValueClass(hClass) && // pointers are not value types (e.g. typeof(int*).IsValueType is false) info.compCompHnd->asCorInfoType(hClass) != CORINFO_TYPE_PTR) ? 1 : 0); impPopStack(); // drop CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE call } } } break; } case NI_System_Threading_Thread_get_ManagedThreadId: { if (impStackTop().val->OperIs(GT_RET_EXPR)) { GenTreeCall* call = impStackTop().val->AsRetExpr()->gtInlineCandidate->AsCall(); if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) { if (lookupNamedIntrinsic(call->gtCallMethHnd) == NI_System_Threading_Thread_get_CurrentThread) { // drop get_CurrentThread() call impPopStack(); call->ReplaceWith(gtNewNothingNode(), this); retNode = gtNewHelperCallNode(CORINFO_HELP_GETCURRENTMANAGEDTHREADID, TYP_INT); } } } break; } #ifdef TARGET_ARM64 // Intrinsify Interlocked.Or and Interlocked.And only for arm64-v8.1 (and newer) // TODO-CQ: Implement for XArch (https://github.com/dotnet/runtime/issues/32239). case NI_System_Threading_Interlocked_Or: case NI_System_Threading_Interlocked_And: { if (compOpportunisticallyDependsOn(InstructionSet_Atomics)) { assert(sig->numArgs == 2); GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; genTreeOps op = (ni == NI_System_Threading_Interlocked_Or) ? 
GT_XORR : GT_XAND; retNode = gtNewOperNode(op, genActualType(callType), op1, op2); retNode->gtFlags |= GTF_GLOB_REF | GTF_ASG; } break; } #endif // TARGET_ARM64 #if defined(TARGET_XARCH) || defined(TARGET_ARM64) // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic case NI_System_Threading_Interlocked_CompareExchange: { var_types retType = JITtype2varType(sig->retType); if ((retType == TYP_LONG) && (TARGET_POINTER_SIZE == 4)) { break; } if ((retType != TYP_INT) && (retType != TYP_LONG)) { break; } assert(callType != TYP_STRUCT); assert(sig->numArgs == 3); GenTree* op3 = impPopStack().val; // comparand GenTree* op2 = impPopStack().val; // value GenTree* op1 = impPopStack().val; // location GenTree* node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3); node->AsCmpXchg()->gtOpLocation->gtFlags |= GTF_DONT_CSE; retNode = node; break; } case NI_System_Threading_Interlocked_Exchange: case NI_System_Threading_Interlocked_ExchangeAdd: { assert(callType != TYP_STRUCT); assert(sig->numArgs == 2); var_types retType = JITtype2varType(sig->retType); if ((retType == TYP_LONG) && (TARGET_POINTER_SIZE == 4)) { break; } if ((retType != TYP_INT) && (retType != TYP_LONG)) { break; } GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; // This creates: // val // XAdd // addr // field (for example) // // In the case where the first argument is the address of a local, we might // want to make this *not* make the var address-taken -- but atomic instructions // on a local are probably pretty useless anyway, so we probably don't care. op1 = gtNewOperNode(ni == NI_System_Threading_Interlocked_ExchangeAdd ? GT_XADD : GT_XCHG, genActualType(callType), op1, op2); op1->gtFlags |= GTF_GLOB_REF | GTF_ASG; retNode = op1; break; } #endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) case NI_System_Threading_Interlocked_MemoryBarrier: case NI_System_Threading_Interlocked_ReadMemoryBarrier: { assert(sig->numArgs == 0); GenTree* op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID); op1->gtFlags |= GTF_GLOB_REF | GTF_ASG; // On XARCH `NI_System_Threading_Interlocked_ReadMemoryBarrier` fences need not be emitted. // However, we still need to capture the effect on reordering. 
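                // GTF_MEMORYBARRIER_LOAD marks this node as a load-only barrier so later phases can
                // avoid emitting an actual fence instruction while still honoring the ordering constraint.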
if (ni == NI_System_Threading_Interlocked_ReadMemoryBarrier) { op1->gtFlags |= GTF_MEMORYBARRIER_LOAD; } retNode = op1; break; } #ifdef FEATURE_HW_INTRINSICS case NI_System_Math_FusedMultiplyAdd: { #ifdef TARGET_XARCH if (compExactlyDependsOn(InstructionSet_FMA) && supportSIMDTypes()) { assert(varTypeIsFloating(callType)); // We are constructing a chain of intrinsics similar to: // return FMA.MultiplyAddScalar( // Vector128.CreateScalarUnsafe(x), // Vector128.CreateScalarUnsafe(y), // Vector128.CreateScalarUnsafe(z) // ).ToScalar(); GenTree* op3 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe, callJitType, 16); GenTree* op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe, callJitType, 16); GenTree* op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe, callJitType, 16); GenTree* res = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, op3, NI_FMA_MultiplyAddScalar, callJitType, 16); retNode = gtNewSimdHWIntrinsicNode(callType, res, NI_Vector128_ToScalar, callJitType, 16); break; } #elif defined(TARGET_ARM64) if (compExactlyDependsOn(InstructionSet_AdvSimd)) { assert(varTypeIsFloating(callType)); // We are constructing a chain of intrinsics similar to: // return AdvSimd.FusedMultiplyAddScalar( // Vector64.Create{ScalarUnsafe}(z), // Vector64.Create{ScalarUnsafe}(y), // Vector64.Create{ScalarUnsafe}(x) // ).ToScalar(); NamedIntrinsic createVector64 = (callType == TYP_DOUBLE) ? NI_Vector64_Create : NI_Vector64_CreateScalarUnsafe; constexpr unsigned int simdSize = 8; GenTree* op3 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize); GenTree* op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize); GenTree* op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize); // Note that AdvSimd.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 + op2 * op3 // while Math{F}.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 * op2 + op3 retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op3, op2, op1, NI_AdvSimd_FusedMultiplyAddScalar, callJitType, simdSize); retNode = gtNewSimdHWIntrinsicNode(callType, retNode, NI_Vector64_ToScalar, callJitType, simdSize); break; } #endif // TODO-CQ-XArch: Ideally we would create a GT_INTRINSIC node for fma, however, that currently // requires more extensive changes to valuenum to support methods with 3 operands // We want to generate a GT_INTRINSIC node in the case the call can't be treated as // a target intrinsic so that we can still benefit from CSE and constant folding. 
break; } #endif // FEATURE_HW_INTRINSICS case NI_System_Math_Abs: case NI_System_Math_Acos: case NI_System_Math_Acosh: case NI_System_Math_Asin: case NI_System_Math_Asinh: case NI_System_Math_Atan: case NI_System_Math_Atanh: case NI_System_Math_Atan2: case NI_System_Math_Cbrt: case NI_System_Math_Ceiling: case NI_System_Math_Cos: case NI_System_Math_Cosh: case NI_System_Math_Exp: case NI_System_Math_Floor: case NI_System_Math_FMod: case NI_System_Math_ILogB: case NI_System_Math_Log: case NI_System_Math_Log2: case NI_System_Math_Log10: #ifdef TARGET_ARM64 // ARM64 has fmax/fmin which are IEEE754:2019 minimum/maximum compatible // TODO-XARCH-CQ: Enable this for XARCH when one of the arguments is a constant // so we can then emit maxss/minss and avoid NaN/-0.0 handling case NI_System_Math_Max: case NI_System_Math_Min: #endif case NI_System_Math_Pow: case NI_System_Math_Round: case NI_System_Math_Sin: case NI_System_Math_Sinh: case NI_System_Math_Sqrt: case NI_System_Math_Tan: case NI_System_Math_Tanh: case NI_System_Math_Truncate: { retNode = impMathIntrinsic(method, sig, callType, ni, tailCall); break; } case NI_System_Array_Clone: case NI_System_Collections_Generic_Comparer_get_Default: case NI_System_Collections_Generic_EqualityComparer_get_Default: case NI_System_Object_MemberwiseClone: case NI_System_Threading_Thread_get_CurrentThread: { // Flag for later handling. isSpecial = true; break; } case NI_System_Object_GetType: { JITDUMP("\n impIntrinsic: call to Object.GetType\n"); GenTree* op1 = impStackTop(0).val; // If we're calling GetType on a boxed value, just get the type directly. if (op1->IsBoxedValue()) { JITDUMP("Attempting to optimize box(...).getType() to direct type construction\n"); // Try and clean up the box. Obtain the handle we // were going to pass to the newobj. GenTree* boxTypeHandle = gtTryRemoveBoxUpstreamEffects(op1, BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE); if (boxTypeHandle != nullptr) { // Note we don't need to play the TYP_STRUCT games here like // do for LDTOKEN since the return value of this operator is Type, // not RuntimeTypeHandle. impPopStack(); GenTreeCall::Use* helperArgs = gtNewCallArgs(boxTypeHandle); GenTree* runtimeType = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs); retNode = runtimeType; } } // If we have a constrained callvirt with a "box this" transform // we know we have a value class and hence an exact type. // // If so, instead of boxing and then extracting the type, just // construct the type directly. if ((retNode == nullptr) && (pConstrainedResolvedToken != nullptr) && (constraintCallThisTransform == CORINFO_BOX_THIS)) { // Ensure this is one of the is simple box cases (in particular, rule out nullables). 
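                    // Nullable<T> boxing goes through a different helper, so requiring the plain
                    // CORINFO_HELP_BOX helper below is what rules those cases out.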
const CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pConstrainedResolvedToken->hClass); const bool isSafeToOptimize = (boxHelper == CORINFO_HELP_BOX); if (isSafeToOptimize) { JITDUMP("Optimizing constrained box-this obj.getType() to direct type construction\n"); impPopStack(); GenTree* typeHandleOp = impTokenToHandle(pConstrainedResolvedToken, nullptr, true /* mustRestoreHandle */); if (typeHandleOp == nullptr) { assert(compDonotInline()); return nullptr; } GenTreeCall::Use* helperArgs = gtNewCallArgs(typeHandleOp); GenTree* runtimeType = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs); retNode = runtimeType; } } #ifdef DEBUG if (retNode != nullptr) { JITDUMP("Optimized result for call to GetType is\n"); if (verbose) { gtDispTree(retNode); } } #endif // Else expand as an intrinsic, unless the call is constrained, // in which case we defer expansion to allow impImportCall do the // special constraint processing. if ((retNode == nullptr) && (pConstrainedResolvedToken == nullptr)) { JITDUMP("Expanding as special intrinsic\n"); impPopStack(); op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, ni, method); // Set the CALL flag to indicate that the operator is implemented by a call. // Set also the EXCEPTION flag because the native implementation of // NI_System_Object_GetType intrinsic can throw NullReferenceException. op1->gtFlags |= (GTF_CALL | GTF_EXCEPT); retNode = op1; // Might be further optimizable, so arrange to leave a mark behind isSpecial = true; } if (retNode == nullptr) { JITDUMP("Leaving as normal call\n"); // Might be further optimizable, so arrange to leave a mark behind isSpecial = true; } break; } case NI_System_Array_GetLength: case NI_System_Array_GetLowerBound: case NI_System_Array_GetUpperBound: { // System.Array.GetLength(Int32) method: // public int GetLength(int dimension) // System.Array.GetLowerBound(Int32) method: // public int GetLowerBound(int dimension) // System.Array.GetUpperBound(Int32) method: // public int GetUpperBound(int dimension) // // Only implement these as intrinsics for multi-dimensional arrays. // Only handle constant dimension arguments. GenTree* gtDim = impStackTop().val; GenTree* gtArr = impStackTop(1).val; if (gtDim->IsIntegralConst()) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE arrCls = gtGetClassHandle(gtArr, &isExact, &isNonNull); if (arrCls != NO_CLASS_HANDLE) { unsigned rank = info.compCompHnd->getArrayRank(arrCls); if ((rank > 1) && !info.compCompHnd->isSDArray(arrCls)) { // `rank` is guaranteed to be <=32 (see MAX_RANK in vm\array.h). Any constant argument // is `int` sized. INT64 dimValue = gtDim->AsIntConCommon()->IntegralValue(); assert((unsigned int)dimValue == dimValue); unsigned dim = (unsigned int)dimValue; if (dim < rank) { // This is now known to be a multi-dimension array with a constant dimension // that is in range; we can expand it as an intrinsic. impPopStack().val; // Pop the dim and array object; we already have a pointer to them. impPopStack().val; // Make sure there are no global effects in the array (such as it being a function // call), so we can mark the generated indirection with GTF_IND_INVARIANT. In the // GetUpperBound case we need the cloned object, since we refer to the array // object twice. In the other cases, we don't need to clone. 
GenTree* gtArrClone = nullptr; if (((gtArr->gtFlags & GTF_GLOB_EFFECT) != 0) || (ni == NI_System_Array_GetUpperBound)) { gtArr = impCloneExpr(gtArr, &gtArrClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("MD intrinsics array")); } switch (ni) { case NI_System_Array_GetLength: { // Generate *(array + offset-to-length-array + sizeof(int) * dim) unsigned offs = eeGetMDArrayLengthOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); retNode = gtNewIndir(TYP_INT, gtAddr); retNode->gtFlags |= GTF_IND_INVARIANT; break; } case NI_System_Array_GetLowerBound: { // Generate *(array + offset-to-bounds-array + sizeof(int) * dim) unsigned offs = eeGetMDArrayLowerBoundOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); retNode = gtNewIndir(TYP_INT, gtAddr); retNode->gtFlags |= GTF_IND_INVARIANT; break; } case NI_System_Array_GetUpperBound: { assert(gtArrClone != nullptr); // Generate: // *(array + offset-to-length-array + sizeof(int) * dim) + // *(array + offset-to-bounds-array + sizeof(int) * dim) - 1 unsigned offs = eeGetMDArrayLowerBoundOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); GenTree* gtLowerBound = gtNewIndir(TYP_INT, gtAddr); gtLowerBound->gtFlags |= GTF_IND_INVARIANT; offs = eeGetMDArrayLengthOffset(rank, dim); gtOffs = gtNewIconNode(offs, TYP_I_IMPL); gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArrClone, gtOffs); GenTree* gtLength = gtNewIndir(TYP_INT, gtAddr); gtLength->gtFlags |= GTF_IND_INVARIANT; GenTree* gtSum = gtNewOperNode(GT_ADD, TYP_INT, gtLowerBound, gtLength); GenTree* gtOne = gtNewIconNode(1, TYP_INT); retNode = gtNewOperNode(GT_SUB, TYP_INT, gtSum, gtOne); break; } default: unreached(); } } } } } break; } case NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness: { assert(sig->numArgs == 1); // We expect the return type of the ReverseEndianness routine to match the type of the // one and only argument to the method. We use a special instruction for 16-bit // BSWAPs since on x86 processors this is implemented as ROR <16-bit reg>, 8. Additionally, // we only emit 64-bit BSWAP instructions on 64-bit archs; if we're asked to perform a // 64-bit byte swap on a 32-bit arch, we'll fall to the default case in the switch block below. switch (sig->retType) { case CorInfoType::CORINFO_TYPE_SHORT: case CorInfoType::CORINFO_TYPE_USHORT: retNode = gtNewCastNode(TYP_INT, gtNewOperNode(GT_BSWAP16, TYP_INT, impPopStack().val), false, callType); break; case CorInfoType::CORINFO_TYPE_INT: case CorInfoType::CORINFO_TYPE_UINT: #ifdef TARGET_64BIT case CorInfoType::CORINFO_TYPE_LONG: case CorInfoType::CORINFO_TYPE_ULONG: #endif // TARGET_64BIT retNode = gtNewOperNode(GT_BSWAP, callType, impPopStack().val); break; default: // This default case gets hit on 32-bit archs when a call to a 64-bit overload // of ReverseEndianness is encountered. In that case we'll let JIT treat this as a standard // method call, where the implementation decomposes the operation into two 32-bit // bswap routines. If the input to the 64-bit function is a constant, then we rely // on inlining + constant folding of 32-bit bswaps to effectively constant fold // the 64-bit call site. 
break; } break; } // Fold PopCount for constant input case NI_System_Numerics_BitOperations_PopCount: { assert(sig->numArgs == 1); if (impStackTop().val->IsIntegralConst()) { typeInfo argType = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack(); INT64 cns = impPopStack().val->AsIntConCommon()->IntegralValue(); if (argType.IsType(TI_LONG)) { retNode = gtNewIconNode(genCountBits(cns), callType); } else { assert(argType.IsType(TI_INT)); retNode = gtNewIconNode(genCountBits(static_cast<unsigned>(cns)), callType); } } break; } case NI_System_GC_KeepAlive: { retNode = impKeepAliveIntrinsic(impPopStack().val); break; } default: break; } } if (mustExpand && (retNode == nullptr)) { assert(!"Unhandled must expand intrinsic, throwing PlatformNotSupportedException"); return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand); } // Optionally report if this intrinsic is special // (that is, potentially re-optimizable during morph). if (isSpecialIntrinsic != nullptr) { *isSpecialIntrinsic = isSpecial; } return retNode; } GenTree* Compiler::impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom) { // Optimize patterns like: // // typeof(TTo).IsAssignableFrom(typeof(TTFrom)) // valueTypeVar.GetType().IsAssignableFrom(typeof(TTFrom)) // typeof(TTFrom).IsAssignableTo(typeof(TTo)) // typeof(TTFrom).IsAssignableTo(valueTypeVar.GetType()) // // to true/false if (typeTo->IsCall() && typeFrom->IsCall()) { // make sure both arguments are `typeof()` CORINFO_METHOD_HANDLE hTypeof = eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE); if ((typeTo->AsCall()->gtCallMethHnd == hTypeof) && (typeFrom->AsCall()->gtCallMethHnd == hTypeof)) { CORINFO_CLASS_HANDLE hClassTo = gtGetHelperArgClassHandle(typeTo->AsCall()->gtCallArgs->GetNode()); CORINFO_CLASS_HANDLE hClassFrom = gtGetHelperArgClassHandle(typeFrom->AsCall()->gtCallArgs->GetNode()); if (hClassTo == NO_CLASS_HANDLE || hClassFrom == NO_CLASS_HANDLE) { return nullptr; } TypeCompareState castResult = info.compCompHnd->compareTypesForCast(hClassFrom, hClassTo); if (castResult == TypeCompareState::May) { // requires runtime check // e.g. __Canon, COMObjects, Nullable return nullptr; } GenTreeIntCon* retNode = gtNewIconNode((castResult == TypeCompareState::Must) ? 1 : 0); impPopStack(); // drop both CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE calls impPopStack(); return retNode; } } return nullptr; } GenTree* Compiler::impMathIntrinsic(CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, var_types callType, NamedIntrinsic intrinsicName, bool tailCall) { GenTree* op1; GenTree* op2; assert(callType != TYP_STRUCT); assert(IsMathIntrinsic(intrinsicName)); op1 = nullptr; #if !defined(TARGET_X86) // Intrinsics that are not implemented directly by target instructions will // be re-materialized as users calls in rationalizer. For prefixed tail calls, // don't do this optimization, because // a) For back compatibility reasons on desktop .NET Framework 4.6 / 4.6.1 // b) It will be non-trivial task or too late to re-materialize a surviving // tail prefixed GT_INTRINSIC as tail call in rationalizer. if (!IsIntrinsicImplementedByUserCall(intrinsicName) || !tailCall) #else // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad // code generation for certain EH constructs. 
if (!IsIntrinsicImplementedByUserCall(intrinsicName)) #endif { CORINFO_CLASS_HANDLE tmpClass; CORINFO_ARG_LIST_HANDLE arg; var_types op1Type; var_types op2Type; switch (sig->numArgs) { case 1: op1 = impPopStack().val; arg = sig->args; op1Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass))); if (op1->TypeGet() != genActualType(op1Type)) { assert(varTypeIsFloating(op1)); op1 = gtNewCastNode(callType, op1, false, callType); } op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicName, method); break; case 2: op2 = impPopStack().val; op1 = impPopStack().val; arg = sig->args; op1Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass))); if (op1->TypeGet() != genActualType(op1Type)) { assert(varTypeIsFloating(op1)); op1 = gtNewCastNode(callType, op1, false, callType); } arg = info.compCompHnd->getArgNext(arg); op2Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass))); if (op2->TypeGet() != genActualType(op2Type)) { assert(varTypeIsFloating(op2)); op2 = gtNewCastNode(callType, op2, false, callType); } op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicName, method); break; default: NO_WAY("Unsupported number of args for Math Intrinsic"); } if (IsIntrinsicImplementedByUserCall(intrinsicName)) { op1->gtFlags |= GTF_CALL; } } return op1; } //------------------------------------------------------------------------ // lookupNamedIntrinsic: map method to jit named intrinsic value // // Arguments: // method -- method handle for method // // Return Value: // Id for the named intrinsic, or Illegal if none. // // Notes: // method should have CORINFO_FLG_INTRINSIC set in its attributes, // otherwise it is not a named jit intrinsic. 
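// For example (taken from the string matching below): System.Math.Sqrt maps to
// NI_System_Math_Sqrt and System.Object.GetType maps to NI_System_Object_GetType;
// a method that matches none of the patterns yields NI_Illegal.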
// NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method) { const char* className = nullptr; const char* namespaceName = nullptr; const char* enclosingClassName = nullptr; const char* methodName = info.compCompHnd->getMethodNameFromMetadata(method, &className, &namespaceName, &enclosingClassName); JITDUMP("Named Intrinsic "); if (namespaceName != nullptr) { JITDUMP("%s.", namespaceName); } if (enclosingClassName != nullptr) { JITDUMP("%s.", enclosingClassName); } if (className != nullptr) { JITDUMP("%s.", className); } if (methodName != nullptr) { JITDUMP("%s", methodName); } if ((namespaceName == nullptr) || (className == nullptr) || (methodName == nullptr)) { // Check if we are dealing with an MD array's known runtime method CorInfoArrayIntrinsic arrayFuncIndex = info.compCompHnd->getArrayIntrinsicID(method); switch (arrayFuncIndex) { case CorInfoArrayIntrinsic::GET: JITDUMP("ARRAY_FUNC_GET: Recognized\n"); return NI_Array_Get; case CorInfoArrayIntrinsic::SET: JITDUMP("ARRAY_FUNC_SET: Recognized\n"); return NI_Array_Set; case CorInfoArrayIntrinsic::ADDRESS: JITDUMP("ARRAY_FUNC_ADDRESS: Recognized\n"); return NI_Array_Address; default: break; } JITDUMP(": Not recognized, not enough metadata\n"); return NI_Illegal; } JITDUMP(": "); NamedIntrinsic result = NI_Illegal; if (strcmp(namespaceName, "System") == 0) { if ((strcmp(className, "Enum") == 0) && (strcmp(methodName, "HasFlag") == 0)) { result = NI_System_Enum_HasFlag; } else if (strcmp(className, "Activator") == 0) { if (strcmp(methodName, "AllocatorOf") == 0) { result = NI_System_Activator_AllocatorOf; } else if (strcmp(methodName, "DefaultConstructorOf") == 0) { result = NI_System_Activator_DefaultConstructorOf; } } else if (strcmp(className, "ByReference`1") == 0) { if (strcmp(methodName, ".ctor") == 0) { result = NI_System_ByReference_ctor; } else if (strcmp(methodName, "get_Value") == 0) { result = NI_System_ByReference_get_Value; } } else if (strcmp(className, "Math") == 0 || strcmp(className, "MathF") == 0) { if (strcmp(methodName, "Abs") == 0) { result = NI_System_Math_Abs; } else if (strcmp(methodName, "Acos") == 0) { result = NI_System_Math_Acos; } else if (strcmp(methodName, "Acosh") == 0) { result = NI_System_Math_Acosh; } else if (strcmp(methodName, "Asin") == 0) { result = NI_System_Math_Asin; } else if (strcmp(methodName, "Asinh") == 0) { result = NI_System_Math_Asinh; } else if (strcmp(methodName, "Atan") == 0) { result = NI_System_Math_Atan; } else if (strcmp(methodName, "Atanh") == 0) { result = NI_System_Math_Atanh; } else if (strcmp(methodName, "Atan2") == 0) { result = NI_System_Math_Atan2; } else if (strcmp(methodName, "Cbrt") == 0) { result = NI_System_Math_Cbrt; } else if (strcmp(methodName, "Ceiling") == 0) { result = NI_System_Math_Ceiling; } else if (strcmp(methodName, "Cos") == 0) { result = NI_System_Math_Cos; } else if (strcmp(methodName, "Cosh") == 0) { result = NI_System_Math_Cosh; } else if (strcmp(methodName, "Exp") == 0) { result = NI_System_Math_Exp; } else if (strcmp(methodName, "Floor") == 0) { result = NI_System_Math_Floor; } else if (strcmp(methodName, "FMod") == 0) { result = NI_System_Math_FMod; } else if (strcmp(methodName, "FusedMultiplyAdd") == 0) { result = NI_System_Math_FusedMultiplyAdd; } else if (strcmp(methodName, "ILogB") == 0) { result = NI_System_Math_ILogB; } else if (strcmp(methodName, "Log") == 0) { result = NI_System_Math_Log; } else if (strcmp(methodName, "Log2") == 0) { result = NI_System_Math_Log2; } else if (strcmp(methodName, "Log10") == 0) { result = 
NI_System_Math_Log10; } else if (strcmp(methodName, "Max") == 0) { result = NI_System_Math_Max; } else if (strcmp(methodName, "Min") == 0) { result = NI_System_Math_Min; } else if (strcmp(methodName, "Pow") == 0) { result = NI_System_Math_Pow; } else if (strcmp(methodName, "Round") == 0) { result = NI_System_Math_Round; } else if (strcmp(methodName, "Sin") == 0) { result = NI_System_Math_Sin; } else if (strcmp(methodName, "Sinh") == 0) { result = NI_System_Math_Sinh; } else if (strcmp(methodName, "Sqrt") == 0) { result = NI_System_Math_Sqrt; } else if (strcmp(methodName, "Tan") == 0) { result = NI_System_Math_Tan; } else if (strcmp(methodName, "Tanh") == 0) { result = NI_System_Math_Tanh; } else if (strcmp(methodName, "Truncate") == 0) { result = NI_System_Math_Truncate; } } else if (strcmp(className, "GC") == 0) { if (strcmp(methodName, "KeepAlive") == 0) { result = NI_System_GC_KeepAlive; } } else if (strcmp(className, "Array") == 0) { if (strcmp(methodName, "Clone") == 0) { result = NI_System_Array_Clone; } else if (strcmp(methodName, "GetLength") == 0) { result = NI_System_Array_GetLength; } else if (strcmp(methodName, "GetLowerBound") == 0) { result = NI_System_Array_GetLowerBound; } else if (strcmp(methodName, "GetUpperBound") == 0) { result = NI_System_Array_GetUpperBound; } } else if (strcmp(className, "Object") == 0) { if (strcmp(methodName, "MemberwiseClone") == 0) { result = NI_System_Object_MemberwiseClone; } else if (strcmp(methodName, "GetType") == 0) { result = NI_System_Object_GetType; } else if (strcmp(methodName, "MethodTableOf") == 0) { result = NI_System_Object_MethodTableOf; } } else if (strcmp(className, "RuntimeTypeHandle") == 0) { if (strcmp(methodName, "GetValueInternal") == 0) { result = NI_System_RuntimeTypeHandle_GetValueInternal; } } else if (strcmp(className, "Type") == 0) { if (strcmp(methodName, "get_IsValueType") == 0) { result = NI_System_Type_get_IsValueType; } else if (strcmp(methodName, "IsAssignableFrom") == 0) { result = NI_System_Type_IsAssignableFrom; } else if (strcmp(methodName, "IsAssignableTo") == 0) { result = NI_System_Type_IsAssignableTo; } else if (strcmp(methodName, "op_Equality") == 0) { result = NI_System_Type_op_Equality; } else if (strcmp(methodName, "op_Inequality") == 0) { result = NI_System_Type_op_Inequality; } else if (strcmp(methodName, "GetTypeFromHandle") == 0) { result = NI_System_Type_GetTypeFromHandle; } } else if (strcmp(className, "String") == 0) { if (strcmp(methodName, "Equals") == 0) { result = NI_System_String_Equals; } else if (strcmp(methodName, "get_Chars") == 0) { result = NI_System_String_get_Chars; } else if (strcmp(methodName, "get_Length") == 0) { result = NI_System_String_get_Length; } else if (strcmp(methodName, "op_Implicit") == 0) { result = NI_System_String_op_Implicit; } else if (strcmp(methodName, "StartsWith") == 0) { result = NI_System_String_StartsWith; } } else if (strcmp(className, "MemoryExtensions") == 0) { if (strcmp(methodName, "AsSpan") == 0) { result = NI_System_MemoryExtensions_AsSpan; } if (strcmp(methodName, "SequenceEqual") == 0) { result = NI_System_MemoryExtensions_SequenceEqual; } else if (strcmp(methodName, "Equals") == 0) { result = NI_System_MemoryExtensions_Equals; } else if (strcmp(methodName, "StartsWith") == 0) { result = NI_System_MemoryExtensions_StartsWith; } } else if (strcmp(className, "Span`1") == 0) { if (strcmp(methodName, "get_Item") == 0) { result = NI_System_Span_get_Item; } } else if (strcmp(className, "ReadOnlySpan`1") == 0) { if (strcmp(methodName, "get_Item") == 0) 
{ result = NI_System_ReadOnlySpan_get_Item; } } else if (strcmp(className, "EETypePtr") == 0) { if (strcmp(methodName, "EETypePtrOf") == 0) { result = NI_System_EETypePtr_EETypePtrOf; } } } else if (strcmp(namespaceName, "System.Threading") == 0) { if (strcmp(className, "Thread") == 0) { if (strcmp(methodName, "get_CurrentThread") == 0) { result = NI_System_Threading_Thread_get_CurrentThread; } else if (strcmp(methodName, "get_ManagedThreadId") == 0) { result = NI_System_Threading_Thread_get_ManagedThreadId; } } else if (strcmp(className, "Interlocked") == 0) { #ifndef TARGET_ARM64 // TODO-CQ: Implement for XArch (https://github.com/dotnet/runtime/issues/32239). if (strcmp(methodName, "And") == 0) { result = NI_System_Threading_Interlocked_And; } else if (strcmp(methodName, "Or") == 0) { result = NI_System_Threading_Interlocked_Or; } #endif if (strcmp(methodName, "CompareExchange") == 0) { result = NI_System_Threading_Interlocked_CompareExchange; } else if (strcmp(methodName, "Exchange") == 0) { result = NI_System_Threading_Interlocked_Exchange; } else if (strcmp(methodName, "ExchangeAdd") == 0) { result = NI_System_Threading_Interlocked_ExchangeAdd; } else if (strcmp(methodName, "MemoryBarrier") == 0) { result = NI_System_Threading_Interlocked_MemoryBarrier; } else if (strcmp(methodName, "ReadMemoryBarrier") == 0) { result = NI_System_Threading_Interlocked_ReadMemoryBarrier; } } } #if defined(TARGET_XARCH) || defined(TARGET_ARM64) else if (strcmp(namespaceName, "System.Buffers.Binary") == 0) { if ((strcmp(className, "BinaryPrimitives") == 0) && (strcmp(methodName, "ReverseEndianness") == 0)) { result = NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness; } } #endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) else if (strcmp(namespaceName, "System.Collections.Generic") == 0) { if ((strcmp(className, "EqualityComparer`1") == 0) && (strcmp(methodName, "get_Default") == 0)) { result = NI_System_Collections_Generic_EqualityComparer_get_Default; } else if ((strcmp(className, "Comparer`1") == 0) && (strcmp(methodName, "get_Default") == 0)) { result = NI_System_Collections_Generic_Comparer_get_Default; } } else if ((strcmp(namespaceName, "System.Numerics") == 0) && (strcmp(className, "BitOperations") == 0)) { if (strcmp(methodName, "PopCount") == 0) { result = NI_System_Numerics_BitOperations_PopCount; } } #ifdef FEATURE_HW_INTRINSICS else if (strcmp(namespaceName, "System.Numerics") == 0) { CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(method, &sig); int sizeOfVectorT = getSIMDVectorRegisterByteLength(); result = SimdAsHWIntrinsicInfo::lookupId(&sig, className, methodName, enclosingClassName, sizeOfVectorT); } #endif // FEATURE_HW_INTRINSICS else if ((strcmp(namespaceName, "System.Runtime.CompilerServices") == 0) && (strcmp(className, "RuntimeHelpers") == 0)) { if (strcmp(methodName, "CreateSpan") == 0) { result = NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan; } else if (strcmp(methodName, "InitializeArray") == 0) { result = NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray; } else if (strcmp(methodName, "IsKnownConstant") == 0) { result = NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant; } } else if (strncmp(namespaceName, "System.Runtime.Intrinsics", 25) == 0) { // We go down this path even when FEATURE_HW_INTRINSICS isn't enabled // so we can specially handle IsSupported and recursive calls. // This is required to appropriately handle the intrinsics on platforms // which don't support them. 
On such a platform methods like Vector64.Create // will be seen as `Intrinsic` and `mustExpand` due to having a code path // which is recursive. When such a path is hit we expect it to be handled by // the importer and we fire an assert if it wasn't and in previous versions // of the JIT would fail fast. This was changed to throw a PNSE instead but // we still assert as most intrinsics should have been recognized/handled. // In order to avoid the assert, we specially handle the IsSupported checks // (to better allow dead-code optimizations) and we explicitly throw a PNSE // as we know that is the desired behavior for the HWIntrinsics when not // supported. For cases like Vector64.Create, this is fine because it will // be behind a relevant IsSupported check and will never be hit and the // software fallback will be executed instead. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef FEATURE_HW_INTRINSICS namespaceName += 25; const char* platformNamespaceName; #if defined(TARGET_XARCH) platformNamespaceName = ".X86"; #elif defined(TARGET_ARM64) platformNamespaceName = ".Arm"; #else #error Unsupported platform #endif if ((namespaceName[0] == '\0') || (strcmp(namespaceName, platformNamespaceName) == 0)) { CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(method, &sig); result = HWIntrinsicInfo::lookupId(this, &sig, className, methodName, enclosingClassName); } #endif // FEATURE_HW_INTRINSICS if (result == NI_Illegal) { if ((strcmp(methodName, "get_IsSupported") == 0) || (strcmp(methodName, "get_IsHardwareAccelerated") == 0)) { // This allows the relevant code paths to be dropped as dead code even // on platforms where FEATURE_HW_INTRINSICS is not supported. result = NI_IsSupported_False; } else if (gtIsRecursiveCall(method)) { // For the framework itself, any recursive intrinsics will either be // only supported on a single platform or will be guarded by a relevant // IsSupported check so the throw PNSE will be valid or dropped. result = NI_Throw_PlatformNotSupportedException; } } } else if (strcmp(namespaceName, "System.StubHelpers") == 0) { if (strcmp(className, "StubHelpers") == 0) { if (strcmp(methodName, "GetStubContext") == 0) { result = NI_System_StubHelpers_GetStubContext; } else if (strcmp(methodName, "NextCallReturnAddress") == 0) { result = NI_System_StubHelpers_NextCallReturnAddress; } } } if (result == NI_Illegal) { JITDUMP("Not recognized\n"); } else if (result == NI_IsSupported_False) { JITDUMP("Unsupported - return false"); } else if (result == NI_Throw_PlatformNotSupportedException) { JITDUMP("Unsupported - throw PlatformNotSupportedException"); } else { JITDUMP("Recognized\n"); } return result; } //------------------------------------------------------------------------ // impUnsupportedNamedIntrinsic: Throws an exception for an unsupported named intrinsic // // Arguments: // helper - JIT helper ID for the exception to be thrown // method - method handle of the intrinsic function. // sig - signature of the intrinsic call // mustExpand - true if the intrinsic must return a GenTree*; otherwise, false // // Return Value: // a gtNewMustThrowException if mustExpand is true; otherwise, nullptr // GenTree* Compiler::impUnsupportedNamedIntrinsic(unsigned helper, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand) { // We've hit some error case and may need to return a node for the given error. // // When `mustExpand=false`, we are attempting to inline the intrinsic directly into another method. 
In this // scenario, we need to return `nullptr` so that a GT_CALL to the intrinsic is emitted instead. This is to // ensure that everything continues to behave correctly when optimizations are enabled (e.g. things like the // inliner may expect the node we return to have a certain signature, and the `MustThrowException` node won't // match that). // // When `mustExpand=true`, we are in a GT_CALL to the intrinsic and are attempting to JIT it. This will generally // be in response to an indirect call (e.g. done via reflection) or in response to an earlier attempt returning // `nullptr` (under `mustExpand=false`). In that scenario, we are safe to return the `MustThrowException` node. if (mustExpand) { for (unsigned i = 0; i < sig->numArgs; i++) { impPopStack(); } return gtNewMustThrowException(helper, JITtype2varType(sig->retType), sig->retTypeClass); } else { return nullptr; } } /*****************************************************************************/ GenTree* Compiler::impArrayAccessIntrinsic( CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, NamedIntrinsic intrinsicName) { /* If we are generating SMALL_CODE, we don't want to use intrinsics for the following, as it generates fatter code. */ if (compCodeOpt() == SMALL_CODE) { return nullptr; } /* These intrinsics generate fatter (but faster) code and are only done if we don't need SMALL_CODE */ unsigned rank = (intrinsicName == NI_Array_Set) ? (sig->numArgs - 1) : sig->numArgs; // The rank 1 case is special because it has to handle two array formats // we will simply not do that case if (rank > GT_ARR_MAX_RANK || rank <= 1) { return nullptr; } CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr; var_types elemType = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd)); // For the ref case, we will only be able to inline if the types match // (verifier checks for this, we don't care for the nonverified case and the // type is final (so we don't need to do the cast) if ((intrinsicName != NI_Array_Get) && !readonlyCall && varTypeIsGC(elemType)) { // Get the call site signature CORINFO_SIG_INFO LocalSig; eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig); assert(LocalSig.hasThis()); CORINFO_CLASS_HANDLE actualElemClsHnd; if (intrinsicName == NI_Array_Set) { // Fetch the last argument, the one that indicates the type we are setting. CORINFO_ARG_LIST_HANDLE argType = LocalSig.args; for (unsigned r = 0; r < rank; r++) { argType = info.compCompHnd->getArgNext(argType); } typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType); actualElemClsHnd = argInfo.GetClassHandle(); } else { assert(intrinsicName == NI_Array_Address); // Fetch the return type typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass); assert(retInfo.IsByRef()); actualElemClsHnd = retInfo.GetClassHandle(); } // if it's not final, we can't do the optimization if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL)) { return nullptr; } } unsigned arrayElemSize; if (elemType == TYP_STRUCT) { assert(arrElemClsHnd); arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd); } else { arrayElemSize = genTypeSize(elemType); } if ((unsigned char)arrayElemSize != arrayElemSize) { // arrayElemSize would be truncated as an unsigned char. // This means the array element is too large. Don't do the optimization. 
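// For instance (illustrative): an array element larger than 255 bytes (in practice a large struct)
// cannot be encoded in the unsigned char element-size field of the GenTreeArrElem built below,
// so the expansion is skipped and the access remains an ordinary call.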
return nullptr; } GenTree* val = nullptr; if (intrinsicName == NI_Array_Set) { // Assignment of a struct is more work, and there are more gets than sets. if (elemType == TYP_STRUCT) { return nullptr; } val = impPopStack().val; assert(genActualType(elemType) == genActualType(val->gtType) || (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) || (elemType == TYP_INT && val->gtType == TYP_BYREF) || (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT)); } noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK); GenTree* inds[GT_ARR_MAX_RANK]; for (unsigned k = rank; k > 0; k--) { inds[k - 1] = impPopStack().val; } GenTree* arr = impPopStack().val; assert(arr->gtType == TYP_REF); GenTree* arrElem = new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank), static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]); if (intrinsicName != NI_Array_Address) { if (varTypeIsStruct(elemType)) { arrElem = gtNewObjNode(sig->retTypeClass, arrElem); } else { arrElem = gtNewOperNode(GT_IND, elemType, arrElem); } } if (intrinsicName == NI_Array_Set) { assert(val != nullptr); return gtNewAssignNode(arrElem, val); } else { return arrElem; } } //------------------------------------------------------------------------ // impKeepAliveIntrinsic: Import the GC.KeepAlive intrinsic call // // Imports the intrinsic as a GT_KEEPALIVE node, and, as an optimization, // if the object to keep alive is a GT_BOX, removes its side effects and // uses the address of a local (copied from the box's source if needed) // as the operand for GT_KEEPALIVE. For the BOX optimization, if the class // of the box has no GC fields, a GT_NOP is returned. // // Arguments: // objToKeepAlive - the intrinisic call's argument // // Return Value: // The imported GT_KEEPALIVE or GT_NOP - see description. 
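// Illustrative sketch of the intent (not literal importer output):
//     int i = 42;
//     GC.KeepAlive(i);   // the int is boxed at the call site
// Since 'int' has no GC fields, the box is removed and the KEEPALIVE becomes a GT_NOP; if the
// boxed type did contain GC references, a GT_KEEPALIVE over the address of a local copy of the
// box source would be produced instead.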
// GenTree* Compiler::impKeepAliveIntrinsic(GenTree* objToKeepAlive) { assert(objToKeepAlive->TypeIs(TYP_REF)); if (opts.OptimizationEnabled() && objToKeepAlive->IsBoxedValue()) { CORINFO_CLASS_HANDLE boxedClass = lvaGetDesc(objToKeepAlive->AsBox()->BoxOp()->AsLclVar())->lvClassHnd; ClassLayout* layout = typGetObjLayout(boxedClass); if (!layout->HasGCPtr()) { gtTryRemoveBoxUpstreamEffects(objToKeepAlive, BR_REMOVE_AND_NARROW); JITDUMP("\nBOX class has no GC fields, KEEPALIVE is a NOP"); return gtNewNothingNode(); } GenTree* boxSrc = gtTryRemoveBoxUpstreamEffects(objToKeepAlive, BR_REMOVE_BUT_NOT_NARROW); if (boxSrc != nullptr) { unsigned boxTempNum; if (boxSrc->OperIs(GT_LCL_VAR)) { boxTempNum = boxSrc->AsLclVarCommon()->GetLclNum(); } else { boxTempNum = lvaGrabTemp(true DEBUGARG("Temp for the box source")); GenTree* boxTempAsg = gtNewTempAssign(boxTempNum, boxSrc); Statement* boxAsgStmt = objToKeepAlive->AsBox()->gtCopyStmtWhenInlinedBoxValue; boxAsgStmt->SetRootNode(boxTempAsg); } JITDUMP("\nImporting KEEPALIVE(BOX) as KEEPALIVE(ADDR(LCL_VAR V%02u))", boxTempNum); GenTree* boxTemp = gtNewLclvNode(boxTempNum, boxSrc->TypeGet()); GenTree* boxTempAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, boxTemp); return gtNewKeepAliveNode(boxTempAddr); } } return gtNewKeepAliveNode(objToKeepAlive); } bool Compiler::verMergeEntryStates(BasicBlock* block, bool* changed) { unsigned i; // do some basic checks first if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth) { return false; } if (verCurrentState.esStackDepth > 0) { // merge stack types StackEntry* parentStack = block->bbStackOnEntry(); StackEntry* childStack = verCurrentState.esStack; for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++) { if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == false) { return false; } } } // merge initialization status of this ptr if (verTrackObjCtorInitState) { // If we're tracking the CtorInitState, then it must not be unknown in the current state. assert(verCurrentState.thisInitialized != TIS_Bottom); // If the successor block's thisInit state is unknown, copy it from the current state. if (block->bbThisOnEntry() == TIS_Bottom) { *changed = true; verSetThisInit(block, verCurrentState.thisInitialized); } else if (verCurrentState.thisInitialized != block->bbThisOnEntry()) { if (block->bbThisOnEntry() != TIS_Top) { *changed = true; verSetThisInit(block, TIS_Top); if (block->bbFlags & BBF_FAILED_VERIFICATION) { // The block is bad. Control can flow through the block to any handler that catches the // verification exception, but the importer ignores bad blocks and therefore won't model // this flow in the normal way. To complete the merge into the bad block, the new state // needs to be manually pushed to the handlers that may be reached after the verification // exception occurs. // // Usually, the new state was already propagated to the relevant handlers while processing // the predecessors of the bad block. The exception is when the bad block is at the start // of a try region, meaning it is protected by additional handlers that do not protect its // predecessors. // if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0)) { // Push TIS_Top to the handlers that protect the bad block. Note that this can cause // recursive calls back into this code path (if successors of the current bad block are // also bad blocks). 
// ThisInitState origTIS = verCurrentState.thisInitialized; verCurrentState.thisInitialized = TIS_Top; impVerifyEHBlock(block, true); verCurrentState.thisInitialized = origTIS; } } } } } else { assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom); } return true; } /***************************************************************************** * 'logMsg' is true if a log message needs to be logged. false if the caller has * already logged it (presumably in a more detailed fashion than done here) */ void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg)) { block->bbJumpKind = BBJ_THROW; block->bbFlags |= BBF_FAILED_VERIFICATION; block->bbFlags &= ~BBF_IMPORTED; impCurStmtOffsSet(block->bbCodeOffs); // Clear the statement list as it exists so far; we're only going to have a verification exception. impStmtList = impLastStmt = nullptr; #ifdef DEBUG if (logMsg) { JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName, block->bbCodeOffs, block->bbCodeOffsEnd)); if (verbose) { printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs); } } if (JitConfig.DebugBreakOnVerificationFailure()) { DebugBreak(); } #endif impBeginTreeList(); // if the stack is non-empty evaluate all the side-effects if (verCurrentState.esStackDepth > 0) { impEvalSideEffects(); } assert(verCurrentState.esStackDepth == 0); GenTree* op1 = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewCallArgs(gtNewIconNode(block->bbCodeOffs))); // verCurrentState.esStackDepth = 0; impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // The inliner is not able to handle methods that require throw block, so // make sure this methods never gets inlined. 
info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE); } /***************************************************************************** * */ void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg)) { verResetCurrentState(block, &verCurrentState); verConvertBBToThrowVerificationException(block DEBUGARG(logMsg)); #ifdef DEBUG impNoteLastILoffs(); // Remember at which BC offset the tree was finished #endif // DEBUG } /******************************************************************************/ typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd) { assert(ciType < CORINFO_TYPE_COUNT); typeInfo tiResult; switch (ciType) { case CORINFO_TYPE_STRING: case CORINFO_TYPE_CLASS: tiResult = verMakeTypeInfo(clsHnd); if (!tiResult.IsType(TI_REF)) { // type must be consistent with element type return typeInfo(); } break; #ifdef TARGET_64BIT case CORINFO_TYPE_NATIVEINT: case CORINFO_TYPE_NATIVEUINT: if (clsHnd) { // If we have more precise information, use it return verMakeTypeInfo(clsHnd); } else { return typeInfo::nativeInt(); } break; #endif // TARGET_64BIT case CORINFO_TYPE_VALUECLASS: case CORINFO_TYPE_REFANY: tiResult = verMakeTypeInfo(clsHnd); // type must be constant with element type; if (!tiResult.IsValueClass()) { return typeInfo(); } break; case CORINFO_TYPE_VAR: return verMakeTypeInfo(clsHnd); case CORINFO_TYPE_PTR: // for now, pointers are treated as an error case CORINFO_TYPE_VOID: return typeInfo(); break; case CORINFO_TYPE_BYREF: { CORINFO_CLASS_HANDLE childClassHandle; CorInfoType childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle); return ByRef(verMakeTypeInfo(childType, childClassHandle)); } break; default: if (clsHnd) { // If we have more precise information, use it return typeInfo(TI_STRUCT, clsHnd); } else { return typeInfo(JITtype2tiType(ciType)); } } return tiResult; } /******************************************************************************/ typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */) { if (clsHnd == nullptr) { return typeInfo(); } // Byrefs should only occur in method and local signatures, which are accessed // using ICorClassInfo and ICorClassInfo.getChildType. // So findClass() and getClassAttribs() should not be called for byrefs if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF) { assert(!"Did findClass() return a Byref?"); return typeInfo(); } unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd); if (attribs & CORINFO_FLG_VALUECLASS) { CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd); // Meta-data validation should ensure that CORINF_TYPE_BYREF should // not occur here, so we may want to change this to an assert instead. if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR) { return typeInfo(); } #ifdef TARGET_64BIT if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT) { return typeInfo::nativeInt(); } #endif // TARGET_64BIT if (t != CORINFO_TYPE_UNDEF) { return (typeInfo(JITtype2tiType(t))); } else if (bashStructToRef) { return (typeInfo(TI_REF, clsHnd)); } else { return (typeInfo(TI_STRUCT, clsHnd)); } } else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE) { // See comment in _typeInfo.h for why we do it this way. 
return (typeInfo(TI_REF, clsHnd, true)); } else { return (typeInfo(TI_REF, clsHnd)); } } /******************************************************************************/ bool Compiler::verIsSDArray(const typeInfo& ti) { if (ti.IsNullObjRef()) { // nulls are SD arrays return true; } if (!ti.IsType(TI_REF)) { return false; } if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef())) { return false; } return true; } /******************************************************************************/ /* Given 'arrayObjectType' which is an array type, fetch the element type. */ /* Returns an error type if anything goes wrong */ typeInfo Compiler::verGetArrayElemType(const typeInfo& arrayObjectType) { assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case if (!verIsSDArray(arrayObjectType)) { return typeInfo(); } CORINFO_CLASS_HANDLE childClassHandle = nullptr; CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle); return verMakeTypeInfo(ciType, childClassHandle); } /***************************************************************************** */ typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args) { CORINFO_CLASS_HANDLE classHandle; CorInfoType ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle)); var_types type = JITtype2varType(ciType); if (varTypeIsGC(type)) { // For efficiency, getArgType only returns something in classHandle for // value types. For other types that have addition type info, you // have to call back explicitly classHandle = info.compCompHnd->getArgClass(sig, args); if (!classHandle) { NO_WAY("Could not figure out Class specified in argument or local signature"); } } return verMakeTypeInfo(ciType, classHandle); } bool Compiler::verIsByRefLike(const typeInfo& ti) { if (ti.IsByRef()) { return true; } if (!ti.IsType(TI_STRUCT)) { return false; } return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_BYREF_LIKE; } bool Compiler::verIsSafeToReturnByRef(const typeInfo& ti) { if (ti.IsPermanentHomeByRef()) { return true; } else { return false; } } bool Compiler::verIsBoxable(const typeInfo& ti) { return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables || ti.IsUnboxedGenericTypeVar() || (ti.IsType(TI_STRUCT) && // exclude byreflike structs !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_BYREF_LIKE))); } // Is it a boxed value type? bool Compiler::verIsBoxedValueType(const typeInfo& ti) { if (ti.GetType() == TI_REF) { CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef(); return !!eeIsValueClass(clsHnd); } else { return false; } } /***************************************************************************** * * Check if a TailCall is legal. */ bool Compiler::verCheckTailCallConstraint( OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter? bool speculative // If true, won't throw if verificatoin fails. Instead it will // return false to the caller. // If false, it will throw. 
) { DWORD mflags; CORINFO_SIG_INFO sig; unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so // this counter is used to keep track of how many items have been // virtually popped CORINFO_METHOD_HANDLE methodHnd = nullptr; CORINFO_CLASS_HANDLE methodClassHnd = nullptr; unsigned methodClassFlgs = 0; assert(impOpcodeIsCallOpcode(opcode)); if (compIsForInlining()) { return false; } // for calli, VerifyOrReturn that this is not a virtual method if (opcode == CEE_CALLI) { /* Get the call sig */ eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig); // We don't know the target method, so we have to infer the flags, or // assume the worst-case. mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC; } else { methodHnd = pResolvedToken->hMethod; mflags = info.compCompHnd->getMethodAttribs(methodHnd); // When verifying generic code we pair the method handle with its // owning class to get the exact method signature. methodClassHnd = pResolvedToken->hClass; assert(methodClassHnd); eeGetMethodSig(methodHnd, &sig, methodClassHnd); // opcode specific check methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd); } // We must have got the methodClassHnd if opcode is not CEE_CALLI assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI); if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig); } // check compatibility of the arguments unsigned int argCount; argCount = sig.numArgs; CORINFO_ARG_LIST_HANDLE args; args = sig.args; while (argCount--) { typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack(); // check that the argument is not a byref for tailcalls VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative); // For unsafe code, we might have parameters containing pointer to the stack location. // Disallow the tailcall for this kind. CORINFO_CLASS_HANDLE classHandle; CorInfoType ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle)); VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative); args = info.compCompHnd->getArgNext(args); } // update popCount popCount += sig.numArgs; // check for 'this' which is on non-static methods, not called via NEWOBJ if (!(mflags & CORINFO_FLG_STATIC)) { // Always update the popCount. // This is crucial for the stack calculation to be correct. typeInfo tiThis = impStackTop(popCount).seTypeInfo; popCount++; if (opcode == CEE_CALLI) { // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object // on the stack. 
if (tiThis.IsValueClass()) { tiThis.MakeByRef(); } VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative); } else { // Check type compatibility of the this argument typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd); if (tiDeclaredThis.IsValueClass()) { tiDeclaredThis.MakeByRef(); } VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative); } } // Tail calls on constrained calls should be illegal too: // when instantiated at a value type, a constrained call may pass the address of a stack allocated value VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative); // Get the exact view of the signature for an array method if (sig.retType != CORINFO_TYPE_VOID) { if (methodClassFlgs & CORINFO_FLG_ARRAY) { assert(opcode != CEE_CALLI); eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig); } } typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass); typeInfo tiCallerRetType = verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass); // void return type gets morphed into the error type, so we have to treat them specially here if (sig.retType == CORINFO_TYPE_VOID) { VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch", speculative); } else { VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType), NormaliseForStack(tiCallerRetType), true), "tailcall return mismatch", speculative); } // for tailcall, stack must be empty VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative); return true; // Yes, tailcall is legal } /***************************************************************************** * * Checks the IL verification rules for the call */ void Compiler::verVerifyCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, bool tailCall, bool readonlyCall, const BYTE* delegateCreateStart, const BYTE* codeAddr, CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName)) { DWORD mflags; CORINFO_SIG_INFO* sig = nullptr; unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so // this counter is used to keep track of how many items have been // virtually popped // for calli, VerifyOrReturn that this is not a virtual method if (opcode == CEE_CALLI) { Verify(false, "Calli not verifiable"); return; } //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item. 
mflags = callInfo->verMethodFlags; sig = &callInfo->verSig; if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig); } // opcode specific check unsigned methodClassFlgs = callInfo->classFlags; switch (opcode) { case CEE_CALLVIRT: // cannot do callvirt on valuetypes VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class"); VerifyOrReturn(sig->hasThis(), "CallVirt on static method"); break; case CEE_NEWOBJ: { assert(!tailCall); // Importer should not allow this VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC), "newobj must be on instance"); if (methodClassFlgs & CORINFO_FLG_DELEGATE) { VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor"); typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack(); typeInfo tiDeclaredFtn = verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack(); VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type"); assert(popCount == 0); typeInfo tiActualObj = impStackTop(1).seTypeInfo; typeInfo tiActualFtn = impStackTop(0).seTypeInfo; VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg"); VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch"); VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF), "delegate object type mismatch"); CORINFO_CLASS_HANDLE objTypeHandle = tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef(); // the method signature must be compatible with the delegate's invoke method // check that for virtual functions, the type of the object used to get the // ftn ptr is the same as the type of the object passed to the delegate ctor. // since this is a bit of work to determine in general, we pattern match stylized // code sequences // the delegate creation code check, which used to be done later, is now done here // so we can read delegateMethodRef directly from // from the preceding LDFTN or CEE_LDVIRTFN instruction sequence; // we then use it in our call to isCompatibleDelegate(). 
mdMemberRef delegateMethodRef = mdMemberRefNil; VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef), "must create delegates with certain IL"); CORINFO_RESOLVED_TOKEN delegateResolvedToken; delegateResolvedToken.tokenContext = impTokenLookupContextHandle; delegateResolvedToken.tokenScope = info.compScopeHnd; delegateResolvedToken.token = delegateMethodRef; delegateResolvedToken.tokenType = CORINFO_TOKENKIND_Method; info.compCompHnd->resolveToken(&delegateResolvedToken); CORINFO_CALL_INFO delegateCallInfo; eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */, CORINFO_CALLINFO_SECURITYCHECKS, &delegateCallInfo); bool isOpenDelegate = false; VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass, tiActualFtn.GetMethod(), pResolvedToken->hClass, &isOpenDelegate), "function incompatible with delegate"); // check the constraints on the target method VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass), "delegate target has unsatisfied class constraints"); VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass, tiActualFtn.GetMethod()), "delegate target has unsatisfied method constraints"); // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch) // for additional verification rules for delegates CORINFO_METHOD_HANDLE actualMethodHandle = tiActualFtn.GetMethod(); DWORD actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle); if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr)) { if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)) { VerifyOrReturn((tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly()) || verIsBoxedValueType(tiActualObj), "The 'this' parameter to the call must be either the calling method's " "'this' parameter or " "a boxed value type."); } } if (actualMethodAttribs & CORINFO_FLG_PROTECTED) { bool targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC; Verify(targetIsStatic || !isOpenDelegate, "Unverifiable creation of an open instance delegate for a protected member."); CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic) ? info.compClassHnd : tiActualObj.GetClassHandleForObjRef(); // In the case of protected methods, it is a requirement that the 'this' // pointer be a subclass of the current context. Perform this check. 
Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd), "Accessing protected method through wrong type."); } goto DONE_ARGS; } } // fall thru to default checks FALLTHROUGH; default: VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract"); } VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)), "can only newobj a delegate constructor"); // check compatibility of the arguments unsigned int argCount; argCount = sig->numArgs; CORINFO_ARG_LIST_HANDLE args; args = sig->args; while (argCount--) { typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo; typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack(); VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch"); args = info.compCompHnd->getArgNext(args); } DONE_ARGS: // update popCount popCount += sig->numArgs; // check for 'this' which are is non-static methods, not called via NEWOBJ CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd; if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ)) { typeInfo tiThis = impStackTop(popCount).seTypeInfo; popCount++; // If it is null, we assume we can access it (since it will AV shortly) // If it is anything but a reference class, there is no hierarchy, so // again, we don't need the precise instance class to compute 'protected' access if (tiThis.IsType(TI_REF)) { instanceClassHnd = tiThis.GetClassHandleForObjRef(); } // Check type compatibility of the this argument typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass); if (tiDeclaredThis.IsValueClass()) { tiDeclaredThis.MakeByRef(); } // If this is a call to the base class .ctor, set thisPtr Init for // this block. if (mflags & CORINFO_FLG_CONSTRUCTOR) { if (verTrackObjCtorInitState && tiThis.IsThisPtr() && verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass)) { assert(verCurrentState.thisInitialized != TIS_Bottom); // This should never be the case just from the logic of the verifier. VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit, "Call to base class constructor when 'this' is possibly initialized"); // Otherwise, 'this' is now initialized. verCurrentState.thisInitialized = TIS_Init; tiThis.SetInitialisedObjRef(); } else { // We allow direct calls to value type constructors // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a // constrained callvirt to illegally re-enter a .ctor on a value of reference type. 
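// Illustrative example of what the check below rejects: if tiThis were a byref whose target is a
// reference type (e.g. a 'constrained.' callvirt to a .ctor with the type parameter instantiated
// to a class), allowing the call would re-run a constructor on an already-constructed object.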
VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(), "Bad call to a constructor"); } } if (pConstrainedResolvedToken != nullptr) { VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call"); typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass); // We just dereference this and test for equality tiThis.DereferenceByRef(); VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint), "this type mismatch with constrained type operand"); // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass); } // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef()) { tiDeclaredThis.SetIsReadonlyByRef(); } VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch"); if (tiThis.IsByRef()) { // Find the actual type where the method exists (as opposed to what is declared // in the metadata). This is to prevent passing a byref as the "this" argument // while calling methods like System.ValueType.GetHashCode() which expect boxed objects. CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod); VerifyOrReturn(eeIsValueClass(actualClassHnd), "Call to base type of valuetype (which is never a valuetype)"); } // Rules for non-virtual call to a non-final virtual method: // Define: // The "this" pointer is considered to be "possibly written" if // 1. Its address has been taken (LDARGA 0) anywhere in the method. // (or) // 2. It has been stored to (STARG.0) anywhere in the method. // A non-virtual call to a non-final virtual method is only allowed if // 1. The this pointer passed to the callee is an instance of a boxed value type. // (or) // 2. The this pointer passed to the callee is the current method's this pointer. // (and) The current method's this pointer is not "possibly written". // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to // virtual methods. (Luckily this does not affect .ctors, since they are not virtual). // This is stronger than is strictly needed, but implementing a laxer rule is significantly // hard and more error prone. if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)) { VerifyOrReturn((tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly()) || verIsBoxedValueType(tiThis), "The 'this' parameter to the call must be either the calling method's 'this' parameter or " "a boxed value type."); } } // check any constraints on the callee's class and type parameters VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass), "method has unsatisfied class constraints"); VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod), "method has unsatisfied method constraints"); if (mflags & CORINFO_FLG_PROTECTED) { VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd), "Can't access protected method"); } // Get the exact view of the signature for an array method if (sig->retType != CORINFO_TYPE_VOID) { eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass); } // "readonly." prefixed calls only allowed for the Address operation on arrays. // The methods supported by array types are under the control of the EE // so we can trust that only the Address operation returns a byref. 
if (readonlyCall) { typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass); VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(), "unexpected use of readonly prefix"); } // Verify the tailcall if (tailCall) { verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false); } } /***************************************************************************** * Checks that a delegate creation is done using the following pattern: * dup * ldvirtftn targetMemberRef * OR * ldftn targetMemberRef * * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if * not in this basic block) * * targetMemberRef is read from the code sequence. * targetMemberRef is validated iff verificationNeeded. */ bool Compiler::verCheckDelegateCreation(const BYTE* delegateCreateStart, const BYTE* codeAddr, mdMemberRef& targetMemberRef) { if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr)) { targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]); return true; } else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr)) { targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]); return true; } return false; } typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType) { Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref"); typeInfo ptrVal = verVerifyLDIND(tiTo, instrType); typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack(); if (!tiCompatibleWith(value, normPtrVal, true)) { Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch"); } return ptrVal; } typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType) { assert(!instrType.IsStruct()); typeInfo ptrVal; if (ptr.IsByRef()) { ptrVal = DereferenceByRef(ptr); if (instrType.IsObjRef() && !ptrVal.IsObjRef()) { Verify(false, "bad pointer"); } else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal)) { Verify(false, "pointer not consistent with instr"); } } else { Verify(false, "pointer not byref"); } return ptrVal; } // Verify that the field is used properly. 'tiThis' is NULL for statics, // 'fieldFlags' is the fields attributes, and mutator is true if it is a // ld*flda or a st*fld. // 'enclosingClass' is given if we are accessing a field in some specific type. void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken, const CORINFO_FIELD_INFO& fieldInfo, const typeInfo* tiThis, bool mutator, bool allowPlainStructAsThis) { CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass; unsigned fieldFlags = fieldInfo.fieldFlags; CORINFO_CLASS_HANDLE instanceClass = info.compClassHnd; // for statics, we imagine the instance is the current class. 
bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0); if (mutator) { Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static"); if ((fieldFlags & CORINFO_FLG_FIELD_FINAL)) { Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd && info.compIsStatic == isStaticField, "bad use of initonly field (set or address taken)"); } } if (tiThis == nullptr) { Verify(isStaticField, "used static opcode with non-static field"); } else { typeInfo tThis = *tiThis; if (allowPlainStructAsThis && tThis.IsValueClass()) { tThis.MakeByRef(); } // If it is null, we assume we can access it (since it will AV shortly) // If it is anything but a reference class, there is no hierarchy, so // again, we don't need the precise instance class to compute 'protected' access if (tiThis->IsType(TI_REF)) { instanceClass = tiThis->GetClassHandleForObjRef(); } // Note that even if the field is static, we require that the this pointer // satisfy the same constraints as a non-static field. This happens to // be simpler and seems reasonable typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass); if (tiDeclaredThis.IsValueClass()) { tiDeclaredThis.MakeByRef(); // we allow read-only tThis, on any field access (even stores!), because if the // class implementor wants to prohibit stores he should make the field private. // we do this by setting the read-only bit on the type we compare tThis to. tiDeclaredThis.SetIsReadonlyByRef(); } else if (verTrackObjCtorInitState && tThis.IsThisPtr()) { // Any field access is legal on "uninitialized" this pointers. // The easiest way to implement this is to simply set the // initialized bit for the duration of the type check on the // field access only. It does not change the state of the "this" // for the function as a whole. Note that the "tThis" is a copy // of the original "this" type (*tiThis) passed in. tThis.SetInitialisedObjRef(); } Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch"); } // Presently the JIT does not check that we don't store or take the address of init-only fields // since we cannot guarantee their immutability and it is not a security issue. // check any constraints on the field's class --- accessing the field might cause a class constructor to run. VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass), "field has unsatisfied class constraints"); if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED) { Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass), "Accessing protected method through wrong type."); } } void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode) { if (tiOp1.IsNumberType()) { #ifdef TARGET_64BIT Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch"); #else // TARGET_64BIT // [10/17/2013] Consider changing this: to put on my verification lawyer hat, // this is non-conforming to the ECMA Spec: types don't have to be equivalent, // but compatible, since we can coalesce native int with int32 (see section III.1.5). 
Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch"); #endif // !TARGET_64BIT } else if (tiOp1.IsObjRef()) { switch (opcode) { case CEE_BEQ_S: case CEE_BEQ: case CEE_BNE_UN_S: case CEE_BNE_UN: case CEE_CEQ: case CEE_CGT_UN: break; default: Verify(false, "Cond not allowed on object types"); } Verify(tiOp2.IsObjRef(), "Cond type mismatch"); } else if (tiOp1.IsByRef()) { Verify(tiOp2.IsByRef(), "Cond type mismatch"); } else { Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch"); } } void Compiler::verVerifyThisPtrInitialised() { if (verTrackObjCtorInitState) { Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized"); } } bool Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target) { // Either target == context, in this case calling an alternate .ctor // Or target is the immediate parent of context return ((target == context) || (target == info.compCompHnd->getParentType(context))); } GenTree* Compiler::impImportLdvirtftn(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE)) { NO_WAY("Virtual call to a function added via EnC is not supported"); } // CoreRT generic virtual method if ((pCallInfo->sig.sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI)) { GenTree* runtimeMethodHandle = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_METHOD_HDL, pCallInfo->hMethod); return gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL, gtNewCallArgs(thisPtr, runtimeMethodHandle)); } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { if (!pCallInfo->exactContextNeedsRuntimeLookup) { GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, gtNewCallArgs(thisPtr)); call->setEntryPoint(pCallInfo->codePointerLookup.constLookup); return call; } // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too. if (IsTargetAbi(CORINFO_CORERT_ABI)) { GenTree* ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind); return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL, gtNewCallArgs(ctxTree), &pCallInfo->codePointerLookup.lookupKind); } } #endif // Get the exact descriptor for the static callsite GenTree* exactTypeDesc = impParentClassTokenToHandle(pResolvedToken); if (exactTypeDesc == nullptr) { // compDonotInline() return nullptr; } GenTree* exactMethodDesc = impTokenToHandle(pResolvedToken); if (exactMethodDesc == nullptr) { // compDonotInline() return nullptr; } GenTreeCall::Use* helpArgs = gtNewCallArgs(exactMethodDesc); helpArgs = gtPrependNewCallArg(exactTypeDesc, helpArgs); helpArgs = gtPrependNewCallArg(thisPtr, helpArgs); // Call helper function. This gets the target address of the final destination callsite. return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs); } //------------------------------------------------------------------------ // impBoxPatternMatch: match and import common box idioms // // Arguments: // pResolvedToken - resolved token from the box operation // codeAddr - position in IL stream after the box instruction // codeEndp - end of IL stream // // Return Value: // Number of IL bytes matched and imported, -1 otherwise // // Notes: // pResolvedToken is known to be a value type; ref type boxing // is handled in the CEE_BOX clause. 
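// For illustration, the idioms recognized below typically come from IL shaped like:
//
//     box        !!T             box        !!T             box        !!T
//     unbox.any  !!T             brtrue.s   IL_xx           isinst     SomeRefType
//                                                           brtrue.s   IL_xx
//
// (plus the box + isinst + unbox.any form). For example, C# such as "(TTo)(object)value",
// "value != null" and "value is ISomeInterface" in shared generic code commonly lowers to
// these sequences, which can then be folded away without allocating the box.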
int Compiler::impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, const BYTE* codeAddr, const BYTE* codeEndp, bool makeInlineObservation) { if (codeAddr >= codeEndp) { return -1; } switch (codeAddr[0]) { case CEE_UNBOX_ANY: // box + unbox.any if (codeAddr + 1 + sizeof(mdToken) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 1 + sizeof(mdToken); } CORINFO_RESOLVED_TOKEN unboxResolvedToken; impResolveToken(codeAddr + 1, &unboxResolvedToken, CORINFO_TOKENKIND_Class); // See if the resolved tokens describe types that are equal. const TypeCompareState compare = info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, pResolvedToken->hClass); // If so, box/unbox.any is a nop. if (compare == TypeCompareState::Must) { JITDUMP("\n Importing BOX; UNBOX.ANY as NOP\n"); // Skip the next unbox.any instruction return 1 + sizeof(mdToken); } } break; case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRFALSE_S: // box + br_true/false if ((codeAddr + ((codeAddr[0] >= CEE_BRFALSE) ? 5 : 2)) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 0; } GenTree* const treeToBox = impStackTop().val; bool canOptimize = true; GenTree* treeToNullcheck = nullptr; // Can the thing being boxed cause a side effect? if ((treeToBox->gtFlags & GTF_SIDE_EFFECT) != 0) { // Is this a side effect we can replicate cheaply? if (((treeToBox->gtFlags & GTF_SIDE_EFFECT) == GTF_EXCEPT) && treeToBox->OperIs(GT_OBJ, GT_BLK, GT_IND)) { // Yes, we just need to perform a null check if needed. GenTree* const addr = treeToBox->AsOp()->gtGetOp1(); if (fgAddrCouldBeNull(addr)) { treeToNullcheck = addr; } } else { canOptimize = false; } } if (canOptimize) { CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass); if (boxHelper == CORINFO_HELP_BOX) { JITDUMP("\n Importing BOX; BR_TRUE/FALSE as %sconstant\n", treeToNullcheck == nullptr ? "" : "nullcheck+"); impPopStack(); GenTree* result = gtNewIconNode(1); if (treeToNullcheck != nullptr) { GenTree* nullcheck = gtNewNullCheck(treeToNullcheck, compCurBB); result = gtNewOperNode(GT_COMMA, TYP_INT, nullcheck, result); } impPushOnStack(result, typeInfo(TI_INT)); return 0; } } } break; case CEE_ISINST: if (codeAddr + 1 + sizeof(mdToken) + 1 <= codeEndp) { const BYTE* nextCodeAddr = codeAddr + 1 + sizeof(mdToken); switch (nextCodeAddr[0]) { // box + isinst + br_true/false case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRFALSE_S: if ((nextCodeAddr + ((nextCodeAddr[0] >= CEE_BRFALSE) ? 5 : 2)) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 1 + sizeof(mdToken); } if (!(impStackTop().val->gtFlags & GTF_SIDE_EFFECT)) { CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass); if (boxHelper == CORINFO_HELP_BOX) { CORINFO_RESOLVED_TOKEN isInstResolvedToken; impResolveToken(codeAddr + 1, &isInstResolvedToken, CORINFO_TOKENKIND_Casting); TypeCompareState castResult = info.compCompHnd->compareTypesForCast(pResolvedToken->hClass, isInstResolvedToken.hClass); if (castResult != TypeCompareState::May) { JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as constant\n"); impPopStack(); impPushOnStack(gtNewIconNode((castResult == TypeCompareState::Must) ? 
1 : 0), typeInfo(TI_INT)); // Skip the next isinst instruction return 1 + sizeof(mdToken); } } else if (boxHelper == CORINFO_HELP_BOX_NULLABLE) { // For nullable we're going to fold it to "ldfld hasValue + brtrue/brfalse" or // "ldc.i4.0 + brtrue/brfalse" in case if the underlying type is not castable to // the target type. CORINFO_RESOLVED_TOKEN isInstResolvedToken; impResolveToken(codeAddr + 1, &isInstResolvedToken, CORINFO_TOKENKIND_Casting); CORINFO_CLASS_HANDLE nullableCls = pResolvedToken->hClass; CORINFO_CLASS_HANDLE underlyingCls = info.compCompHnd->getTypeForBox(nullableCls); TypeCompareState castResult = info.compCompHnd->compareTypesForCast(underlyingCls, isInstResolvedToken.hClass); if (castResult == TypeCompareState::Must) { const CORINFO_FIELD_HANDLE hasValueFldHnd = info.compCompHnd->getFieldInClass(nullableCls, 0); assert(info.compCompHnd->getFieldOffset(hasValueFldHnd) == 0); assert(!strcmp(info.compCompHnd->getFieldName(hasValueFldHnd, nullptr), "hasValue")); GenTree* objToBox = impPopStack().val; // Spill struct to get its address (to access hasValue field) objToBox = impGetStructAddr(objToBox, nullableCls, (unsigned)CHECK_SPILL_ALL, true); impPushOnStack(gtNewFieldRef(TYP_BOOL, hasValueFldHnd, objToBox, 0), typeInfo(TI_INT)); JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as nullableVT.hasValue\n"); return 1 + sizeof(mdToken); } else if (castResult == TypeCompareState::MustNot) { impPopStack(); impPushOnStack(gtNewIconNode(0), typeInfo(TI_INT)); JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as constant (false)\n"); return 1 + sizeof(mdToken); } } } } break; // box + isinst + unbox.any case CEE_UNBOX_ANY: if ((nextCodeAddr + 1 + sizeof(mdToken)) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 2 + sizeof(mdToken) * 2; } // See if the resolved tokens in box, isinst and unbox.any describe types that are equal. CORINFO_RESOLVED_TOKEN isinstResolvedToken = {}; impResolveToken(codeAddr + 1, &isinstResolvedToken, CORINFO_TOKENKIND_Class); if (info.compCompHnd->compareTypesForEquality(isinstResolvedToken.hClass, pResolvedToken->hClass) == TypeCompareState::Must) { CORINFO_RESOLVED_TOKEN unboxResolvedToken = {}; impResolveToken(nextCodeAddr + 1, &unboxResolvedToken, CORINFO_TOKENKIND_Class); // If so, box + isinst + unbox.any is a nop. if (info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, pResolvedToken->hClass) == TypeCompareState::Must) { JITDUMP("\n Importing BOX; ISINST, UNBOX.ANY as NOP\n"); return 2 + sizeof(mdToken) * 2; } } } break; } } break; default: break; } return -1; } //------------------------------------------------------------------------ // impImportAndPushBox: build and import a value-type box // // Arguments: // pResolvedToken - resolved token from the box operation // // Return Value: // None. // // Side Effects: // The value to be boxed is popped from the stack, and a tree for // the boxed value is pushed. This method may create upstream // statements, spill side effecting trees, and create new temps. // // If importing an inlinee, we may also discover the inline must // fail. If so there is no new value pushed on the stack. Callers // should use CompDoNotInline after calling this method to see if // ongoing importation should be aborted. // // Notes: // Boxing of ref classes results in the same value as the value on // the top of the stack, so is handled inline in impImportBlockCode // for the CEE_BOX case. Only value or primitive type boxes make it // here. 
// // Boxing for nullable types is done via a helper call; boxing // of other value types is expanded inline or handled via helper // call, depending on the jit's codegen mode. // // When the jit is operating in size and time constrained modes, // using a helper call here can save jit time and code size. But it // also may inhibit cleanup optimizations that could have also had a // even greater benefit effect on code size and jit time. An optimal // strategy may need to peek ahead and see if it is easy to tell how // the box is being used. For now, we defer. void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken) { // Spill any special side effects impSpillSpecialSideEff(); // Get get the expression to box from the stack. GenTree* op1 = nullptr; GenTree* op2 = nullptr; StackEntry se = impPopStack(); CORINFO_CLASS_HANDLE operCls = se.seTypeInfo.GetClassHandle(); GenTree* exprToBox = se.val; // Look at what helper we should use. CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass); // Determine what expansion to prefer. // // In size/time/debuggable constrained modes, the helper call // expansion for box is generally smaller and is preferred, unless // the value to box is a struct that comes from a call. In that // case the call can construct its return value directly into the // box payload, saving possibly some up-front zeroing. // // Currently primitive type boxes always get inline expanded. We may // want to do the same for small structs if they don't come from // calls and don't have GC pointers, since explicitly copying such // structs is cheap. JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via"); bool canExpandInline = (boxHelper == CORINFO_HELP_BOX); bool optForSize = !exprToBox->IsCall() && (operCls != nullptr) && opts.OptimizationDisabled(); bool expandInline = canExpandInline && !optForSize; if (expandInline) { JITDUMP(" inline allocate/copy sequence\n"); // we are doing 'normal' boxing. This means that we can inline the box operation // Box(expr) gets morphed into // temp = new(clsHnd) // cpobj(temp+4, expr, clsHnd) // push temp // The code paths differ slightly below for structs and primitives because // "cpobj" differs in these cases. In one case you get // impAssignStructPtr(temp+4, expr, clsHnd) // and the other you get // *(temp+4) = expr if (opts.OptimizationDisabled()) { // For minopts/debug code, try and minimize the total number // of box temps by reusing an existing temp when possible. if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM) { impBoxTemp = lvaGrabTemp(true DEBUGARG("Reusable Box Helper")); } } else { // When optimizing, use a new temp for each box operation // since we then know the exact class of the box temp. impBoxTemp = lvaGrabTemp(true DEBUGARG("Single-def Box Helper")); lvaTable[impBoxTemp].lvType = TYP_REF; lvaTable[impBoxTemp].lvSingleDef = 1; JITDUMP("Marking V%02u as a single def local\n", impBoxTemp); const bool isExact = true; lvaSetClass(impBoxTemp, pResolvedToken->hClass, isExact); } // needs to stay in use until this box expression is appended // some other node. We approximate this by keeping it alive until // the opcode stack becomes empty impBoxTempInUse = true; // Remember the current last statement in case we need to move // a range of statements to ensure the box temp is initialized // before it's used. 
// Statement* const cursor = impLastStmt; const bool useParent = false; op1 = gtNewAllocObjNode(pResolvedToken, useParent); if (op1 == nullptr) { // If we fail to create the newobj node, we must be inlining // and have run across a type we can't describe. // assert(compDonotInline()); return; } // Remember that this basic block contains 'new' of an object, // and so does this method // compCurBB->bbFlags |= BBF_HAS_NEWOBJ; optMethodFlags |= OMF_HAS_NEWOBJ; // Assign the boxed object to the box temp. // GenTree* asg = gtNewTempAssign(impBoxTemp, op1); Statement* asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // If the exprToBox is a call that returns its value via a ret buf arg, // move the assignment statement(s) before the call (which must be a top level tree). // // We do this because impAssignStructPtr (invoked below) will // back-substitute into a call when it sees a GT_RET_EXPR and the call // has a hidden buffer pointer, So we need to reorder things to avoid // creating out-of-sequence IR. // if (varTypeIsStruct(exprToBox) && exprToBox->OperIs(GT_RET_EXPR)) { GenTreeCall* const call = exprToBox->AsRetExpr()->gtInlineCandidate->AsCall(); if (call->HasRetBufArg()) { JITDUMP("Must insert newobj stmts for box before call [%06u]\n", dspTreeID(call)); // Walk back through the statements in this block, looking for the one // that has this call as the root node. // // Because gtNewTempAssign (above) may have added statements that // feed into the actual assignment we need to move this set of added // statements as a group. // // Note boxed allocations are side-effect free (no com or finalizer) so // our only worries here are (correctness) not overlapping the box temp // lifetime and (perf) stretching the temp lifetime across the inlinee // body. // // Since this is an inline candidate, we must be optimizing, and so we have // a unique box temp per call. So no worries about overlap. // assert(!opts.OptimizationDisabled()); // Lifetime stretching could addressed with some extra cleverness--sinking // the allocation back down to just before the copy, once we figure out // where the copy is. We defer for now. // Statement* insertBeforeStmt = cursor; noway_assert(insertBeforeStmt != nullptr); while (true) { if (insertBeforeStmt->GetRootNode() == call) { break; } // If we've searched all the statements in the block and failed to // find the call, then something's wrong. // noway_assert(insertBeforeStmt != impStmtList); insertBeforeStmt = insertBeforeStmt->GetPrevStmt(); } // Found the call. Move the statements comprising the assignment. // JITDUMP("Moving " FMT_STMT "..." FMT_STMT " before " FMT_STMT "\n", cursor->GetNextStmt()->GetID(), asgStmt->GetID(), insertBeforeStmt->GetID()); assert(asgStmt == impLastStmt); do { Statement* movingStmt = impExtractLastStmt(); impInsertStmtBefore(movingStmt, insertBeforeStmt); insertBeforeStmt = movingStmt; } while (impLastStmt != cursor); } } // Create a pointer to the box payload in op1. // op1 = gtNewLclvNode(impBoxTemp, TYP_REF); op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2); // Copy from the exprToBox to the box payload. 
// if (varTypeIsStruct(exprToBox)) { assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls)); op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL); } else { var_types lclTyp = exprToBox->TypeGet(); if (lclTyp == TYP_BYREF) { lclTyp = TYP_I_IMPL; } CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass); if (impIsPrimitive(jitType)) { lclTyp = JITtype2varType(jitType); } var_types srcTyp = exprToBox->TypeGet(); var_types dstTyp = lclTyp; // We allow float <-> double mismatches and implicit truncation for small types. assert((genActualType(srcTyp) == genActualType(dstTyp)) || (varTypeIsFloating(srcTyp) == varTypeIsFloating(dstTyp))); // Note regarding small types. // We are going to store to the box here via an indirection, so the cast added below is // redundant, since the store has an implicit truncation semantic. The reason we still // add this cast is so that the code which deals with GT_BOX optimizations does not have // to account for this implicit truncation (e. g. understand that BOX<byte>(0xFF + 1) is // actually BOX<byte>(0) or deal with signedness mismatch and other GT_CAST complexities). if (srcTyp != dstTyp) { exprToBox = gtNewCastNode(genActualType(dstTyp), exprToBox, false, dstTyp); } op1 = gtNewAssignNode(gtNewOperNode(GT_IND, dstTyp, op1), exprToBox); } // Spill eval stack to flush out any pending side effects. impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox")); // Set up this copy as a second assignment. Statement* copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); op1 = gtNewLclvNode(impBoxTemp, TYP_REF); // Record that this is a "box" node and keep track of the matching parts. op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt, copyStmt); // If it is a value class, mark the "box" node. We can use this information // to optimise several cases: // "box(x) == null" --> false // "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod" // "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod" op1->gtFlags |= GTF_BOX_VALUE; assert(op1->IsBoxedValue()); assert(asg->gtOper == GT_ASG); } else { // Don't optimize, just call the helper and be done with it. JITDUMP(" helper call because: %s\n", canExpandInline ? "optimizing for size" : "nullable"); assert(operCls != nullptr); // Ensure that the value class is restored op2 = impTokenToHandle(pResolvedToken, nullptr, true /* mustRestoreHandle */); if (op2 == nullptr) { // We must be backing out of an inline. assert(compDonotInline()); return; } GenTreeCall::Use* args = gtNewCallArgs(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true)); op1 = gtNewHelperCallNode(boxHelper, TYP_REF, args); } /* Push the result back on the stack, */ /* even if clsHnd is a value class we want the TI_REF */ typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass)); impPushOnStack(op1, tiRetVal); } //------------------------------------------------------------------------ // impImportNewObjArray: Build and import `new` of multi-dimmensional array // // Arguments: // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized // by a call to CEEInfo::resolveToken(). // pCallInfo - The CORINFO_CALL_INFO that has been initialized // by a call to CEEInfo::getCallInfo(). // // Assumptions: // The multi-dimensional array constructor arguments (array dimensions) are // pushed on the IL stack on entry to this method. 
// // Notes: // Multi-dimensional array constructors are imported as calls to a JIT // helper, not as regular calls. void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { GenTree* classHandle = impParentClassTokenToHandle(pResolvedToken); if (classHandle == nullptr) { // compDonotInline() return; } assert(pCallInfo->sig.numArgs); GenTree* node; // Reuse the temp used to pass the array dimensions to avoid bloating // the stack frame in case there are multiple calls to multi-dim array // constructors within a single method. if (lvaNewObjArrayArgs == BAD_VAR_NUM) { lvaNewObjArrayArgs = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs")); lvaTable[lvaNewObjArrayArgs].lvType = TYP_BLK; lvaTable[lvaNewObjArrayArgs].lvExactSize = 0; } // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers // for our call to CORINFO_HELP_NEW_MDARR. lvaTable[lvaNewObjArrayArgs].lvExactSize = max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32)); // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments // to one allocation at a time. impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray")); // // The arguments of the CORINFO_HELP_NEW_MDARR helper are: // - Array class handle // - Number of dimension arguments // - Pointer to block of int32 dimensions - address of lvaNewObjArrayArgs temp. // node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK); node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node); // Pop dimension arguments from the stack one at a time and store it // into lvaNewObjArrayArgs temp. for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--) { GenTree* arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT); GenTree* dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK); dest = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest); dest = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest, new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i)); dest = gtNewOperNode(GT_IND, TYP_INT, dest); node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node); } GenTreeCall::Use* args = gtNewCallArgs(node); // pass number of arguments to the helper args = gtPrependNewCallArg(gtNewIconNode(pCallInfo->sig.numArgs), args); args = gtPrependNewCallArg(classHandle, args); node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, args); for (GenTreeCall::Use& use : node->AsCall()->Args()) { node->gtFlags |= use.GetNode()->gtFlags & GTF_GLOB_EFFECT; } node->AsCall()->compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass; // Remember that this basic block contains 'new' of a md array compCurBB->bbFlags |= BBF_HAS_NEWARRAY; impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass)); } GenTree* Compiler::impTransformThis(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, CORINFO_THIS_TRANSFORM transform) { switch (transform) { case CORINFO_DEREF_THIS: { GenTree* obj = thisPtr; // This does a LDIND on the obj, which should be a byref. 
pointing to a ref impBashVarAddrsToI(obj); assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF); CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass); obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj); // ldind could point anywhere, example a boxed class static int obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE); return obj; } case CORINFO_BOX_THIS: { // Constraint calls where there might be no // unboxed entry point require us to implement the call via helper. // These only occur when a possible target of the call // may have inherited an implementation of an interface // method from System.Object or System.ValueType. The EE does not provide us with // "unboxed" versions of these methods. GenTree* obj = thisPtr; assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL); obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj); obj->gtFlags |= GTF_EXCEPT; CorInfoType jitTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass); if (impIsPrimitive(jitTyp)) { if (obj->OperIsBlk()) { obj->ChangeOperUnchecked(GT_IND); // Obj could point anywhere, example a boxed class static int obj->gtFlags |= GTF_IND_TGTANYWHERE; obj->AsOp()->gtOp2 = nullptr; // must be zero for tree walkers } obj->gtType = JITtype2varType(jitTyp); assert(varTypeIsArithmetic(obj->gtType)); } // This pushes on the dereferenced byref // This is then used immediately to box. impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack()); // This pops off the byref-to-a-value-type remaining on the stack and // replaces it with a boxed object. // This is then used as the object to the virtual call immediately below. impImportAndPushBox(pConstrainedResolvedToken); if (compDonotInline()) { return nullptr; } obj = impPopStack().val; return obj; } case CORINFO_NO_THIS_TRANSFORM: default: return thisPtr; } } //------------------------------------------------------------------------ // impCanPInvokeInline: check whether PInvoke inlining should enabled in current method. // // Return Value: // true if PInvoke inlining should be enabled in current method, false otherwise // // Notes: // Checks a number of ambient conditions where we could pinvoke but choose not to bool Compiler::impCanPInvokeInline() { return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) && (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke ; } //------------------------------------------------------------------------ // impCanPInvokeInlineCallSite: basic legality checks using information // from a call to see if the call qualifies as an inline pinvoke. // // Arguments: // block - block contaning the call, or for inlinees, block // containing the call being inlined // // Return Value: // true if this call can legally qualify as an inline pinvoke, false otherwise // // Notes: // For runtimes that support exception handling interop there are // restrictions on using inline pinvoke in handler regions. // // * We have to disable pinvoke inlining inside of filters because // in case the main execution (i.e. 
in the try block) is inside // unmanaged code, we cannot reuse the inlined stub (we still need // the original state until we are in the catch handler) // // * We disable pinvoke inlining inside handlers since the GSCookie // is in the inlined Frame (see // CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but // this would not protect framelets/return-address of handlers. // // These restrictions are currently also in place for CoreCLR but // can be relaxed when coreclr/#8459 is addressed. bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block) { if (block->hasHndIndex()) { return false; } // The remaining limitations do not apply to CoreRT if (IsTargetAbi(CORINFO_CORERT_ABI)) { return true; } #ifdef TARGET_64BIT // On 64-bit platforms, we disable pinvoke inlining inside of try regions. // Note that this could be needed on other architectures too, but we // haven't done enough investigation to know for sure at this point. // // Here is the comment from JIT64 explaining why: // [VSWhidbey: 611015] - because the jitted code links in the // Frame (instead of the stub) we rely on the Frame not being // 'active' until inside the stub. This normally happens by the // stub setting the return address pointer in the Frame object // inside the stub. On a normal return, the return address // pointer is zeroed out so the Frame can be safely re-used, but // if an exception occurs, nobody zeros out the return address // pointer. Thus if we re-used the Frame object, it would go // 'active' as soon as we link it into the Frame chain. // // Technically we only need to disable PInvoke inlining if we're // in a handler or if we're in a try body with a catch or // filter/except where other non-handler code in this method // might run and try to re-use the dirty Frame object. // // A desktop test case where this seems to matter is // jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe if (block->hasTryIndex()) { // This does not apply to the raw pinvoke call that is inside the pinvoke // ILStub. In this case, we have to inline the raw pinvoke call into the stub, // otherwise we would end up with a stub that recursively calls itself, and end // up with a stack overflow. if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && opts.ShouldUsePInvokeHelpers()) { return true; } return false; } #endif // TARGET_64BIT return true; } //------------------------------------------------------------------------ // impCheckForPInvokeCall examine call to see if it is a pinvoke and if so // if it can be expressed as an inline pinvoke. // // Arguments: // call - tree for the call // methHnd - handle for the method being called (may be null) // sig - signature of the method being called // mflags - method flags for the method being called // block - block contaning the call, or for inlinees, block // containing the call being inlined // // Notes: // Sets GTF_CALL_M_PINVOKE on the call for pinvokes. // // Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the // call passes a combination of legality and profitabilty checks. 
// // If GTF_CALL_UNMANAGED is set, increments info.compUnmanagedCallCountWithGCTransition void Compiler::impCheckForPInvokeCall( GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block) { CorInfoCallConvExtension unmanagedCallConv; // If VM flagged it as Pinvoke, flag the call node accordingly if ((mflags & CORINFO_FLG_PINVOKE) != 0) { call->gtCallMoreFlags |= GTF_CALL_M_PINVOKE; } bool suppressGCTransition = false; if (methHnd) { if ((mflags & CORINFO_FLG_PINVOKE) == 0) { return; } unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd, nullptr, &suppressGCTransition); } else { if (sig->getCallConv() == CORINFO_CALLCONV_DEFAULT || sig->getCallConv() == CORINFO_CALLCONV_VARARG) { return; } unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(nullptr, sig, &suppressGCTransition); assert(!call->gtCallCookie); } if (suppressGCTransition) { call->gtCallMoreFlags |= GTF_CALL_M_SUPPRESS_GC_TRANSITION; } // If we can't get the unmanaged calling convention or the calling convention is unsupported in the JIT, // return here without inlining the native call. if (unmanagedCallConv == CorInfoCallConvExtension::Managed || unmanagedCallConv == CorInfoCallConvExtension::Fastcall || unmanagedCallConv == CorInfoCallConvExtension::FastcallMemberFunction) { return; } optNativeCallCount++; if (methHnd == nullptr && (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) || IsTargetAbi(CORINFO_CORERT_ABI))) { // PInvoke in CoreRT ABI must be always inlined. Non-inlineable CALLI cases have been // converted to regular method calls earlier using convertPInvokeCalliToCall. // PInvoke CALLI in IL stubs must be inlined } else { // Check legality if (!impCanPInvokeInlineCallSite(block)) { return; } // Legal PInvoke CALL in PInvoke IL stubs must be inlined to avoid infinite recursive // inlining in CoreRT. Skip the ambient conditions checks and profitability checks. if (!IsTargetAbi(CORINFO_CORERT_ABI) || (info.compFlags & CORINFO_FLG_PINVOKE) == 0) { if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && opts.ShouldUsePInvokeHelpers()) { // Raw PInvoke call in PInvoke IL stub generated must be inlined to avoid infinite // recursive calls to the stub. } else { if (!impCanPInvokeInline()) { return; } // Size-speed tradeoff: don't use inline pinvoke at rarely // executed call sites. The non-inline version is more // compact. if (block->isRunRarely()) { return; } } } // The expensive check should be last if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig)) { return; } } JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s\n", info.compFullName)); call->gtFlags |= GTF_CALL_UNMANAGED; call->unmgdCallConv = unmanagedCallConv; if (!call->IsSuppressGCTransition()) { info.compUnmanagedCallCountWithGCTransition++; } // AMD64 convention is same for native and managed if (unmanagedCallConv == CorInfoCallConvExtension::C || unmanagedCallConv == CorInfoCallConvExtension::CMemberFunction) { call->gtFlags |= GTF_CALL_POP_ARGS; } if (unmanagedCallConv == CorInfoCallConvExtension::Thiscall) { call->gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL; } } GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di) { var_types callRetTyp = JITtype2varType(sig->retType); /* The function pointer is on top of the stack - It may be a * complex expression. As it is evaluated after the args, * it may cause registered args to be spilled. Simply spill it. */ // Ignore this trivial case. 
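    // (If the function pointer is already a plain local variable there is nothing to gain;
    // spilling it again would only introduce a redundant temp.)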
if (impStackTop().val->gtOper != GT_LCL_VAR) { impSpillStackEntry(verCurrentState.esStackDepth - 1, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall")); } /* Get the function pointer */ GenTree* fptr = impPopStack().val; // The function pointer is typically a sized to match the target pointer size // However, stubgen IL optimization can change LDC.I8 to LDC.I4 // See ILCodeStream::LowerOpcode assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT); #ifdef DEBUG // This temporary must never be converted to a double in stress mode, // because that can introduce a call to the cast helper after the // arguments have already been evaluated. if (fptr->OperGet() == GT_LCL_VAR) { lvaTable[fptr->AsLclVarCommon()->GetLclNum()].lvKeepType = 1; } #endif /* Create the call node */ GenTreeCall* call = gtNewIndCallNode(fptr, callRetTyp, nullptr, di); call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT); #ifdef UNIX_X86_ABI call->gtFlags &= ~GTF_CALL_POP_ARGS; #endif return call; } /*****************************************************************************/ void Compiler::impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig) { assert(call->gtFlags & GTF_CALL_UNMANAGED); /* Since we push the arguments in reverse order (i.e. right -> left) * spill any side effects from the stack * * OBS: If there is only one side effect we do not need to spill it * thus we have to spill all side-effects except last one */ unsigned lastLevelWithSideEffects = UINT_MAX; unsigned argsToReverse = sig->numArgs; // For "thiscall", the first argument goes in a register. Since its // order does not need to be changed, we do not need to spill it if (call->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) { assert(argsToReverse); argsToReverse--; } #ifndef TARGET_X86 // Don't reverse args on ARM or x64 - first four args always placed in regs in order argsToReverse = 0; #endif for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++) { if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF) { assert(lastLevelWithSideEffects == UINT_MAX); impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect")); } else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) { if (lastLevelWithSideEffects != UINT_MAX) { /* We had a previous side effect - must spill it */ impSpillStackEntry(lastLevelWithSideEffects, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect")); /* Record the level for the current side effect in case we will spill it */ lastLevelWithSideEffects = level; } else { /* This is the first side effect encountered - record its level */ lastLevelWithSideEffects = level; } } } /* The argument list is now "clean" - no out-of-order side effects * Pop the argument list in reverse order */ GenTreeCall::Use* args = impPopReverseCallArgs(sig->numArgs, sig, sig->numArgs - argsToReverse); call->AsCall()->gtCallArgs = args; if (call->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) { GenTree* thisPtr = args->GetNode(); impBashVarAddrsToI(thisPtr); assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF); } for (GenTreeCall::Use& argUse : GenTreeCall::UseList(args)) { GenTree* arg = argUse.GetNode(); call->gtFlags |= arg->gtFlags & GTF_GLOB_EFFECT; // We should not be passing gc typed args to an unmanaged call. 
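        // Byrefs do show up here in practice (for example, passing a C# "ref int" argument to
        // a DllImport whose native parameter is "int*"), so they are retyped to native int
        // below rather than rejected.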
if (varTypeIsGC(arg->TypeGet())) { // Tolerate byrefs by retyping to native int. // // This is needed or we'll generate inconsistent GC info // for this arg at the call site (gc info says byref, // pinvoke sig says native int). // if (arg->TypeGet() == TYP_BYREF) { arg->ChangeType(TYP_I_IMPL); } else { assert(!"*** invalid IL: gc ref passed to unmanaged call"); } } } } //------------------------------------------------------------------------ // impInitClass: Build a node to initialize the class before accessing the // field if necessary // // Arguments: // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized // by a call to CEEInfo::resolveToken(). // // Return Value: If needed, a pointer to the node that will perform the class // initializtion. Otherwise, nullptr. // GenTree* Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken) { CorInfoInitClassResult initClassResult = info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle); if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0) { return nullptr; } bool runtimeLookup; GenTree* node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup); if (node == nullptr) { assert(compDonotInline()); return nullptr; } if (runtimeLookup) { node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewCallArgs(node)); } else { // Call the shared non gc static helper, as its the fastest node = fgGetSharedCCtor(pResolvedToken->hClass); } return node; } GenTree* Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp) { GenTree* op1 = nullptr; #if defined(DEBUG) // If we're replaying under SuperPMI, we're going to read the data stored by SuperPMI and use it // for optimization. Unfortunately, SuperPMI doesn't implement a guarantee on the alignment of // this data, so for some platforms which don't allow unaligned access (e.g., Linux arm32), // this can fault. We should fix SuperPMI to guarantee alignment, but that is a big change. // Instead, simply fix up the data here for future use. // This variable should be the largest size element, with the largest alignment requirement, // and the native C++ compiler should guarantee sufficient alignment. double aligned_data = 0.0; void* p_aligned_data = &aligned_data; if (info.compMethodSuperPMIIndex != -1) { switch (lclTyp) { case TYP_BOOL: case TYP_BYTE: case TYP_UBYTE: static_assert_no_msg(sizeof(unsigned __int8) == sizeof(bool)); static_assert_no_msg(sizeof(unsigned __int8) == sizeof(signed char)); static_assert_no_msg(sizeof(unsigned __int8) == sizeof(unsigned char)); // No alignment necessary for byte. 
break; case TYP_SHORT: case TYP_USHORT: static_assert_no_msg(sizeof(unsigned __int16) == sizeof(short)); static_assert_no_msg(sizeof(unsigned __int16) == sizeof(unsigned short)); if ((size_t)fldAddr % sizeof(unsigned __int16) != 0) { *(unsigned __int16*)p_aligned_data = GET_UNALIGNED_16(fldAddr); fldAddr = p_aligned_data; } break; case TYP_INT: case TYP_UINT: case TYP_FLOAT: static_assert_no_msg(sizeof(unsigned __int32) == sizeof(int)); static_assert_no_msg(sizeof(unsigned __int32) == sizeof(unsigned int)); static_assert_no_msg(sizeof(unsigned __int32) == sizeof(float)); if ((size_t)fldAddr % sizeof(unsigned __int32) != 0) { *(unsigned __int32*)p_aligned_data = GET_UNALIGNED_32(fldAddr); fldAddr = p_aligned_data; } break; case TYP_LONG: case TYP_ULONG: case TYP_DOUBLE: static_assert_no_msg(sizeof(unsigned __int64) == sizeof(__int64)); static_assert_no_msg(sizeof(unsigned __int64) == sizeof(double)); if ((size_t)fldAddr % sizeof(unsigned __int64) != 0) { *(unsigned __int64*)p_aligned_data = GET_UNALIGNED_64(fldAddr); fldAddr = p_aligned_data; } break; default: assert(!"Unexpected lclTyp"); break; } } #endif // DEBUG switch (lclTyp) { int ival; __int64 lval; double dval; case TYP_BOOL: ival = *((bool*)fldAddr); goto IVAL_COMMON; case TYP_BYTE: ival = *((signed char*)fldAddr); goto IVAL_COMMON; case TYP_UBYTE: ival = *((unsigned char*)fldAddr); goto IVAL_COMMON; case TYP_SHORT: ival = *((short*)fldAddr); goto IVAL_COMMON; case TYP_USHORT: ival = *((unsigned short*)fldAddr); goto IVAL_COMMON; case TYP_UINT: case TYP_INT: ival = *((int*)fldAddr); IVAL_COMMON: op1 = gtNewIconNode(ival); break; case TYP_LONG: case TYP_ULONG: lval = *((__int64*)fldAddr); op1 = gtNewLconNode(lval); break; case TYP_FLOAT: dval = *((float*)fldAddr); op1 = gtNewDconNode(dval); op1->gtType = TYP_FLOAT; break; case TYP_DOUBLE: dval = *((double*)fldAddr); op1 = gtNewDconNode(dval); break; default: assert(!"Unexpected lclTyp"); break; } return op1; } GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp) { // Ordinary static fields never overlap. RVA statics, however, can overlap (if they're // mapped to the same ".data" declaration). That said, such mappings only appear to be // possible with ILASM, and in ILASM-produced (ILONLY) images, RVA statics are always // read-only (using "stsfld" on them is UB). In mixed-mode assemblies, RVA statics can // be mutable, but the only current producer of such images, the C++/CLI compiler, does // not appear to support mapping different fields to the same address. So we will say // that "mutable overlapping RVA statics" are UB as well. If this ever changes, code in // morph and value numbering will need to be updated to respect "gtFldMayOverlap" and // "NotAField FldSeq". // For statics that are not "boxed", the initial address tree will contain the field sequence. // For those that are, we will attach it later, when adding the indirection for the box, since // that tree will represent the true address. bool isBoxedStatic = (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) != 0; FieldSeqNode* innerFldSeq = !isBoxedStatic ? 
GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField) : FieldSeqStore::NotAField(); GenTree* op1; switch (pFieldInfo->fieldAccessor) { case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: { assert(!compIsForInlining()); // We first call a special helper to get the statics base pointer op1 = impParentClassTokenToHandle(pResolvedToken); // compIsForInlining() is false so we should not get NULL here assert(op1 != nullptr); var_types type = TYP_BYREF; switch (pFieldInfo->helper) { case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE: type = TYP_I_IMPL; break; case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE: case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE: case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE: break; default: assert(!"unknown generic statics helper"); break; } op1 = gtNewHelperCallNode(pFieldInfo->helper, type, gtNewCallArgs(op1)); op1 = gtNewOperNode(GT_ADD, type, op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq)); } break; case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER: { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { GenTreeFlags callFlags = GTF_EMPTY; if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT) { callFlags |= GTF_CALL_HOISTABLE; } op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF); op1->gtFlags |= callFlags; op1->AsCall()->setEntryPoint(pFieldInfo->fieldLookup); } else #endif { op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper); } op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq)); break; } case CORINFO_FIELD_STATIC_READYTORUN_HELPER: { #ifdef FEATURE_READYTORUN assert(opts.IsReadyToRun()); assert(!compIsForInlining()); CORINFO_LOOKUP_KIND kind; info.compCompHnd->getLocationOfThisType(info.compMethodHnd, &kind); assert(kind.needsRuntimeLookup); GenTree* ctxTree = getRuntimeContextTree(kind.runtimeLookupKind); GenTreeCall::Use* args = gtNewCallArgs(ctxTree); GenTreeFlags callFlags = GTF_EMPTY; if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT) { callFlags |= GTF_CALL_HOISTABLE; } var_types type = TYP_BYREF; op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, args); op1->gtFlags |= callFlags; op1->AsCall()->setEntryPoint(pFieldInfo->fieldLookup); op1 = gtNewOperNode(GT_ADD, type, op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq)); #else unreached(); #endif // FEATURE_READYTORUN } break; default: { // Do we need the address of a static field? // if (access & CORINFO_ACCESS_ADDRESS) { void** pFldAddr = nullptr; void* fldAddr = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr); // We should always be able to access this static's address directly. assert(pFldAddr == nullptr); // Create the address node. GenTreeFlags handleKind = isBoxedStatic ? GTF_ICON_STATIC_BOX_PTR : GTF_ICON_STATIC_HDL; op1 = gtNewIconHandleNode((size_t)fldAddr, handleKind, innerFldSeq); #ifdef DEBUG op1->AsIntCon()->gtTargetHandle = op1->AsIntCon()->gtIconVal; #endif if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { op1->gtFlags |= GTF_ICON_INITCLASS; } } else // We need the value of a static field { // In future, it may be better to just create the right tree here instead of folding it later. 
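                // For a boxed static (isBoxedStatic) the tree built below is roughly
                //     IND/OBJ( ADD( box reference, TARGET_POINTER_SIZE ) )
                // i.e. an extra indirection through the heap-allocated box, where adding
                // TARGET_POINTER_SIZE skips the box's method table pointer to reach the payload.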
op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField); if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { op1->gtFlags |= GTF_FLD_INITCLASS; } if (isBoxedStatic) { FieldSeqNode* outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField); op1->ChangeType(TYP_REF); // points at boxed object op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(TARGET_POINTER_SIZE, outerFldSeq)); if (varTypeIsStruct(lclTyp)) { // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT. op1 = gtNewObjNode(pFieldInfo->structType, op1); } else { op1 = gtNewOperNode(GT_IND, lclTyp, op1); op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING; } } return op1; } break; } } if (isBoxedStatic) { FieldSeqNode* outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField); op1 = gtNewOperNode(GT_IND, TYP_REF, op1); op1->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL); op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(TARGET_POINTER_SIZE, outerFldSeq)); } if (!(access & CORINFO_ACCESS_ADDRESS)) { if (varTypeIsStruct(lclTyp)) { // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT. op1 = gtNewObjNode(pFieldInfo->structType, op1); } else { op1 = gtNewOperNode(GT_IND, lclTyp, op1); op1->gtFlags |= GTF_GLOB_REF; } } return op1; } // In general try to call this before most of the verification work. Most people expect the access // exceptions before the verification exceptions. If you do this after, that usually doesn't happen. Turns // out if you can't access something we also think that you're unverifiable for other reasons. void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall) { if (result != CORINFO_ACCESS_ALLOWED) { impHandleAccessAllowedInternal(result, helperCall); } } void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall) { switch (result) { case CORINFO_ACCESS_ALLOWED: break; case CORINFO_ACCESS_ILLEGAL: // if we're verifying, then we need to reject the illegal access to ensure that we don't think the // method is verifiable. Otherwise, delay the exception to runtime. 
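            // (In the delayed case the helper call inserted below throws the appropriate
            // access exception, e.g. MethodAccessException or FieldAccessException, but only
            // if this code path actually executes at runtime.)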
if (compIsForImportOnly()) { info.compCompHnd->ThrowExceptionForHelper(helperCall); } else { impInsertHelperCall(helperCall); } break; } } void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo) { // Construct the argument list GenTreeCall::Use* args = nullptr; assert(helperInfo->helperNum != CORINFO_HELP_UNDEF); for (unsigned i = helperInfo->numArgs; i > 0; --i) { const CORINFO_HELPER_ARG& helperArg = helperInfo->args[i - 1]; GenTree* currentArg = nullptr; switch (helperArg.argType) { case CORINFO_HELPER_ARG_TYPE_Field: info.compCompHnd->classMustBeLoadedBeforeCodeIsRun( info.compCompHnd->getFieldClass(helperArg.fieldHandle)); currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle); break; case CORINFO_HELPER_ARG_TYPE_Method: info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle); currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle); break; case CORINFO_HELPER_ARG_TYPE_Class: info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle); currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle); break; case CORINFO_HELPER_ARG_TYPE_Module: currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle); break; case CORINFO_HELPER_ARG_TYPE_Const: currentArg = gtNewIconNode(helperArg.constant); break; default: NO_WAY("Illegal helper arg type"); } args = gtPrependNewCallArg(currentArg, args); } /* TODO-Review: * Mark as CSE'able, and hoistable. Consider marking hoistable unless you're in the inlinee. * Also, consider sticking this in the first basic block. */ GenTree* callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args); impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } //------------------------------------------------------------------------ // impTailCallRetTypeCompatible: Checks whether the return types of caller // and callee are compatible so that calle can be tail called. // sizes are not supported integral type sizes return values to temps. // // Arguments: // allowWidening -- whether to allow implicit widening by the callee. // For instance, allowing int32 -> int16 tailcalls. // The managed calling convention allows this, but // we don't want explicit tailcalls to depend on this // detail of the managed calling convention. // callerRetType -- the caller's return type // callerRetTypeClass - the caller's return struct type // callerCallConv -- calling convention of the caller // calleeRetType -- the callee's return type // calleeRetTypeClass - the callee return struct type // calleeCallConv -- calling convention of the callee // // Returns: // True if the tailcall types are compatible. // // Remarks: // Note that here we don't check compatibility in IL Verifier sense, but on the // lines of return types getting returned in the same return register. bool Compiler::impTailCallRetTypeCompatible(bool allowWidening, var_types callerRetType, CORINFO_CLASS_HANDLE callerRetTypeClass, CorInfoCallConvExtension callerCallConv, var_types calleeRetType, CORINFO_CLASS_HANDLE calleeRetTypeClass, CorInfoCallConvExtension calleeCallConv) { // Early out if the types are the same. if (callerRetType == calleeRetType) { return true; } // For integral types the managed calling convention dictates that callee // will widen the return value to 4 bytes, so we can allow implicit widening // in managed to managed tailcalls when dealing with <= 4 bytes. 
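    // For example, a caller declared to return int32 may tail call a callee declared to
    // return int16 (or bool, uint8, etc.): the callee widens its result to 4 bytes in the
    // return register, so the caller still observes a valid int32.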
bool isManaged = (callerCallConv == CorInfoCallConvExtension::Managed) && (calleeCallConv == CorInfoCallConvExtension::Managed); if (allowWidening && isManaged && varTypeIsIntegral(callerRetType) && varTypeIsIntegral(calleeRetType) && (genTypeSize(callerRetType) <= 4) && (genTypeSize(calleeRetType) <= genTypeSize(callerRetType))) { return true; } // If the class handles are the same and not null, the return types are compatible. if ((callerRetTypeClass != nullptr) && (callerRetTypeClass == calleeRetTypeClass)) { return true; } #if defined(TARGET_AMD64) || defined(TARGET_ARM64) // Jit64 compat: if (callerRetType == TYP_VOID) { // This needs to be allowed to support the following IL pattern that Jit64 allows: // tail.call // pop // ret // // Note that the above IL pattern is not valid as per IL verification rules. // Therefore, only full trust code can take advantage of this pattern. return true; } // These checks return true if the return value type sizes are the same and // get returned in the same return register i.e. caller doesn't need to normalize // return value. Some of the tail calls permitted by below checks would have // been rejected by IL Verifier before we reached here. Therefore, only full // trust code can make those tail calls. unsigned callerRetTypeSize = 0; unsigned calleeRetTypeSize = 0; bool isCallerRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true, info.compIsVarArgs, callerCallConv); bool isCalleeRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true, info.compIsVarArgs, calleeCallConv); if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg) { return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize); } #endif // TARGET_AMD64 || TARGET_ARM64 return false; } /******************************************************************************** * * Returns true if the current opcode and and the opcodes following it correspond * to a supported tail call IL pattern. * */ bool Compiler::impIsTailCallILPattern( bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive) { // Bail out if the current opcode is not a call. if (!impOpcodeIsCallOpcode(curOpcode)) { return false; } #if !FEATURE_TAILCALL_OPT_SHARED_RETURN // If shared ret tail opt is not enabled, we will enable // it for recursive methods. if (isRecursive) #endif { // we can actually handle if the ret is in a fallthrough block, as long as that is the only part of the // sequence. Make sure we don't go past the end of the IL however. 
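        // For example, a recursive "call <this method>; ret" where the ret ends up as the
        // sole opcode of the fallthrough successor block still matches the pattern here.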
codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize); } // Bail out if there is no next opcode after call if (codeAddrOfNextOpcode >= codeEnd) { return false; } OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode); return (nextOpcode == CEE_RET); } /***************************************************************************** * * Determine whether the call could be converted to an implicit tail call * */ bool Compiler::impIsImplicitTailCallCandidate( OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive) { #if FEATURE_TAILCALL_OPT if (!opts.compTailCallOpt) { return false; } if (opts.OptimizationDisabled()) { return false; } // must not be tail prefixed if (prefixFlags & PREFIX_TAILCALL_EXPLICIT) { return false; } #if !FEATURE_TAILCALL_OPT_SHARED_RETURN // the block containing call is marked as BBJ_RETURN // We allow shared ret tail call optimization on recursive calls even under // !FEATURE_TAILCALL_OPT_SHARED_RETURN. if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN)) return false; #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN // must be call+ret or call+pop+ret if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive)) { return false; } return true; #else return false; #endif // FEATURE_TAILCALL_OPT } //------------------------------------------------------------------------ // impImportCall: import a call-inspiring opcode // // Arguments: // opcode - opcode that inspires the call // pResolvedToken - resolved token for the call target // pConstrainedResolvedToken - resolved constraint token (or nullptr) // newObjThis - tree for this pointer or uninitalized newobj temp (or nullptr) // prefixFlags - IL prefix flags for the call // callInfo - EE supplied info for the call // rawILOffset - IL offset of the opcode, used for guarded devirtualization. // // Returns: // Type of the call's return value. // If we're importing an inlinee and have realized the inline must fail, the call return type should be TYP_UNDEF. // However we can't assert for this here yet because there are cases we miss. See issue #13272. // // // Notes: // opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ. // // For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated // uninitalized object. #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif var_types Compiler::impImportCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, GenTree* newobjThis, int prefixFlags, CORINFO_CALL_INFO* callInfo, IL_OFFSET rawILOffset) { assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI); // The current statement DI may not refer to the exact call, but for calls // we wish to be able to attach the exact IL instruction to get "return // value" support in the debugger, so create one with the exact IL offset. 
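    // (For example, with nested calls such as "Foo(Bar())" both calls can share one
    // statement-level sequence point; the per-call DebugInfo created here is what allows the
    // debugger to attribute a "return value" to each individual call site.)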
DebugInfo di = impCreateDIWithCurrentStackInfo(rawILOffset, true); var_types callRetTyp = TYP_COUNT; CORINFO_SIG_INFO* sig = nullptr; CORINFO_METHOD_HANDLE methHnd = nullptr; CORINFO_CLASS_HANDLE clsHnd = nullptr; unsigned clsFlags = 0; unsigned mflags = 0; GenTree* call = nullptr; GenTreeCall::Use* args = nullptr; CORINFO_THIS_TRANSFORM constraintCallThisTransform = CORINFO_NO_THIS_TRANSFORM; CORINFO_CONTEXT_HANDLE exactContextHnd = nullptr; bool exactContextNeedsRuntimeLookup = false; bool canTailCall = true; const char* szCanTailCallFailReason = nullptr; const int tailCallFlags = (prefixFlags & PREFIX_TAILCALL); const bool isReadonlyCall = (prefixFlags & PREFIX_READONLY) != 0; CORINFO_RESOLVED_TOKEN* ldftnToken = nullptr; // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could // do that before tailcalls, but that is probably not the intended // semantic. So just disallow tailcalls from synchronized methods. // Also, popping arguments in a varargs function is more work and NYI // If we have a security object, we have to keep our frame around for callers // to see any imperative security. // Reverse P/Invokes need a call to CORINFO_HELP_JIT_REVERSE_PINVOKE_EXIT // at the end, so tailcalls should be disabled. if (info.compFlags & CORINFO_FLG_SYNCH) { canTailCall = false; szCanTailCallFailReason = "Caller is synchronized"; } else if (opts.IsReversePInvoke()) { canTailCall = false; szCanTailCallFailReason = "Caller is Reverse P/Invoke"; } #if !FEATURE_FIXED_OUT_ARGS else if (info.compIsVarArgs) { canTailCall = false; szCanTailCallFailReason = "Caller is varargs"; } #endif // FEATURE_FIXED_OUT_ARGS // We only need to cast the return value of pinvoke inlined calls that return small types // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there. // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for // the time being that the callee might be compiled by the other JIT and thus the return // value will need to be widened by us (or not widened at all...) // ReadyToRun code sticks with default calling convention that does not widen small return types. bool checkForSmallType = opts.IsReadyToRun(); bool bIntrinsicImported = false; CORINFO_SIG_INFO calliSig; GenTreeCall::Use* extraArg = nullptr; /*------------------------------------------------------------------------- * First create the call node */ if (opcode == CEE_CALLI) { if (IsTargetAbi(CORINFO_CORERT_ABI)) { // See comment in impCheckForPInvokeCall BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB; if (info.compCompHnd->convertPInvokeCalliToCall(pResolvedToken, !impCanPInvokeInlineCallSite(block))) { eeGetCallInfo(pResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, callInfo); return impImportCall(CEE_CALL, pResolvedToken, nullptr, nullptr, prefixFlags, callInfo, rawILOffset); } } /* Get the call site sig */ eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &calliSig); callRetTyp = JITtype2varType(calliSig.retType); call = impImportIndirectCall(&calliSig, di); // We don't know the target method, so we have to infer the flags, or // assume the worst-case. mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC; #ifdef DEBUG if (verbose) { unsigned structSize = (callRetTyp == TYP_STRUCT) ? 
info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0; printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n", opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize); } #endif sig = &calliSig; } else // (opcode != CEE_CALLI) { NamedIntrinsic ni = NI_Illegal; // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to // supply the instantiation parameters necessary to make direct calls to underlying // shared generic code, rather than calling through instantiating stubs. If the // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT // must indeed pass an instantiation parameter. methHnd = callInfo->hMethod; sig = &(callInfo->sig); callRetTyp = JITtype2varType(sig->retType); mflags = callInfo->methodFlags; #ifdef DEBUG if (verbose) { unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0; printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n", opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize); } #endif if (compIsForInlining()) { /* Does the inlinee use StackCrawlMark */ if (mflags & CORINFO_FLG_DONT_INLINE_CALLER) { compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK); return TYP_UNDEF; } /* For now ignore varargs */ if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS); return TYP_UNDEF; } if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS); return TYP_UNDEF; } if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT)) { compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL); return TYP_UNDEF; } } clsHnd = pResolvedToken->hClass; clsFlags = callInfo->classFlags; #ifdef DEBUG // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute. // This recognition should really be done by knowing the methHnd of the relevant Mark method(s). // These should be in corelib.h, and available through a JIT/EE interface call. 
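        // Illustrative sketch of what this recognizes: a JIT test may wrap an expression as
        //     JitTestLabel.Mark(expr, label, num)
        // and the code below peels the Mark call off during import, attaching the label data
        // to 'expr' instead of importing a real call.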
const char* modName; const char* className; const char* methodName; if ((className = eeGetClassName(clsHnd)) != nullptr && strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 && (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0) { return impImportJitTestLabelMark(sig->numArgs); } #endif // DEBUG // <NICE> Factor this into getCallInfo </NICE> bool isSpecialIntrinsic = false; if ((mflags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_INTRINSIC)) != 0) { const bool isTailCall = canTailCall && (tailCallFlags != 0); call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token, isReadonlyCall, isTailCall, pConstrainedResolvedToken, callInfo->thisTransform, &ni, &isSpecialIntrinsic); if (compDonotInline()) { return TYP_UNDEF; } if (call != nullptr) { #ifdef FEATURE_READYTORUN if (call->OperGet() == GT_INTRINSIC) { if (opts.IsReadyToRun()) { noway_assert(callInfo->kind == CORINFO_CALL); call->AsIntrinsic()->gtEntryPoint = callInfo->codePointerLookup.constLookup; } else { call->AsIntrinsic()->gtEntryPoint.addr = nullptr; call->AsIntrinsic()->gtEntryPoint.accessType = IAT_VALUE; } } #endif bIntrinsicImported = true; goto DONE_CALL; } } #ifdef FEATURE_SIMD if (featureSIMD) { call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token); if (call != nullptr) { bIntrinsicImported = true; goto DONE_CALL; } } #endif // FEATURE_SIMD if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT)) { NO_WAY("Virtual call to a function added via EnC is not supported"); } if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG) { BADCODE("Bad calling convention"); } //------------------------------------------------------------------------- // Construct the call node // // Work out what sort of call we're making. // Dispense with virtual calls implemented via LDVIRTFTN immediately. constraintCallThisTransform = callInfo->thisTransform; exactContextHnd = callInfo->contextHandle; exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup; switch (callInfo->kind) { case CORINFO_VIRTUALCALL_STUB: { assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(!(clsFlags & CORINFO_FLG_VALUECLASS)); if (callInfo->stubLookup.lookupKind.needsRuntimeLookup) { if (callInfo->stubLookup.lookupKind.runtimeLookupKind == CORINFO_LOOKUP_NOT_SUPPORTED) { // Runtime does not support inlining of all shapes of runtime lookups // Inlining has to be aborted in such a case compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE); return TYP_UNDEF; } GenTree* stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd); assert(!compDonotInline()); // This is the rough code to set up an indirect stub call assert(stubAddr != nullptr); // The stubAddr may be a // complex expression. As it is evaluated after the args, // it may cause registered args to be spilled. Simply spill it. 
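                    // Rough shape of what gets built here (illustrative):
                    //     tmp  = <runtime lookup yielding the stub address>
                    //     call [tmp](args...)        // marked GTF_CALL_VIRT_STUB
                    // Spilling the lookup into the temp keeps it from being re-evaluated
                    // after the arguments, as noted above.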
                    unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
                    impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_NONE);
                    stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);

                    // Create the actual call node

                    assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
                           (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);

                    call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);

                    call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
                    call->gtFlags |= GTF_CALL_VIRT_STUB;

#ifdef TARGET_X86
                    // No tailcalls allowed for these yet...
                    canTailCall             = false;
                    szCanTailCallFailReason = "VirtualCall with runtime lookup";
#endif
                }
                else
                {
                    // The stub address is known at compile time
                    call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di);
                    call->AsCall()->gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
                    call->gtFlags |= GTF_CALL_VIRT_STUB;
                    assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE &&
                           callInfo->stubLookup.constLookup.accessType != IAT_RELPVALUE);
                    if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
                    {
                        call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
                    }
                }

#ifdef FEATURE_READYTORUN
                if (opts.IsReadyToRun())
                {
                    // Null check is sometimes needed for ready to run to handle
                    // non-virtual <-> virtual changes between versions
                    if (callInfo->nullInstanceCheck)
                    {
                        call->gtFlags |= GTF_CALL_NULLCHECK;
                    }
                }
#endif

                break;
            }

            case CORINFO_VIRTUALCALL_VTABLE:
            {
                assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
                assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
                call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di);
                call->gtFlags |= GTF_CALL_VIRT_VTABLE;

                // Should we expand virtual call targets early for this method?
                //
                if (opts.compExpandCallsEarly)
                {
                    // Mark this method to expand the virtual call target early in fgMorphCall
                    call->AsCall()->SetExpandedEarly();
                }
                break;
            }

            case CORINFO_VIRTUALCALL_LDVIRTFTN:
            {
                if (compIsForInlining())
                {
                    compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
                    return TYP_UNDEF;
                }

                assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
                assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
                // OK, we've been told to call via LDVIRTFTN, so just
                // take the call now....
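                // Rough shape of the expansion below (illustrative):
                //     thisCopy = clone(this)
                //     fptr     = ldvirtftn(this, method)
                //     tmp      = fptr
                //     calli [tmp](thisCopy, args...)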
                GenTreeCall::Use* args = impPopCallArgs(sig->numArgs, sig);

                GenTree* thisPtr = impPopStack().val;
                thisPtr          = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
                assert(thisPtr != nullptr);

                // Clone the (possibly transformed) "this" pointer
                GenTree* thisPtrCopy;
                thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
                                       nullptr DEBUGARG("LDVIRTFTN this pointer"));

                GenTree* fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
                assert(fptr != nullptr);

                thisPtr = nullptr; // can't reuse it

                // Now make an indirect call through the function pointer

                unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
                impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
                fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);

                // Create the actual call node

                call                          = gtNewIndCallNode(fptr, callRetTyp, args, di);
                call->AsCall()->gtCallThisArg = gtNewCallArgs(thisPtrCopy);
                call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);

                if ((sig->sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
                {
                    // CoreRT generic virtual method: need to handle potential fat function pointers
                    addFatPointerCandidate(call->AsCall());
                }
#ifdef FEATURE_READYTORUN
                if (opts.IsReadyToRun())
                {
                    // Null check is needed for ready to run to handle
                    // non-virtual <-> virtual changes between versions
                    call->gtFlags |= GTF_CALL_NULLCHECK;
                }
#endif

                // Since we are jumping over some code, check that it's OK to skip that code
                assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
                       (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
                goto DONE;
            }

            case CORINFO_CALL:
            {
                // This is for a non-virtual, non-interface etc. call
                call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di);

                // We remove the nullcheck for the GetType call intrinsic.
                // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
                // and intrinsics.
                if (callInfo->nullInstanceCheck &&
                    !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (ni == NI_System_Object_GetType)))
                {
                    call->gtFlags |= GTF_CALL_NULLCHECK;
                }

#ifdef FEATURE_READYTORUN
                if (opts.IsReadyToRun())
                {
                    call->AsCall()->setEntryPoint(callInfo->codePointerLookup.constLookup);
                }
#endif
                break;
            }

            case CORINFO_CALL_CODE_POINTER:
            {
                // The EE has asked us to call by computing a code pointer and then doing an
                // indirect call. This is because a runtime lookup is required to get the code entry point.

                // These calls always follow a uniform calling convention, i.e.
no extra hidden params assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0); assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG); assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG); GenTree* fptr = impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod); if (compDonotInline()) { return TYP_UNDEF; } // Now make an indirect call through the function pointer unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer")); impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL); fptr = gtNewLclvNode(lclNum, TYP_I_IMPL); call = gtNewIndCallNode(fptr, callRetTyp, nullptr, di); call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT); if (callInfo->nullInstanceCheck) { call->gtFlags |= GTF_CALL_NULLCHECK; } break; } default: assert(!"unknown call kind"); break; } //------------------------------------------------------------------------- // Set more flags PREFIX_ASSUME(call != nullptr); if (mflags & CORINFO_FLG_NOGCCHECK) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK; } // Mark call if it's one of the ones we will maybe treat as an intrinsic if (isSpecialIntrinsic) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC; } } assert(sig); assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set. /* Some sanity checks */ // CALL_VIRT and NEWOBJ must have a THIS pointer assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS)); // static bit and hasThis are negations of one another assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0)); assert(call != nullptr); /*------------------------------------------------------------------------- * Check special-cases etc */ /* Special case - Check if it is a call to Delegate.Invoke(). */ if (mflags & CORINFO_FLG_DELEGATE_INVOKE) { assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(mflags & CORINFO_FLG_FINAL); /* Set the delegate flag */ call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV; if (callInfo->wrapperDelegateInvoke) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_WRAPPER_DELEGATE_INV; } if (opcode == CEE_CALLVIRT) { assert(mflags & CORINFO_FLG_FINAL); /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */ assert(call->gtFlags & GTF_CALL_NULLCHECK); call->gtFlags &= ~GTF_CALL_NULLCHECK; } } CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass; actualMethodRetTypeSigClass = sig->retTypeSigClass; /* Check for varargs */ if (!compFeatureVarArg() && ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG || (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)) { BADCODE("Varargs not supported."); } if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG || (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG) { assert(!compIsForInlining()); /* Set the right flags */ call->gtFlags |= GTF_CALL_POP_ARGS; call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_VARARGS; /* Can't allow tailcall for varargs as it is caller-pop. The caller will be expecting to pop a certain number of arguments, but if we tailcall to a function with a different number of arguments, we are hosed. There are ways around this (caller remembers esp value, varargs is not caller-pop, etc), but not worth it. 
*/ CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_X86 if (canTailCall) { canTailCall = false; szCanTailCallFailReason = "Callee is varargs"; } #endif /* Get the total number of arguments - this is already correct * for CALLI - for methods we have to get it from the call site */ if (opcode != CEE_CALLI) { #ifdef DEBUG unsigned numArgsDef = sig->numArgs; #endif eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig); // For vararg calls we must be sure to load the return type of the // method actually being called, as well as the return types of the // specified in the vararg signature. With type equivalency, these types // may not be the same. if (sig->retTypeSigClass != actualMethodRetTypeSigClass) { if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS && sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR) { // Make sure that all valuetypes (including enums) that we push are loaded. // This is to guarantee that if a GC is triggerred from the prestub of this methods, // all valuetypes in the method signature are already loaded. // We need to be able to find the size of the valuetypes, but we cannot // do a class-load from within GC. info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass); } } assert(numArgsDef <= sig->numArgs); } /* We will have "cookie" as the last argument but we cannot push * it on the operand stack because we may overflow, so we append it * to the arg list next after we pop them */ } //--------------------------- Inline NDirect ------------------------------ // For inline cases we technically should look at both the current // block and the call site block (or just the latter if we've // fused the EH trees). However the block-related checks pertain to // EH and we currently won't inline a method with EH. So for // inlinees, just checking the call site block is sufficient. { // New lexical block here to avoid compilation errors because of GOTOs. BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB; impCheckForPInvokeCall(call->AsCall(), methHnd, sig, mflags, block); } #ifdef UNIX_X86_ABI // On Unix x86 we use caller-cleaned convention. if ((call->gtFlags & GTF_CALL_UNMANAGED) == 0) call->gtFlags |= GTF_CALL_POP_ARGS; #endif // UNIX_X86_ABI if (call->gtFlags & GTF_CALL_UNMANAGED) { // We set up the unmanaged call by linking the frame, disabling GC, etc // This needs to be cleaned up on return. // In addition, native calls have different normalization rules than managed code // (managed calling convention always widens return values in the callee) if (canTailCall) { canTailCall = false; szCanTailCallFailReason = "Callee is native"; } checkForSmallType = true; impPopArgsForUnmanagedCall(call, sig); goto DONE; } else if ((opcode == CEE_CALLI) && ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT) && ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG)) { if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig)) { // Normally this only happens with inlining. // However, a generic method (or type) being NGENd into another module // can run into this issue as well. There's not an easy fall-back for NGEN // so instead we fallback to JIT. 
if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE); } else { IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)"); } return TYP_UNDEF; } GenTree* cookie = eeGetPInvokeCookie(sig); // This cookie is required to be either a simple GT_CNS_INT or // an indirection of a GT_CNS_INT // GenTree* cookieConst = cookie; if (cookie->gtOper == GT_IND) { cookieConst = cookie->AsOp()->gtOp1; } assert(cookieConst->gtOper == GT_CNS_INT); // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that // we won't allow this tree to participate in any CSE logic // cookie->gtFlags |= GTF_DONT_CSE; cookieConst->gtFlags |= GTF_DONT_CSE; call->AsCall()->gtCallCookie = cookie; if (canTailCall) { canTailCall = false; szCanTailCallFailReason = "PInvoke calli"; } } /*------------------------------------------------------------------------- * Create the argument list */ //------------------------------------------------------------------------- // Special case - for varargs we have an implicit last argument if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { assert(!compIsForInlining()); void *varCookie, *pVarCookie; if (!info.compCompHnd->canGetVarArgsHandle(sig)) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE); return TYP_UNDEF; } varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie); assert((!varCookie) != (!pVarCookie)); GenTree* cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL, sig); assert(extraArg == nullptr); extraArg = gtNewCallArgs(cookie); } //------------------------------------------------------------------------- // Extra arg for shared generic code and array methods // // Extra argument containing instantiation information is passed in the // following circumstances: // (a) To the "Address" method on array classes; the extra parameter is // the array's type handle (a TypeDesc) // (b) To shared-code instance methods in generic structs; the extra parameter // is the struct's type handle (a vtable ptr) // (c) To shared-code per-instantiation non-generic static methods in generic // classes and structs; the extra parameter is the type handle // (d) To shared-code generic methods; the extra parameter is an // exact-instantiation MethodDesc // // We also set the exact type context associated with the call so we can // inline the call correctly later on. 
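    // Illustrative example (not from the source): for shared generic code such as
    //     static void M<T>() where T : class { ... }
    // a callsite like M<string>() must pass an extra hidden argument describing the exact
    // instantiation (case (d) above), which is what the block below constructs.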
if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE) { assert(call->AsCall()->gtCallType == CT_USER_FUNC); if (clsHnd == nullptr) { NO_WAY("CALLI on parameterized type"); } assert(opcode != CEE_CALLI); GenTree* instParam; bool runtimeLookup; // Instantiated generic method if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD) { assert(exactContextHnd != METHOD_BEING_COMPILED_CONTEXT()); CORINFO_METHOD_HANDLE exactMethodHandle = (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK); if (!exactContextNeedsRuntimeLookup) { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { instParam = impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } else #endif { instParam = gtNewIconEmbMethHndNode(exactMethodHandle); info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle); } } else { instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, true /*mustRestoreHandle*/); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } } // otherwise must be an instance method in a generic struct, // a static method in a generic type, or a runtime-generated array method else { assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS); CORINFO_CLASS_HANDLE exactClassHandle = eeGetClassFromContext(exactContextHnd); if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0) { compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD); return TYP_UNDEF; } if ((clsFlags & CORINFO_FLG_ARRAY) && isReadonlyCall) { // We indicate "readonly" to the Address operation by using a null // instParam. instParam = gtNewIconNode(0, TYP_REF); } else if (!exactContextNeedsRuntimeLookup) { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { instParam = impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } else #endif { instParam = gtNewIconEmbClsHndNode(exactClassHandle); info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle); } } else { instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, true /*mustRestoreHandle*/); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } } assert(extraArg == nullptr); extraArg = gtNewCallArgs(instParam); } if ((opcode == CEE_NEWOBJ) && ((clsFlags & CORINFO_FLG_DELEGATE) != 0)) { // Only verifiable cases are supported. // dup; ldvirtftn; newobj; or ldftn; newobj. // IL test could contain unverifiable sequence, in this case optimization should not be done. 
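        // Illustrative IL for the verifiable shape referred to above (the exact target is made up):
        //     ldftn      void C::Handler(object, class System.EventArgs)
        //     newobj     instance void System.EventHandler::.ctor(object, native int)
        // or the 'dup; ldvirtftn; newobj' form when the target is virtual.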
if (impStackHeight() > 0) { typeInfo delegateTypeInfo = impStackTop().seTypeInfo; if (delegateTypeInfo.IsToken()) { ldftnToken = delegateTypeInfo.GetToken(); } } } //------------------------------------------------------------------------- // The main group of arguments args = impPopCallArgs(sig->numArgs, sig, extraArg); call->AsCall()->gtCallArgs = args; for (GenTreeCall::Use& use : call->AsCall()->Args()) { call->gtFlags |= use.GetNode()->gtFlags & GTF_GLOB_EFFECT; } //------------------------------------------------------------------------- // The "this" pointer if (((mflags & CORINFO_FLG_STATIC) == 0) && ((sig->callConv & CORINFO_CALLCONV_EXPLICITTHIS) == 0) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr))) { GenTree* obj; if (opcode == CEE_NEWOBJ) { obj = newobjThis; } else { obj = impPopStack().val; obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform); if (compDonotInline()) { return TYP_UNDEF; } } // Store the "this" value in the call call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT; call->AsCall()->gtCallThisArg = gtNewCallArgs(obj); // Is this a virtual or interface call? if (call->AsCall()->IsVirtual()) { // only true object pointers can be virtual assert(obj->gtType == TYP_REF); // See if we can devirtualize. const bool isExplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_EXPLICIT) != 0; const bool isLateDevirtualization = false; impDevirtualizeCall(call->AsCall(), pResolvedToken, &callInfo->hMethod, &callInfo->methodFlags, &callInfo->contextHandle, &exactContextHnd, isLateDevirtualization, isExplicitTailCall, // Take care to pass raw IL offset here as the 'debug info' might be different for // inlinees. rawILOffset); // Devirtualization may change which method gets invoked. Update our local cache. // methHnd = callInfo->hMethod; } if (impIsThis(obj)) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS; } } //------------------------------------------------------------------------- // The "this" pointer for "newobj" if (opcode == CEE_NEWOBJ) { if (clsFlags & CORINFO_FLG_VAROBJSIZE) { assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately // This is a 'new' of a variable sized object, wher // the constructor is to return the object. In this case // the constructor claims to return VOID but we know it // actually returns the new object assert(callRetTyp == TYP_VOID); callRetTyp = TYP_REF; call->gtType = TYP_REF; impSpillSpecialSideEff(); impPushOnStack(call, typeInfo(TI_REF, clsHnd)); } else { if (clsFlags & CORINFO_FLG_DELEGATE) { // New inliner morph it in impImportCall. // This will allow us to inline the call to the delegate constructor. call = fgOptimizeDelegateConstructor(call->AsCall(), &exactContextHnd, ldftnToken); } if (!bIntrinsicImported) { #if defined(DEBUG) || defined(INLINE_DATA) // Keep track of the raw IL offset of the call call->AsCall()->gtRawILOffset = rawILOffset; #endif // defined(DEBUG) || defined(INLINE_DATA) // Is it an inline candidate? impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo); } // append the call node. impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); // Now push the value of the 'new onto the stack // This is a 'new' of a non-variable sized object. // Append the new node (op1) to the statement list, // and then push the local holding the value of this // new instruction on the stack. 
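                // Illustrative: the ctor call itself is void, so the value pushed here is the temp
                // that received the allocation ('newobjThis'); for value classes that temp is a
                // local whose address was passed to the ctor (the branch just below).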
if (clsFlags & CORINFO_FLG_VALUECLASS) { assert(newobjThis->gtOper == GT_ADDR && newobjThis->AsOp()->gtOp1->gtOper == GT_LCL_VAR); unsigned tmp = newobjThis->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack()); } else { if (newobjThis->gtOper == GT_COMMA) { // We must have inserted the callout. Get the real newobj. newobjThis = newobjThis->AsOp()->gtOp2; } assert(newobjThis->gtOper == GT_LCL_VAR); impPushOnStack(gtNewLclvNode(newobjThis->AsLclVarCommon()->GetLclNum(), TYP_REF), typeInfo(TI_REF, clsHnd)); } } return callRetTyp; } DONE: #ifdef DEBUG // In debug we want to be able to register callsites with the EE. assert(call->AsCall()->callSig == nullptr); call->AsCall()->callSig = new (this, CMK_Generic) CORINFO_SIG_INFO; *call->AsCall()->callSig = *sig; #endif // Final importer checks for calls flagged as tail calls. // if (tailCallFlags != 0) { const bool isExplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_EXPLICIT) != 0; const bool isImplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_IMPLICIT) != 0; const bool isStressTailCall = (tailCallFlags & PREFIX_TAILCALL_STRESS) != 0; // Exactly one of these should be true. assert(isExplicitTailCall != isImplicitTailCall); // This check cannot be performed for implicit tail calls for the reason // that impIsImplicitTailCallCandidate() is not checking whether return // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT. // As a result it is possible that in the following case, we find that // the type stack is non-empty if Callee() is considered for implicit // tail calling. // int Caller(..) { .... void Callee(); ret val; ... } // // Note that we cannot check return type compatibility before ImpImportCall() // as we don't have required info or need to duplicate some of the logic of // ImpImportCall(). // // For implicit tail calls, we perform this check after return types are // known to be compatible. if (isExplicitTailCall && (verCurrentState.esStackDepth != 0)) { BADCODE("Stack should be empty after tailcall"); } // For opportunistic tailcalls we allow implicit widening, i.e. tailcalls from int32 -> int16, since the // managed calling convention dictates that the callee widens the value. For explicit tailcalls we don't // want to require this detail of the calling convention to bubble up to the tailcall helpers bool allowWidening = isImplicitTailCall; if (canTailCall && !impTailCallRetTypeCompatible(allowWidening, info.compRetType, info.compMethodInfo->args.retTypeClass, info.compCallConv, callRetTyp, sig->retTypeClass, call->AsCall()->GetUnmanagedCallConv())) { canTailCall = false; szCanTailCallFailReason = "Return types are not tail call compatible"; } // Stack empty check for implicit tail calls. if (canTailCall && isImplicitTailCall && (verCurrentState.esStackDepth != 0)) { #ifdef TARGET_AMD64 // JIT64 Compatibility: Opportunistic tail call stack mismatch throws a VerificationException // in JIT64, not an InvalidProgramException. Verify(false, "Stack should be empty after tailcall"); #else // TARGET_64BIT BADCODE("Stack should be empty after tailcall"); #endif //! TARGET_64BIT } // assert(compCurBB is not a catch, finally or filter block); // assert(compCurBB is not a try block protected by a finally block); assert(!isExplicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN); // Ask VM for permission to tailcall if (canTailCall) { // True virtual or indirect calls, shouldn't pass in a callee handle. 
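            // (For an indirect or truly virtual call we cannot name the exact callee at jit time,
            //  which is why a null handle is passed below and the runtime is left to answer with
            //  whatever it can prove.)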
CORINFO_METHOD_HANDLE exactCalleeHnd = ((call->AsCall()->gtCallType != CT_USER_FUNC) || call->AsCall()->IsVirtual()) ? nullptr : methHnd; if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, isExplicitTailCall)) { if (isExplicitTailCall) { // In case of explicit tail calls, mark it so that it is not considered // for in-lining. call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL; JITDUMP("\nGTF_CALL_M_EXPLICIT_TAILCALL set for call [%06u]\n", dspTreeID(call)); if (isStressTailCall) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_STRESS_TAILCALL; JITDUMP("\nGTF_CALL_M_STRESS_TAILCALL set for call [%06u]\n", dspTreeID(call)); } } else { #if FEATURE_TAILCALL_OPT // Must be an implicit tail call. assert(isImplicitTailCall); // It is possible that a call node is both an inline candidate and marked // for opportunistic tail calling. In-lining happens before morhphing of // trees. If in-lining of an in-line candidate gets aborted for whatever // reason, it will survive to the morphing stage at which point it will be // transformed into a tail call after performing additional checks. call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL; JITDUMP("\nGTF_CALL_M_IMPLICIT_TAILCALL set for call [%06u]\n", dspTreeID(call)); #else //! FEATURE_TAILCALL_OPT NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls"); #endif // FEATURE_TAILCALL_OPT } // This might or might not turn into a tailcall. We do more // checks in morph. For explicit tailcalls we need more // information in morph in case it turns out to be a // helper-based tailcall. if (isExplicitTailCall) { assert(call->AsCall()->tailCallInfo == nullptr); call->AsCall()->tailCallInfo = new (this, CMK_CorTailCallInfo) TailCallSiteInfo; switch (opcode) { case CEE_CALLI: call->AsCall()->tailCallInfo->SetCalli(sig); break; case CEE_CALLVIRT: call->AsCall()->tailCallInfo->SetCallvirt(sig, pResolvedToken); break; default: call->AsCall()->tailCallInfo->SetCall(sig, pResolvedToken); break; } } } else { // canTailCall reported its reasons already canTailCall = false; JITDUMP("\ninfo.compCompHnd->canTailCall returned false for call [%06u]\n", dspTreeID(call)); } } else { // If this assert fires it means that canTailCall was set to false without setting a reason! assert(szCanTailCallFailReason != nullptr); JITDUMP("\nRejecting %splicit tail call for [%06u], reason: '%s'\n", isExplicitTailCall ? "ex" : "im", dspTreeID(call), szCanTailCallFailReason); info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, isExplicitTailCall, TAILCALL_FAIL, szCanTailCallFailReason); } } // Note: we assume that small return types are already normalized by the managed callee // or by the pinvoke stub for calls to unmanaged code. if (!bIntrinsicImported) { // // Things needed to be checked when bIntrinsicImported is false. // assert(call->gtOper == GT_CALL); assert(callInfo != nullptr); if (compIsForInlining() && opcode == CEE_CALLVIRT) { GenTree* callObj = call->AsCall()->gtCallThisArg->GetNode(); if ((call->AsCall()->IsVirtual() || (call->gtFlags & GTF_CALL_NULLCHECK)) && impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, call->AsCall()->gtCallArgs, callObj, impInlineInfo->inlArgInfo)) { impInlineInfo->thisDereferencedFirst = true; } } #if defined(DEBUG) || defined(INLINE_DATA) // Keep track of the raw IL offset of the call call->AsCall()->gtRawILOffset = rawILOffset; #endif // defined(DEBUG) || defined(INLINE_DATA) // Is it an inline candidate? 
impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo); } // Extra checks for tail calls and tail recursion. // // A tail recursive call is a potential loop from the current block to the start of the root method. // If we see a tail recursive call, mark the blocks from the call site back to the entry as potentially // being in a loop. // // Note: if we're importing an inlinee we don't mark the right set of blocks, but by then it's too // late. Currently this doesn't lead to problems. See GitHub issue 33529. // // OSR also needs to handle tail calls specially: // * block profiling in OSR methods needs to ensure probes happen before tail calls, not after. // * the root method entry must be imported if there's a recursive tail call or a potentially // inlineable tail call. // if ((tailCallFlags != 0) && canTailCall) { if (gtIsRecursiveCall(methHnd)) { assert(verCurrentState.esStackDepth == 0); BasicBlock* loopHead = nullptr; if (!compIsForInlining() && opts.IsOSR()) { // For root method OSR we may branch back to the actual method entry, // which is not fgFirstBB, and which we will need to import. assert(fgEntryBB != nullptr); loopHead = fgEntryBB; } else { // For normal jitting we may branch back to the firstBB; this // should already be imported. loopHead = fgFirstBB; } JITDUMP("\nTail recursive call [%06u] in the method. Mark " FMT_BB " to " FMT_BB " as having a backward branch.\n", dspTreeID(call), loopHead->bbNum, compCurBB->bbNum); fgMarkBackwardJump(loopHead, compCurBB); } // We only do these OSR checks in the root method because: // * If we fail to import the root method entry when importing the root method, we can't go back // and import it during inlining. So instead of checking jsut for recursive tail calls we also // have to check for anything that might introduce a recursive tail call. // * We only instrument root method blocks in OSR methods, // if (opts.IsOSR() && !compIsForInlining()) { // If a root method tail call candidate block is not a BBJ_RETURN, it should have a unique // BBJ_RETURN successor. Mark that successor so we can handle it specially during profile // instrumentation. // if (compCurBB->bbJumpKind != BBJ_RETURN) { BasicBlock* const successor = compCurBB->GetUniqueSucc(); assert(successor->bbJumpKind == BBJ_RETURN); successor->bbFlags |= BBF_TAILCALL_SUCCESSOR; optMethodFlags |= OMF_HAS_TAILCALL_SUCCESSOR; } // If this call might eventually turn into a loop back to method entry, make sure we // import the method entry. // assert(call->IsCall()); GenTreeCall* const actualCall = call->AsCall(); const bool mustImportEntryBlock = gtIsRecursiveCall(methHnd) || actualCall->IsInlineCandidate() || actualCall->IsGuardedDevirtualizationCandidate(); // Only schedule importation if we're not currently importing. // if (mustImportEntryBlock && (compCurBB != fgEntryBB)) { JITDUMP("\nOSR: inlineable or recursive tail call [%06u] in the method, so scheduling " FMT_BB " for importation\n", dspTreeID(call), fgEntryBB->bbNum); impImportBlockPending(fgEntryBB); } } } if ((sig->flags & CORINFO_SIGFLAG_FAT_CALL) != 0) { assert(opcode == CEE_CALLI || callInfo->kind == CORINFO_CALL_CODE_POINTER); addFatPointerCandidate(call->AsCall()); } DONE_CALL: // Push or append the result of the call if (callRetTyp == TYP_VOID) { if (opcode == CEE_NEWOBJ) { // we actually did push something, so don't spill the thing we just pushed. 
assert(verCurrentState.esStackDepth > 0); impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtDI); } else { impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } } else { impSpillSpecialSideEff(); if (clsFlags & CORINFO_FLG_ARRAY) { eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig); } typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass); tiRetVal.NormaliseForStack(); // The CEE_READONLY prefix modifies the verification semantics of an Address // operation on an array type. if ((clsFlags & CORINFO_FLG_ARRAY) && isReadonlyCall && tiRetVal.IsByRef()) { tiRetVal.SetIsReadonlyByRef(); } if (call->IsCall()) { // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call) GenTreeCall* origCall = call->AsCall(); const bool isFatPointerCandidate = origCall->IsFatPointerCandidate(); const bool isInlineCandidate = origCall->IsInlineCandidate(); const bool isGuardedDevirtualizationCandidate = origCall->IsGuardedDevirtualizationCandidate(); if (varTypeIsStruct(callRetTyp)) { // Need to treat all "split tree" cases here, not just inline candidates call = impFixupCallStructReturn(call->AsCall(), sig->retTypeClass); } // TODO: consider handling fatcalli cases this way too...? if (isInlineCandidate || isGuardedDevirtualizationCandidate) { // We should not have made any adjustments in impFixupCallStructReturn // as we defer those until we know the fate of the call. assert(call == origCall); assert(opts.OptEnabled(CLFLG_INLINING)); assert(!isFatPointerCandidate); // We should not try to inline calli. // Make the call its own tree (spill the stack if needed). // Do not consume the debug info here. This is particularly // important if we give up on the inline, in which case the // call will typically end up in the statement that contains // the GT_RET_EXPR that we leave on the stack. impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI, false); // TODO: Still using the widened type. GenTree* retExpr = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp), compCurBB->bbFlags); // Link the retExpr to the call so if necessary we can manipulate it later. origCall->gtInlineCandidateInfo->retExpr = retExpr; // Propagate retExpr as the placeholder for the call. call = retExpr; } else { // If the call is virtual, and has a generics context, and is not going to have a class probe, // record the context for possible use during late devirt. // // If we ever want to devirt at Tier0, and/or see issues where OSR methods under PGO lose // important devirtualizations, we'll want to allow both a class probe and a captured context. // if (origCall->IsVirtual() && (origCall->gtCallType != CT_INDIRECT) && (exactContextHnd != nullptr) && (origCall->gtClassProfileCandidateInfo == nullptr)) { JITDUMP("\nSaving context %p for call [%06u]\n", exactContextHnd, dspTreeID(origCall)); origCall->gtCallMoreFlags |= GTF_CALL_M_LATE_DEVIRT; LateDevirtualizationInfo* const info = new (this, CMK_Inlining) LateDevirtualizationInfo; info->exactContextHnd = exactContextHnd; origCall->gtLateDevirtualizationInfo = info; } if (isFatPointerCandidate) { // fatPointer candidates should be in statements of the form call() or var = call(). // Such form allows to find statements with fat calls without walking through whole trees // and removes problems with cutting trees. 
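                    // Illustrative: a fat-pointer 'calli' result is forced into the shape
                    //     tmp = calli(...)
                    // below (when it isn't already a GT_LCL_VAR), so later rewriting can locate
                    // the call at statement root level.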
assert(!bIntrinsicImported); assert(IsTargetAbi(CORINFO_CORERT_ABI)); if (call->OperGet() != GT_LCL_VAR) // can be already converted by impFixupCallStructReturn. { unsigned calliSlot = lvaGrabTemp(true DEBUGARG("calli")); LclVarDsc* varDsc = lvaGetDesc(calliSlot); varDsc->lvVerTypeInfo = tiRetVal; impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE); // impAssignTempGen can change src arg list and return type for call that returns struct. var_types type = genActualType(lvaTable[calliSlot].TypeGet()); call = gtNewLclvNode(calliSlot, type); } } // For non-candidates we must also spill, since we // might have locals live on the eval stack that this // call can modify. // // Suppress this for certain well-known call targets // that we know won't modify locals, eg calls that are // recognized in gtCanOptimizeTypeEquality. Otherwise // we may break key fragile pattern matches later on. bool spillStack = true; if (call->IsCall()) { GenTreeCall* callNode = call->AsCall(); if ((callNode->gtCallType == CT_HELPER) && (gtIsTypeHandleToRuntimeTypeHelper(callNode) || gtIsTypeHandleToRuntimeTypeHandleHelper(callNode))) { spillStack = false; } else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0) { spillStack = false; } } if (spillStack) { impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call")); } } } if (!bIntrinsicImported) { //------------------------------------------------------------------------- // /* If the call is of a small type and the callee is managed, the callee will normalize the result before returning. However, we need to normalize small type values returned by unmanaged functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here if we use the shorter inlined pinvoke stub. */ if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT)) { call = gtNewCastNode(genActualType(callRetTyp), call, false, callRetTyp); } } impPushOnStack(call, tiRetVal); } // VSD functions get a new call target each time we getCallInfo, so clear the cache. // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out. 
    // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
    //     callInfoCache.uncacheCallInfo();

    return callRetTyp;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif

bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo, CorInfoCallConvExtension callConv)
{
    CorInfoType corType = methInfo->args.retType;

    if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
    {
        // We have some kind of STRUCT being returned
        structPassingKind howToReturnStruct = SPK_Unknown;

        var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, callConv, &howToReturnStruct);

        if (howToReturnStruct == SPK_ByReference)
        {
            return true;
        }
    }

    return false;
}

#ifdef DEBUG
//
var_types Compiler::impImportJitTestLabelMark(int numArgs)
{
    TestLabelAndNum tlAndN;
    if (numArgs == 2)
    {
        tlAndN.m_num  = 0;
        StackEntry se = impPopStack();
        assert(se.seTypeInfo.GetType() == TI_INT);
        GenTree* val = se.val;
        assert(val->IsCnsIntOrI());
        tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
    }
    else if (numArgs == 3)
    {
        StackEntry se = impPopStack();
        assert(se.seTypeInfo.GetType() == TI_INT);
        GenTree* val = se.val;
        assert(val->IsCnsIntOrI());
        tlAndN.m_num = val->AsIntConCommon()->IconValue();
        se           = impPopStack();
        assert(se.seTypeInfo.GetType() == TI_INT);
        val = se.val;
        assert(val->IsCnsIntOrI());
        tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
    }
    else
    {
        assert(false);
    }

    StackEntry expSe = impPopStack();
    GenTree*   node  = expSe.val;

    // There are a small number of special cases, where we actually put the annotation on a subnode.
    if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
    {
        // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
        // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
        // offset within the static field block whose address is returned by the helper call.
        // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
        assert(node->OperGet() == GT_IND);
        tlAndN.m_num -= 100;
        GetNodeTestData()->Set(node->AsOp()->gtOp1, tlAndN);
        GetNodeTestData()->Remove(node);
    }
    else
    {
        GetNodeTestData()->Set(node, tlAndN);
    }

    impPushOnStack(node, expSe.seTypeInfo);
    return node->TypeGet();
}
#endif // DEBUG

//-----------------------------------------------------------------------------------
// impFixupCallStructReturn: For a call node that returns a struct do one of the following:
// - set the flag to indicate struct return via retbuf arg;
// - adjust the return type to a SIMD type if it is returned in 1 reg;
// - spill call result into a temp if it is returned into 2 registers or more and not tail call or inline candidate.
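//   (Example, assuming a typical two-register struct-return ABI: a 16-byte struct returned in
//   two registers takes the "spill into a temp" path unless the call is a tail call or an
//   inline candidate.)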
// // Arguments: // call - GT_CALL GenTree node // retClsHnd - Class handle of return type of the call // // Return Value: // Returns new GenTree node after fixing struct return of call node // GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd) { if (!varTypeIsStruct(call)) { return call; } call->gtRetClsHnd = retClsHnd; #if FEATURE_MULTIREG_RET call->InitializeStructReturnType(this, retClsHnd, call->GetUnmanagedCallConv()); const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc(); const unsigned retRegCount = retTypeDesc->GetReturnRegCount(); #else // !FEATURE_MULTIREG_RET const unsigned retRegCount = 1; #endif // !FEATURE_MULTIREG_RET structPassingKind howToReturnStruct; var_types returnType = getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct); if (howToReturnStruct == SPK_ByReference) { assert(returnType == TYP_UNKNOWN); call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG; return call; } // Recognize SIMD types as we do for LCL_VARs, // note it could be not the ABI specific type, for example, on x64 we can set 'TYP_SIMD8` // for `System.Numerics.Vector2` here but lower will change it to long as ABI dictates. var_types simdReturnType = impNormStructType(call->gtRetClsHnd); if (simdReturnType != call->TypeGet()) { assert(varTypeIsSIMD(simdReturnType)); JITDUMP("changing the type of a call [%06u] from %s to %s\n", dspTreeID(call), varTypeName(call->TypeGet()), varTypeName(simdReturnType)); call->ChangeType(simdReturnType); } if (retRegCount == 1) { return call; } #if FEATURE_MULTIREG_RET assert(varTypeIsStruct(call)); // It could be a SIMD returned in several regs. assert(returnType == TYP_STRUCT); assert((howToReturnStruct == SPK_ByValueAsHfa) || (howToReturnStruct == SPK_ByValue)); #ifdef UNIX_AMD64_ABI // must be a struct returned in two registers assert(retRegCount == 2); #else // not UNIX_AMD64_ABI assert(retRegCount >= 2); #endif // not UNIX_AMD64_ABI if (!call->CanTailCall() && !call->IsInlineCandidate()) { // Force a call returning multi-reg struct to be always of the IR form // tmp = call // // No need to assign a multi-reg struct to a local var if: // - It is a tail call or // - The call is marked for in-lining later return impAssignMultiRegTypeToVar(call, retClsHnd DEBUGARG(call->GetUnmanagedCallConv())); } return call; #endif // FEATURE_MULTIREG_RET } /***************************************************************************** For struct return values, re-type the operand in the case where the ABI does not use a struct return buffer */ //------------------------------------------------------------------------ // impFixupStructReturnType: For struct return values it sets appropriate flags in MULTIREG returns case; // in non-multiref case it handles two special helpers: `CORINFO_HELP_GETFIELDSTRUCT`, `CORINFO_HELP_UNBOX_NULLABLE`. // // Arguments: // op - the return value; // retClsHnd - the struct handle; // unmgdCallConv - the calling convention of the function that returns this struct. // // Return Value: // the result tree that does the return. 
// GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension unmgdCallConv) { assert(varTypeIsStruct(info.compRetType)); assert(info.compRetBuffArg == BAD_VAR_NUM); JITDUMP("\nimpFixupStructReturnType: retyping\n"); DISPTREE(op); #if defined(TARGET_XARCH) #if FEATURE_MULTIREG_RET // No VarArgs for CoreCLR on x64 Unix UNIX_AMD64_ABI_ONLY(assert(!info.compIsVarArgs)); // Is method returning a multi-reg struct? if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd, unmgdCallConv)) { // In case of multi-reg struct return, we force IR to be one of the following: // GT_RETURN(lclvar) or GT_RETURN(call). If op is anything other than a // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp). if (op->gtOper == GT_LCL_VAR) { // Note that this is a multi-reg return. unsigned lclNum = op->AsLclVarCommon()->GetLclNum(); lvaTable[lclNum].lvIsMultiRegRet = true; // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. op->gtFlags |= GTF_DONT_CSE; return op; } if (op->gtOper == GT_CALL) { return op; } return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv)); } #else assert(info.compRetNativeType != TYP_STRUCT); #endif // defined(UNIX_AMD64_ABI) || defined(TARGET_X86) #elif FEATURE_MULTIREG_RET && defined(TARGET_ARM) if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd)) { if (op->gtOper == GT_LCL_VAR) { // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT unsigned lclNum = op->AsLclVarCommon()->GetLclNum(); // Make sure this struct type stays as struct so that we can return it as an HFA lvaTable[lclNum].lvIsMultiRegRet = true; // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. op->gtFlags |= GTF_DONT_CSE; return op; } if (op->gtOper == GT_CALL) { if (op->AsCall()->IsVarargs()) { // We cannot tail call because control needs to return to fixup the calling // convention for result return. op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL; op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; } else { return op; } } return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv)); } #elif FEATURE_MULTIREG_RET && defined(TARGET_ARM64) // Is method returning a multi-reg struct? if (IsMultiRegReturnedType(retClsHnd, unmgdCallConv)) { if (op->gtOper == GT_LCL_VAR) { // This LCL_VAR stays as a TYP_STRUCT unsigned lclNum = op->AsLclVarCommon()->GetLclNum(); if (!lvaIsImplicitByRefLocal(lclNum)) { // Make sure this struct type is not struct promoted lvaTable[lclNum].lvIsMultiRegRet = true; // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. op->gtFlags |= GTF_DONT_CSE; return op; } } if (op->gtOper == GT_CALL) { if (op->AsCall()->IsVarargs()) { // We cannot tail call because control needs to return to fixup the calling // convention for result return. op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL; op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; } else { return op; } } return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv)); } #endif // FEATURE_MULTIREG_RET && TARGET_ARM64 if (!op->IsCall() || !op->AsCall()->TreatAsHasRetBufArg(this)) { // Don't retype `struct` as a primitive type in `ret` instruction. return op; } // This must be one of those 'special' helpers that don't // really have a return buffer, but instead use it as a way // to keep the trees cleaner with fewer address-taken temps. 
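    // (Illustrative shape of the rewrite performed below:
    //      RETURN(call CORINFO_HELP_UNBOX_NULLABLE(...))
    //   => tmp = call CORINFO_HELP_UNBOX_NULLABLE(...); RETURN(tmp))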
// // Well now we have to materialize the the return buffer as // an address-taken temp. Then we can return the temp. // // NOTE: this code assumes that since the call directly // feeds the return, then the call must be returning the // same structure/class/type. // unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer")); // No need to spill anything as we're about to return. impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE); op = gtNewLclvNode(tmpNum, info.compRetType); JITDUMP("\nimpFixupStructReturnType: created a pseudo-return buffer for a special helper\n"); DISPTREE(op); return op; } /***************************************************************************** CEE_LEAVE may be jumping out of a protected block, viz, a catch or a finally-protected try. We find the finally blocks protecting the current offset (in order) by walking over the complete exception table and finding enclosing clauses. This assumes that the table is sorted. This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS. If we are leaving a catch handler, we need to attach the CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks. After this function, the BBJ_LEAVE block has been converted to a different type. */ #if !defined(FEATURE_EH_FUNCLETS) void Compiler::impImportLeave(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\nBefore import CEE_LEAVE:\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created) unsigned blkAddr = block->bbCodeOffs; BasicBlock* leaveTarget = block->bbJumpDest; unsigned jmpAddr = leaveTarget->bbCodeOffs; // LEAVE clears the stack, spill side effects, and set stack to 0 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave")); verCurrentState.esStackDepth = 0; assert(block->bbJumpKind == BBJ_LEAVE); assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary BasicBlock* step = DUMMY_INIT(NULL); unsigned encFinallies = 0; // Number of enclosing finallies. GenTree* endCatches = NULL; Statement* endLFinStmt = NULL; // The statement tree to indicate the end of locally-invoked finally. unsigned XTnum; EHblkDsc* HBtab; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { // Grab the handler offsets IL_OFFSET tryBeg = HBtab->ebdTryBegOffs(); IL_OFFSET tryEnd = HBtab->ebdTryEndOffs(); IL_OFFSET hndBeg = HBtab->ebdHndBegOffs(); IL_OFFSET hndEnd = HBtab->ebdHndEndOffs(); /* Is this a catch-handler we are CEE_LEAVEing out of? * If so, we need to call CORINFO_HELP_ENDCATCH. 
*/ if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd)) { // Can't CEE_LEAVE out of a finally/fault handler if (HBtab->HasFinallyOrFaultHandler()) BADCODE("leave out of fault/finally block"); // Create the call to CORINFO_HELP_ENDCATCH GenTree* endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID); // Make a list of all the currently pending endCatches if (endCatches) endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch); else endCatches = endCatch; #ifdef DEBUG if (verbose) { printf("impImportLeave - " FMT_BB " jumping out of catch handler EH#%u, adding call to " "CORINFO_HELP_ENDCATCH\n", block->bbNum, XTnum); } #endif } else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) { /* This is a finally-protected try we are jumping out of */ /* If there are any pending endCatches, and we have already jumped out of a finally-protected try, then the endCatches have to be put in a block in an outer try for async exceptions to work correctly. Else, just use append to the original block */ BasicBlock* callBlock; assert(!encFinallies == !endLFinStmt); // if we have finallies, we better have an endLFin tree, and vice-versa if (encFinallies == 0) { assert(step == DUMMY_INIT(NULL)); callBlock = block; callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY if (endCatches) impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY " "block %s\n", callBlock->dspToString()); } #endif } else { assert(step != DUMMY_INIT(NULL)); /* Calling the finally block */ callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step); assert(step->bbJumpKind == BBJ_ALWAYS); step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next // finally in the chain) step->bbJumpDest->bbRefs++; /* The new block will inherit this block's weight */ callBlock->inheritWeight(block); #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block %s\n", callBlock->dspToString()); } #endif Statement* lastStmt; if (endCatches) { lastStmt = gtNewStmt(endCatches); endLFinStmt->SetNextStmt(lastStmt); lastStmt->SetPrevStmt(endLFinStmt); } else { lastStmt = endLFinStmt; } // note that this sets BBF_IMPORTED on the block impEndTreeList(callBlock, endLFinStmt, lastStmt); } step = fgNewBBafter(BBJ_ALWAYS, callBlock, true); /* The new block will inherit this block's weight */ step->inheritWeight(block); step->bbFlags |= BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block %s\n", step->dspToString()); } #endif unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel; assert(finallyNesting <= compHndBBtabCount); callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler. 
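                // Illustrative sketch (block names B0/S0/B1/S1 are informal and used only in this
                // comment): leaving two nested finally-protected 'try' regions from B0 yields a
                // chain roughly like
                //
                //   B0 (BBJ_CALLFINALLY) --> finally #0
                //   S0 (BBJ_ALWAYS)      --> B1         // step block; finally #0 returns here
                //   B1 (BBJ_CALLFINALLY) --> finally #1
                //   S1 (BBJ_ALWAYS)      --> finalStep  // finalStep then jumps to the LEAVE target
                //
                // The GT_END_LFIN statement created below (together with any pending endCatches)
                // is appended to the next callBlock in the chain, or to finalStep for the last
                // finally.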
GenTree* endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting); endLFinStmt = gtNewStmt(endLFin); endCatches = NULL; encFinallies++; invalidatePreds = true; } } /* Append any remaining endCatches, if any */ assert(!encFinallies == !endLFinStmt); if (encFinallies == 0) { assert(step == DUMMY_INIT(NULL)); block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS if (endCatches) impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); #ifdef DEBUG if (verbose) { printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS " "block %s\n", block->dspToString()); } #endif } else { // If leaveTarget is the start of another try block, we want to make sure that // we do not insert finalStep into that try block. Hence, we find the enclosing // try block. unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget); // Insert a new BB either in the try region indicated by tryIndex or // the handler region indicated by leaveTarget->bbHndIndex, // depending on which is the inner region. BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step); finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS; step->bbJumpDest = finalStep; /* The new block will inherit this block's weight */ finalStep->inheritWeight(block); #ifdef DEBUG if (verbose) { printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block %s\n", encFinallies, finalStep->dspToString()); } #endif Statement* lastStmt; if (endCatches) { lastStmt = gtNewStmt(endCatches); endLFinStmt->SetNextStmt(lastStmt); lastStmt->SetPrevStmt(endLFinStmt); } else { lastStmt = endLFinStmt; } impEndTreeList(finalStep, endLFinStmt, lastStmt); finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE // Queue up the jump target for importing impImportBlockPending(leaveTarget); invalidatePreds = true; } if (invalidatePreds && fgComputePredsDone) { JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n"); fgRemovePreds(); } #ifdef DEBUG fgVerifyHandlerTab(); if (verbose) { printf("\nAfter import CEE_LEAVE:\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG } #else // FEATURE_EH_FUNCLETS void Compiler::impImportLeave(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\nBefore import CEE_LEAVE in " FMT_BB " (targetting " FMT_BB "):\n", block->bbNum, block->bbJumpDest->bbNum); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created) unsigned blkAddr = block->bbCodeOffs; BasicBlock* leaveTarget = block->bbJumpDest; unsigned jmpAddr = leaveTarget->bbCodeOffs; // LEAVE clears the stack, spill side effects, and set stack to 0 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave")); verCurrentState.esStackDepth = 0; assert(block->bbJumpKind == BBJ_LEAVE); assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary BasicBlock* step = nullptr; enum StepType { // No step type; step == NULL. ST_None, // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair? // That is, is step->bbJumpDest where a finally will return to? ST_FinallyReturn, // The step block is a catch return. ST_Catch, // The step block is in a "try", created as the target for a finally return or the target for a catch return. 
ST_Try }; StepType stepType = ST_None; unsigned XTnum; EHblkDsc* HBtab; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { // Grab the handler offsets IL_OFFSET tryBeg = HBtab->ebdTryBegOffs(); IL_OFFSET tryEnd = HBtab->ebdTryEndOffs(); IL_OFFSET hndBeg = HBtab->ebdHndBegOffs(); IL_OFFSET hndEnd = HBtab->ebdHndEndOffs(); /* Is this a catch-handler we are CEE_LEAVEing out of? */ if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd)) { // Can't CEE_LEAVE out of a finally/fault handler if (HBtab->HasFinallyOrFaultHandler()) { BADCODE("leave out of fault/finally block"); } /* We are jumping out of a catch */ if (step == nullptr) { step = block; step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET stepType = ST_Catch; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a catch (EH#%u), convert block " FMT_BB " to BBJ_EHCATCHRET " "block\n", XTnum, step->bbNum); } #endif } else { BasicBlock* exitBlock; /* Create a new catch exit block in the catch region for the existing step block to jump to in this * scope */ exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step); assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET)); step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch // exit) returns to this block step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ exitBlock->inheritWeight(block); exitBlock->bbFlags |= BBF_IMPORTED; /* This exit block is the new step */ step = exitBlock; stepType = ST_Catch; invalidatePreds = true; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block " FMT_BB "\n", XTnum, exitBlock->bbNum); } #endif } } else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) { /* We are jumping out of a finally-protected try */ BasicBlock* callBlock; if (step == nullptr) { #if FEATURE_EH_CALLFINALLY_THUNKS // Put the call to the finally in the enclosing region. unsigned callFinallyTryIndex = (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1; unsigned callFinallyHndIndex = (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1; callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block); // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE, // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the // next block, and flow optimizations will remove it. 
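                    // Rough illustration of the shape produced by this path (informal, ignoring
                    // weights and flags):
                    //
                    //   block     (was BBJ_LEAVE, now BBJ_ALWAYS)            --> callBlock
                    //   callBlock (BBJ_CALLFINALLY, in the enclosing region) --> HBtab's finally
                    //
                    // The paired BBJ_ALWAYS step block that the finally returns to is created
                    // after this if/else via fgNewBBafter(BBJ_ALWAYS, callBlock, true).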
block->bbJumpKind = BBJ_ALWAYS; block->bbJumpDest = callBlock; block->bbJumpDest->bbRefs++; /* The new block will inherit this block's weight */ callBlock->inheritWeight(block); callBlock->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB " to " "BBJ_ALWAYS, add BBJ_CALLFINALLY block " FMT_BB "\n", XTnum, block->bbNum, callBlock->bbNum); } #endif #else // !FEATURE_EH_CALLFINALLY_THUNKS callBlock = block; callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB " to " "BBJ_CALLFINALLY block\n", XTnum, callBlock->bbNum); } #endif #endif // !FEATURE_EH_CALLFINALLY_THUNKS } else { // Calling the finally block. We already have a step block that is either the call-to-finally from a // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by // a 'finally'), or the step block is the return from a catch. // // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will // automatically re-raise the exception, using the return address of the catch (that is, the target // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64, // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly // within the 'try' region protected by the finally, since we generate code in such a way that execution // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on // stack walks.) assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET)); #if FEATURE_EH_CALLFINALLY_THUNKS if (step->bbJumpKind == BBJ_EHCATCHRET) { // Need to create another step block in the 'try' region that will actually branch to the // call-to-finally thunk. BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step); step->bbJumpDest = step2; step->bbJumpDest->bbRefs++; step2->inheritWeight(block); step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is " "BBJ_EHCATCHRET (" FMT_BB "), new BBJ_ALWAYS step-step block " FMT_BB "\n", XTnum, step->bbNum, step2->bbNum); } #endif step = step2; assert(stepType == ST_Catch); // Leave it as catch type for now. } #endif // FEATURE_EH_CALLFINALLY_THUNKS #if FEATURE_EH_CALLFINALLY_THUNKS unsigned callFinallyTryIndex = (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1; unsigned callFinallyHndIndex = (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 
0 : HBtab->ebdEnclosingHndIndex + 1; #else // !FEATURE_EH_CALLFINALLY_THUNKS unsigned callFinallyTryIndex = XTnum + 1; unsigned callFinallyHndIndex = 0; // don't care #endif // !FEATURE_EH_CALLFINALLY_THUNKS callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step); step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next // finally in the chain) step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ callBlock->inheritWeight(block); callBlock->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY " "block " FMT_BB "\n", XTnum, callBlock->bbNum); } #endif } step = fgNewBBafter(BBJ_ALWAYS, callBlock, true); stepType = ST_FinallyReturn; /* The new block will inherit this block's weight */ step->inheritWeight(block); step->bbFlags |= BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) " "block " FMT_BB "\n", XTnum, step->bbNum); } #endif callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler. invalidatePreds = true; } else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) { // We are jumping out of a catch-protected try. // // If we are returning from a call to a finally, then we must have a step block within a try // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the // finally raises an exception), the VM will find this step block, notice that it is in a protected region, // and invoke the appropriate catch. // // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception), // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM, // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target // address of the catch return as the new exception address. That is, the re-raised exception appears to // occur at the catch return address. If this exception return address skips an enclosing try/catch that // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should. // For example: // // try { // try { // // something here raises ThreadAbortException // LEAVE LABEL_1; // no need to stop at LABEL_2 // } catch (Exception) { // // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so // // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode. // // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised // // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only // // need to do this transformation if the current EH block is a try/catch that catches // // ThreadAbortException (or one of its parents), however we might not be able to find that // // information, so currently we do it for all catch types. 
// LEAVE LABEL_1; // Convert this to LEAVE LABEL2; // } // LABEL_2: LEAVE LABEL_1; // inserted by this step creation code // } catch (ThreadAbortException) { // } // LABEL_1: // // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C# // compiler. if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch)) { BasicBlock* catchStep; assert(step); if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); } else { assert(stepType == ST_Catch); assert(step->bbJumpKind == BBJ_EHCATCHRET); } /* Create a new exit block in the try region for the existing step block to jump to in this scope */ catchStep = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step); step->bbJumpDest = catchStep; step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ catchStep->inheritWeight(block); catchStep->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { if (stepType == ST_FinallyReturn) { printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new " "BBJ_ALWAYS block " FMT_BB "\n", XTnum, catchStep->bbNum); } else { assert(stepType == ST_Catch); printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new " "BBJ_ALWAYS block " FMT_BB "\n", XTnum, catchStep->bbNum); } } #endif // DEBUG /* This block is the new step */ step = catchStep; stepType = ST_Try; invalidatePreds = true; } } } if (step == nullptr) { block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS #ifdef DEBUG if (verbose) { printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE " "block " FMT_BB " to BBJ_ALWAYS\n", block->bbNum); } #endif } else { step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) #ifdef DEBUG if (verbose) { printf("impImportLeave - final destination of step blocks set to " FMT_BB "\n", leaveTarget->bbNum); } #endif // Queue up the jump target for importing impImportBlockPending(leaveTarget); } if (invalidatePreds && fgComputePredsDone) { JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n"); fgRemovePreds(); } #ifdef DEBUG fgVerifyHandlerTab(); if (verbose) { printf("\nAfter import CEE_LEAVE:\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG } #endif // FEATURE_EH_FUNCLETS /*****************************************************************************/ // This is called when reimporting a leave block. It resets the JumpKind, // JumpDest, and bbNext to the original values void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) { #if defined(FEATURE_EH_FUNCLETS) // With EH Funclets, while importing leave opcode we create another block ending with BBJ_ALWAYS (call it B1) // and the block containing leave (say B0) is marked as BBJ_CALLFINALLY. Say for some reason we reimport B0, // it is reset (in this routine) by marking as ending with BBJ_LEAVE and further down when B0 is reimported, we // create another BBJ_ALWAYS (call it B2). 
In this process B1 gets orphaned and any blocks to which B1 is the // only predecessor are also considered orphans and attempted to be deleted. // // try { // .... // try // { // .... // leave OUTSIDE; // B0 is the block containing this leave, following this would be B1 // } finally { } // } finally { } // OUTSIDE: // // In the above nested try-finally example, we create a step block (call it Bstep) which in branches to a block // where a finally would branch to (and such block is marked as finally target). Block B1 branches to step block. // Because of re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be removed. To // work around this we will duplicate B0 (call it B0Dup) before reseting. B0Dup is marked as BBJ_CALLFINALLY and // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1 // will be treated as pair and handled correctly. if (block->bbJumpKind == BBJ_CALLFINALLY) { BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind); dupBlock->bbFlags = block->bbFlags; dupBlock->bbJumpDest = block->bbJumpDest; dupBlock->copyEHRegion(block); dupBlock->bbCatchTyp = block->bbCatchTyp; // Mark this block as // a) not referenced by any other block to make sure that it gets deleted // b) weight zero // c) prevent from being imported // d) as internal // e) as rarely run dupBlock->bbRefs = 0; dupBlock->bbWeight = BB_ZERO_WEIGHT; dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY; // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS // will be next to each other. fgInsertBBafter(block, dupBlock); #ifdef DEBUG if (verbose) { printf("New Basic Block " FMT_BB " duplicate of " FMT_BB " created.\n", dupBlock->bbNum, block->bbNum); } #endif } #endif // FEATURE_EH_FUNCLETS block->bbJumpKind = BBJ_LEAVE; fgInitBBLookup(); block->bbJumpDest = fgLookupBB(jmpAddr); // We will leave the BBJ_ALWAYS block we introduced. When it's reimported // the BBJ_ALWAYS block will be unreachable, and will be removed after. The // reason we don't want to remove the block at this point is that if we call // fgInitBBLookup() again we will do it wrong as the BBJ_ALWAYS block won't be // added and the linked list length will be different than fgBBcount. } /*****************************************************************************/ // Get the first non-prefix opcode. Used for verification of valid combinations // of prefixes and actual opcodes. OPCODE Compiler::impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp) { while (codeAddr < codeEndp) { OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr); codeAddr += sizeof(__int8); if (opcode == CEE_PREFIX1) { if (codeAddr >= codeEndp) { break; } opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256); codeAddr += sizeof(__int8); } switch (opcode) { case CEE_UNALIGNED: case CEE_VOLATILE: case CEE_TAILCALL: case CEE_CONSTRAINED: case CEE_READONLY: break; default: return opcode; } codeAddr += opcodeSizes[opcode]; } return CEE_ILLEGAL; } /*****************************************************************************/ // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes void Compiler::impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix) { OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (!( // Opcode of all ldind and stdind happen to be in continuous, except stind.i. 
((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) || (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) || (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) || // volatile. prefix is allowed with the ldsfld and stsfld (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD))))) { BADCODE("Invalid opcode for unaligned. or volatile. prefix"); } } /*****************************************************************************/ #ifdef DEBUG #undef RETURN // undef contracts RETURN macro enum controlFlow_t { NEXT, CALL, RETURN, THROW, BRANCH, COND_BRANCH, BREAK, PHI, META, }; const static controlFlow_t controlFlow[] = { #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow, #include "opcode.def" #undef OPDEF }; #endif // DEBUG /***************************************************************************** * Determine the result type of an arithemetic operation * On 64-bit inserts upcasts when native int is mixed with int32 */ var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2) { var_types type = TYP_UNDEF; GenTree* op1 = *pOp1; GenTree* op2 = *pOp2; // Arithemetic operations are generally only allowed with // primitive types, but certain operations are allowed // with byrefs if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF)) { if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF)) { // byref1-byref2 => gives a native int type = TYP_I_IMPL; } else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF)) { // [native] int - byref => gives a native int // // The reason is that it is possible, in managed C++, // to have a tree like this: // // - // / \. // / \. // / \. // / \. // const(h) int addr byref // // <BUGNUM> VSW 318822 </BUGNUM> // // So here we decide to make the resulting type to be a native int. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT if (genActualType(op1->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT type = TYP_I_IMPL; } else { // byref - [native] int => gives a byref assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet())); #ifdef TARGET_64BIT if ((genActualType(op2->TypeGet()) != TYP_I_IMPL)) { // insert an explicit upcast op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT type = TYP_BYREF; } } else if ((oper == GT_ADD) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF)) { // byref + [native] int => gives a byref // (or) // [native] int + byref => gives a byref // only one can be a byref : byref op byref not allowed assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF); assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet())); #ifdef TARGET_64BIT if (genActualType(op2->TypeGet()) == TYP_BYREF) { if (genActualType(op1->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } } else if (genActualType(op2->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? 
TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT type = TYP_BYREF; } #ifdef TARGET_64BIT else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL) { assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType)); // int + long => gives long // long + int => gives long // we get this because in the IL the long isn't Int64, it's just IntPtr if (genActualType(op1->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } else if (genActualType(op2->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } type = TYP_I_IMPL; } #else // 32-bit TARGET else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG) { assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType)); // int + long => gives long // long + int => gives long type = TYP_LONG; } #endif // TARGET_64BIT else { // int + int => gives an int assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF); assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) || (varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))); type = genActualType(op1->gtType); // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT. // Otherwise, turn floats into doubles if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT)) { assert(genActualType(op2->gtType) == TYP_DOUBLE); type = TYP_DOUBLE; } } assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT); return type; } //------------------------------------------------------------------------ // impOptimizeCastClassOrIsInst: attempt to resolve a cast when jitting // // Arguments: // op1 - value to cast // pResolvedToken - resolved token for type to cast to // isCastClass - true if this is a castclass, false if isinst // // Return Value: // tree representing optimized cast, or null if no optimization possible GenTree* Compiler::impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass) { assert(op1->TypeGet() == TYP_REF); // Don't optimize for minopts or debug codegen. if (opts.OptimizationDisabled()) { return nullptr; } // See what we know about the type of the object being cast. bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE fromClass = gtGetClassHandle(op1, &isExact, &isNonNull); if (fromClass != nullptr) { CORINFO_CLASS_HANDLE toClass = pResolvedToken->hClass; JITDUMP("\nConsidering optimization of %s from %s%p (%s) to %p (%s)\n", isCastClass ? "castclass" : "isinst", isExact ? "exact " : "", dspPtr(fromClass), info.compCompHnd->getClassName(fromClass), dspPtr(toClass), info.compCompHnd->getClassName(toClass)); // Perhaps we know if the cast will succeed or fail. TypeCompareState castResult = info.compCompHnd->compareTypesForCast(fromClass, toClass); if (castResult == TypeCompareState::Must) { // Cast will succeed, result is simply op1. JITDUMP("Cast will succeed, optimizing to simply return input\n"); return op1; } else if (castResult == TypeCompareState::MustNot) { // See if we can sharpen exactness by looking for final classes if (!isExact) { isExact = impIsClassExact(fromClass); } // Cast to exact type will fail. 
Handle case where we have // an exact type (that is, fromClass is not a subtype) // and we're not going to throw on failure. if (isExact && !isCastClass) { JITDUMP("Cast will fail, optimizing to return null\n"); GenTree* result = gtNewIconNode(0, TYP_REF); // If the cast was fed by a box, we can remove that too. if (op1->IsBoxedValue()) { JITDUMP("Also removing upstream box\n"); gtTryRemoveBoxUpstreamEffects(op1); } return result; } else if (isExact) { JITDUMP("Not optimizing failing castclass (yet)\n"); } else { JITDUMP("Can't optimize since fromClass is inexact\n"); } } else { JITDUMP("Result of cast unknown, must generate runtime test\n"); } } else { JITDUMP("\nCan't optimize since fromClass is unknown\n"); } return nullptr; } //------------------------------------------------------------------------ // impCastClassOrIsInstToTree: build and import castclass/isinst // // Arguments: // op1 - value to cast // op2 - type handle for type to cast to // pResolvedToken - resolved token from the cast operation // isCastClass - true if this is castclass, false means isinst // // Return Value: // Tree representing the cast // // Notes: // May expand into a series of runtime checks or a helper call. GenTree* Compiler::impCastClassOrIsInstToTree( GenTree* op1, GenTree* op2, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass, IL_OFFSET ilOffset) { assert(op1->TypeGet() == TYP_REF); // Optimistically assume the jit should expand this as an inline test bool shouldExpandInline = true; // Profitability check. // // Don't bother with inline expansion when jit is trying to // generate code quickly, or the cast is in code that won't run very // often, or the method already is pretty big. if (compCurBB->isRunRarely() || opts.OptimizationDisabled()) { // not worth the code expansion if jitting fast or in a rarely run block shouldExpandInline = false; } else if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals()) { // not worth creating an untracked local variable shouldExpandInline = false; } else if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) && (JitConfig.JitCastProfiling() == 1)) { // Optimizations are enabled but we're still instrumenting (including casts) if (isCastClass && !impIsClassExact(pResolvedToken->hClass)) { // Usually, we make a speculative assumption that it makes sense to expand castclass // even for non-sealed classes, but let's rely on PGO in this specific case shouldExpandInline = false; } } // Pessimistically assume the jit cannot expand this as an inline test bool canExpandInline = false; const CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass); // Legality check. // // Not all classclass/isinst operations can be inline expanded. // Check legality only if an inline expansion is desirable. if (shouldExpandInline) { if (isCastClass) { // Jit can only inline expand the normal CHKCASTCLASS helper. canExpandInline = (helper == CORINFO_HELP_CHKCASTCLASS); } else { if (helper == CORINFO_HELP_ISINSTANCEOFCLASS) { // If the class is exact, the jit can expand the IsInst check inline. canExpandInline = impIsClassExact(pResolvedToken->hClass); } } } const bool expandInline = canExpandInline && shouldExpandInline; if (!expandInline) { JITDUMP("\nExpanding %s as call because %s\n", isCastClass ? "castclass" : "isinst", canExpandInline ? 
"want smaller code or faster jitting" : "inline expansion not legal"); // If we CSE this class handle we prevent assertionProp from making SubType assertions // so instead we force the CSE logic to not consider CSE-ing this class handle. // op2->gtFlags |= GTF_DONT_CSE; GenTreeCall* call = gtNewHelperCallNode(helper, TYP_REF, gtNewCallArgs(op2, op1)); if (impIsCastHelperEligibleForClassProbe(call) && !impIsClassExact(pResolvedToken->hClass)) { ClassProfileCandidateInfo* pInfo = new (this, CMK_Inlining) ClassProfileCandidateInfo; pInfo->ilOffset = ilOffset; pInfo->probeIndex = info.compClassProbeCount++; call->gtClassProfileCandidateInfo = pInfo; compCurBB->bbFlags |= BBF_HAS_CLASS_PROFILE; } return call; } JITDUMP("\nExpanding %s inline\n", isCastClass ? "castclass" : "isinst"); impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2")); GenTree* temp; GenTree* condMT; // // expand the methodtable match: // // condMT ==> GT_NE // / \. // GT_IND op2 (typically CNS_INT) // | // op1Copy // // This can replace op1 with a GT_COMMA that evaluates op1 into a local // op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1")); // // op1 is now known to be a non-complex tree // thus we can use gtClone(op1) from now on // GenTree* op2Var = op2; if (isCastClass) { op2Var = fgInsertCommaFormTemp(&op2); lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true; } temp = gtNewMethodTableLookup(temp); condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2); GenTree* condNull; // // expand the null check: // // condNull ==> GT_EQ // / \. // op1Copy CNS_INT // null // condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF)); // // expand the true and false trees for the condMT // GenTree* condFalse = gtClone(op1); GenTree* condTrue; if (isCastClass) { // // use the special helper that skips the cases checked by our inlined cast // const CorInfoHelpFunc specialHelper = CORINFO_HELP_CHKCASTCLASS_SPECIAL; condTrue = gtNewHelperCallNode(specialHelper, TYP_REF, gtNewCallArgs(op2Var, gtClone(op1))); } else { condTrue = gtNewIconNode(0, TYP_REF); } GenTree* qmarkMT; // // Generate first QMARK - COLON tree // // qmarkMT ==> GT_QMARK // / \. // condMT GT_COLON // / \. // condFalse condTrue // temp = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse); qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp->AsColon()); if (isCastClass && impIsClassExact(pResolvedToken->hClass) && condTrue->OperIs(GT_CALL)) { // condTrue is used only for throwing InvalidCastException in case of casting to an exact class. condTrue->AsCall()->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN; } GenTree* qmarkNull; // // Generate second QMARK - COLON tree // // qmarkNull ==> GT_QMARK // / \. // condNull GT_COLON // / \. // qmarkMT op1Copy // temp = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT); qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp->AsColon()); qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF; // Make QMark node a top level node by spilling it. unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2")); impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE); // TODO-CQ: Is it possible op1 has a better type? // // See also gtGetHelperCallClassHandle where we make the same // determination for the helper call variants. 
LclVarDsc* lclDsc = lvaGetDesc(tmp); assert(lclDsc->lvSingleDef == 0); lclDsc->lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tmp); lvaSetClass(tmp, pResolvedToken->hClass); return gtNewLclvNode(tmp, TYP_REF); } #ifndef DEBUG #define assertImp(cond) ((void)0) #else #define assertImp(cond) \ do \ { \ if (!(cond)) \ { \ const int cchAssertImpBuf = 600; \ char* assertImpBuf = (char*)_alloca(cchAssertImpBuf); \ _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1, \ "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond, \ impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL", \ op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth); \ assertAbort(assertImpBuf, __FILE__, __LINE__); \ } \ } while (0) #endif // DEBUG //------------------------------------------------------------------------ // impBlockIsInALoop: check if a block might be in a loop // // Arguments: // block - block to check // // Returns: // true if the block might be in a loop. // // Notes: // Conservatively correct; may return true for some blocks that are // not actually in loops. // bool Compiler::impBlockIsInALoop(BasicBlock* block) { return (compIsForInlining() && ((impInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) != 0)) || ((block->bbFlags & BBF_BACKWARD_JUMP) != 0); } #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif /***************************************************************************** * Import the instr for the given basic block */ void Compiler::impImportBlockCode(BasicBlock* block) { #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind) #ifdef DEBUG if (verbose) { printf("\nImporting " FMT_BB " (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName); } #endif unsigned nxtStmtIndex = impInitBlockLineInfo(); IL_OFFSET nxtStmtOffs; CorInfoHelpFunc helper; CorInfoIsAccessAllowedResult accessAllowedResult; CORINFO_HELPER_DESC calloutHelper; const BYTE* lastLoadToken = nullptr; /* Get the tree list started */ impBeginTreeList(); #ifdef FEATURE_ON_STACK_REPLACEMENT bool enablePatchpoints = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0); #ifdef DEBUG // Optionally suppress patchpoints by method hash // static ConfigMethodRange JitEnablePatchpointRange; JitEnablePatchpointRange.EnsureInit(JitConfig.JitEnablePatchpointRange()); const unsigned hash = impInlineRoot()->info.compMethodHash(); const bool inRange = JitEnablePatchpointRange.Contains(hash); enablePatchpoints &= inRange; #endif // DEBUG if (enablePatchpoints) { // We don't inline at Tier0, if we do, we may need rethink our approach. // Could probably support inlines that don't introduce flow. // assert(!compIsForInlining()); // OSR is not yet supported for methods with explicit tail calls. // // But we also do not have to switch these methods to be optimized, as we should be // able to avoid getting trapped in Tier0 code by normal call counting. // So instead, just suppress adding patchpoints. // if (!compTailPrefixSeen) { // We only need to add patchpoints if the method can loop. // if (compHasBackwardJump) { assert(compCanHavePatchpoints()); // By default we use the "adaptive" strategy. // // This can create both source and target patchpoints within a given // loop structure, which isn't ideal, but is not incorrect. We will // just have some extra Tier0 overhead. 
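                // (Informal example: in a simple bottom-tested loop the loop head is a
                // backward-jump target and the conditional branch block at the bottom is a
                // backward-jump source, so depending on stack depth, handler nesting and the
                // number of backedges we may end up patchpointing either of them.)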
// // Todo: implement support for mid-block patchpoints. If `block` // is truly a backedge source (and not in a handler) then we should be // able to find a stack empty point somewhere in the block. // const int patchpointStrategy = JitConfig.TC_PatchpointStrategy(); bool addPatchpoint = false; bool mustUseTargetPatchpoint = false; switch (patchpointStrategy) { default: { // Patchpoints at backedge sources, if possible, otherwise targets. // addPatchpoint = ((block->bbFlags & BBF_BACKWARD_JUMP_SOURCE) == BBF_BACKWARD_JUMP_SOURCE); mustUseTargetPatchpoint = (verCurrentState.esStackDepth != 0) || block->hasHndIndex(); break; } case 1: { // Patchpoints at stackempty backedge targets. // Note if we have loops where the IL stack is not empty on the backedge we can't patchpoint // them. // // We should not have allowed OSR if there were backedges in handlers. // assert(!block->hasHndIndex()); addPatchpoint = ((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) == BBF_BACKWARD_JUMP_TARGET) && (verCurrentState.esStackDepth == 0); break; } case 2: { // Adaptive strategy. // // Patchpoints at backedge targets if there are multiple backedges, // otherwise at backedge sources, if possible. Note a block can be both; if so we // just need one patchpoint. // if ((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) == BBF_BACKWARD_JUMP_TARGET) { // We don't know backedge count, so just use ref count. // addPatchpoint = (block->bbRefs > 1) && (verCurrentState.esStackDepth == 0); } if (!addPatchpoint && ((block->bbFlags & BBF_BACKWARD_JUMP_SOURCE) == BBF_BACKWARD_JUMP_SOURCE)) { addPatchpoint = true; mustUseTargetPatchpoint = (verCurrentState.esStackDepth != 0) || block->hasHndIndex(); // Also force target patchpoint if target block has multiple (backedge) preds. // if (!mustUseTargetPatchpoint) { for (BasicBlock* const succBlock : block->Succs(this)) { if ((succBlock->bbNum <= block->bbNum) && (succBlock->bbRefs > 1)) { mustUseTargetPatchpoint = true; break; } } } } break; } } if (addPatchpoint) { if (mustUseTargetPatchpoint) { // We wanted a source patchpoint, but could not have one. // So, add patchpoints to the backedge targets. // for (BasicBlock* const succBlock : block->Succs(this)) { if (succBlock->bbNum <= block->bbNum) { // The succBlock had better agree it's a target. // assert((succBlock->bbFlags & BBF_BACKWARD_JUMP_TARGET) == BBF_BACKWARD_JUMP_TARGET); // We may already have decided to put a patchpoint in succBlock. If not, add one. // if ((succBlock->bbFlags & BBF_PATCHPOINT) != 0) { // In some cases the target may not be stack-empty at entry. // If so, we will bypass patchpoints for this backedge. // if (succBlock->bbStackDepthOnEntry() > 0) { JITDUMP("\nCan't set source patchpoint at " FMT_BB ", can't use target " FMT_BB " as it has non-empty stack on entry.\n", block->bbNum, succBlock->bbNum); } else { JITDUMP("\nCan't set source patchpoint at " FMT_BB ", using target " FMT_BB " instead\n", block->bbNum, succBlock->bbNum); assert(!succBlock->hasHndIndex()); succBlock->bbFlags |= BBF_PATCHPOINT; } } } } } else { assert(!block->hasHndIndex()); block->bbFlags |= BBF_PATCHPOINT; } setMethodHasPatchpoint(); } } else { // Should not see backward branch targets w/o backwards branches. // So if !compHasBackwardsBranch, these flags should never be set. // assert((block->bbFlags & (BBF_BACKWARD_JUMP_TARGET | BBF_BACKWARD_JUMP_SOURCE)) == 0); } } #ifdef DEBUG // As a stress test, we can place patchpoints at the start of any block // that is a stack empty point and is not within a handler. 
// // Todo: enable for mid-block stack empty points too. // const int offsetOSR = JitConfig.JitOffsetOnStackReplacement(); const int randomOSR = JitConfig.JitRandomOnStackReplacement(); const bool tryOffsetOSR = offsetOSR >= 0; const bool tryRandomOSR = randomOSR > 0; if (compCanHavePatchpoints() && (tryOffsetOSR || tryRandomOSR) && (verCurrentState.esStackDepth == 0) && !block->hasHndIndex() && ((block->bbFlags & BBF_PATCHPOINT) == 0)) { // Block start can have a patchpoint. See if we should add one. // bool addPatchpoint = false; // Specific offset? // if (tryOffsetOSR) { if (impCurOpcOffs == (unsigned)offsetOSR) { addPatchpoint = true; } } // Random? // else { // Reuse the random inliner's random state. // Note m_inlineStrategy is always created, even if we're not inlining. // CLRRandom* const random = impInlineRoot()->m_inlineStrategy->GetRandom(randomOSR); const int randomValue = (int)random->Next(100); addPatchpoint = (randomValue < randomOSR); } if (addPatchpoint) { block->bbFlags |= BBF_PATCHPOINT; setMethodHasPatchpoint(); } JITDUMP("\n** %s patchpoint%s added to " FMT_BB " (il offset %u)\n", tryOffsetOSR ? "offset" : "random", addPatchpoint ? "" : " not", block->bbNum, impCurOpcOffs); } #endif // DEBUG } // Mark stack-empty rare blocks to be considered for partial compilation. // // Ideally these are conditionally executed blocks -- if the method is going // to unconditionally throw, there's not as much to be gained by deferring jitting. // For now, we just screen out the entry bb. // // In general we might want track all the IL stack empty points so we can // propagate rareness back through flow and place the partial compilation patchpoints "earlier" // so there are fewer overall. // // Note unlike OSR, it's ok to forgo these. // // Todo: stress mode... // if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_PartialCompilation() > 0) && compCanHavePatchpoints() && !compTailPrefixSeen) { // Is this block a good place for partial compilation? // if ((block != fgFirstBB) && block->isRunRarely() && (verCurrentState.esStackDepth == 0) && ((block->bbFlags & BBF_PATCHPOINT) == 0) && !block->hasHndIndex()) { JITDUMP("\nBlock " FMT_BB " will be a partial compilation patchpoint -- not importing\n", block->bbNum); block->bbFlags |= BBF_PARTIAL_COMPILATION_PATCHPOINT; setMethodHasPartialCompilationPatchpoint(); // Change block to BBJ_THROW so we won't trigger importation of successors. // block->bbJumpKind = BBJ_THROW; // If this method has a explicit generic context, the only uses of it may be in // the IL for this block. So assume it's used. 
// if (info.compMethodInfo->options & (CORINFO_GENERICS_CTXT_FROM_METHODDESC | CORINFO_GENERICS_CTXT_FROM_METHODTABLE)) { lvaGenericsContextInUse = true; } return; } } #endif // FEATURE_ON_STACK_REPLACEMENT /* Walk the opcodes that comprise the basic block */ const BYTE* codeAddr = info.compCode + block->bbCodeOffs; const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd; IL_OFFSET opcodeOffs = block->bbCodeOffs; IL_OFFSET lastSpillOffs = opcodeOffs; signed jmpDist; /* remember the start of the delegate creation sequence (used for verification) */ const BYTE* delegateCreateStart = nullptr; int prefixFlags = 0; bool explicitTailCall, constraintCall, readonlyCall; typeInfo tiRetVal; unsigned numArgs = info.compArgsCount; /* Now process all the opcodes in the block */ var_types callTyp = TYP_COUNT; OPCODE prevOpcode = CEE_ILLEGAL; if (block->bbCatchTyp) { if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) { impCurStmtOffsSet(block->bbCodeOffs); } // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block // to a temp. This is a trade off for code simplicity impSpillSpecialSideEff(); } while (codeAddr < codeEndp) { #ifdef FEATURE_READYTORUN bool usingReadyToRunHelper = false; #endif CORINFO_RESOLVED_TOKEN resolvedToken; CORINFO_RESOLVED_TOKEN constrainedResolvedToken; CORINFO_CALL_INFO callInfo; CORINFO_FIELD_INFO fieldInfo; tiRetVal = typeInfo(); // Default type info //--------------------------------------------------------------------- /* We need to restrict the max tree depth as many of the Compiler functions are recursive. We do this by spilling the stack */ if (verCurrentState.esStackDepth) { /* Has it been a while since we last saw a non-empty stack (which guarantees that the tree depth isnt accumulating. */ if ((opcodeOffs - lastSpillOffs) > MAX_TREE_SIZE && impCanSpillNow(prevOpcode)) { impSpillStackEnsure(); lastSpillOffs = opcodeOffs; } } else { lastSpillOffs = opcodeOffs; impBoxTempInUse = false; // nothing on the stack, box temp OK to use again } /* Compute the current instr offset */ opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); #ifndef DEBUG if (opts.compDbgInfo) #endif { nxtStmtOffs = (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET; /* Have we reached the next stmt boundary ? */ if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs) { assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]); if (verCurrentState.esStackDepth != 0 && opts.compDbgCode) { /* We need to provide accurate IP-mapping at this point. So spill anything on the stack so that it will form gtStmts with the correct stmt offset noted */ impSpillStackEnsure(true); } // Have we reported debug info for any tree? if (impCurStmtDI.IsValid() && opts.compDbgCode) { GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID); impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); assert(!impCurStmtDI.IsValid()); } if (!impCurStmtDI.IsValid()) { /* Make sure that nxtStmtIndex is in sync with opcodeOffs. If opcodeOffs has gone past nxtStmtIndex, catch up */ while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount && info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs) { nxtStmtIndex++; } /* Go to the new stmt */ impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]); /* Update the stmt boundary index */ nxtStmtIndex++; assert(nxtStmtIndex <= info.compStmtOffsetsCount); /* Are there any more line# entries after this one? 
*/ if (nxtStmtIndex < info.compStmtOffsetsCount) { /* Remember where the next line# starts */ nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex]; } else { /* No more line# entries */ nxtStmtOffs = BAD_IL_OFFSET; } } } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) && (verCurrentState.esStackDepth == 0)) { /* At stack-empty locations, we have already added the tree to the stmt list with the last offset. We just need to update impCurStmtDI */ impCurStmtOffsSet(opcodeOffs); } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) && impOpcodeIsCallSiteBoundary(prevOpcode)) { /* Make sure we have a type cached */ assert(callTyp != TYP_COUNT); if (callTyp == TYP_VOID) { impCurStmtOffsSet(opcodeOffs); } else if (opts.compDbgCode) { impSpillStackEnsure(true); impCurStmtOffsSet(opcodeOffs); } } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP)) { if (opts.compDbgCode) { impSpillStackEnsure(true); } impCurStmtOffsSet(opcodeOffs); } assert(!impCurStmtDI.IsValid() || (nxtStmtOffs == BAD_IL_OFFSET) || (impCurStmtDI.GetLocation().GetOffset() <= nxtStmtOffs)); } CORINFO_CLASS_HANDLE clsHnd = DUMMY_INIT(NULL); CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL); CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL); var_types lclTyp, ovflType = TYP_UNKNOWN; GenTree* op1 = DUMMY_INIT(NULL); GenTree* op2 = DUMMY_INIT(NULL); GenTree* newObjThisPtr = DUMMY_INIT(NULL); bool uns = DUMMY_INIT(false); bool isLocal = false; /* Get the next opcode and the size of its parameters */ OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr); codeAddr += sizeof(__int8); #ifdef DEBUG impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1); JITDUMP("\n [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs); #endif DECODE_OPCODE: // Return if any previous code has caused inline to fail. if (compDonotInline()) { return; } /* Get the size of additional parameters */ signed int sz = opcodeSizes[opcode]; #ifdef DEBUG clsHnd = NO_CLASS_HANDLE; lclTyp = TYP_COUNT; callTyp = TYP_COUNT; impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1); impCurOpcName = opcodeNames[opcode]; if (verbose && (opcode != CEE_PREFIX1)) { printf("%s", impCurOpcName); } /* Use assertImp() to display the opcode */ op1 = op2 = nullptr; #endif /* See what kind of an opcode we have, then */ unsigned mflags = 0; unsigned clsFlags = 0; switch (opcode) { unsigned lclNum; var_types type; GenTree* op3; genTreeOps oper; unsigned size; int val; CORINFO_SIG_INFO sig; IL_OFFSET jmpAddr; bool ovfl, unordered, callNode; bool ldstruct; CORINFO_CLASS_HANDLE tokenType; union { int intVal; float fltVal; __int64 lngVal; double dblVal; } cval; case CEE_PREFIX1: opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256); opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); codeAddr += sizeof(__int8); goto DECODE_OPCODE; SPILL_APPEND: // We need to call impSpillLclRefs() for a struct type lclVar. // This is because there may be loads of that lclVar on the evaluation stack, and // we need to ensure that those loads are completed before we modify it. 
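            // Informal example (not taken from any particular IL): if the evaluation stack
            // still holds an earlier load of the same struct local (say from a 'dup' or a
            // pending 'ldloc') when we append an assignment that overwrites that local, the
            // pending load must be spilled to a temp first so that it observes the old value
            // of the local rather than the new one.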
if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtGetOp1())) { GenTree* lhs = op1->gtGetOp1(); GenTreeLclVarCommon* lclVar = nullptr; if (lhs->gtOper == GT_LCL_VAR) { lclVar = lhs->AsLclVarCommon(); } else if (lhs->OperIsBlk()) { // Check if LHS address is within some struct local, to catch // cases where we're updating the struct by something other than a stfld GenTree* addr = lhs->AsBlk()->Addr(); // Catches ADDR(LCL_VAR), or ADD(ADDR(LCL_VAR),CNS_INT)) lclVar = addr->IsLocalAddrExpr(); // Catches ADDR(FIELD(... ADDR(LCL_VAR))) if (lclVar == nullptr) { GenTree* lclTree = nullptr; if (impIsAddressInLocal(addr, &lclTree)) { lclVar = lclTree->AsLclVarCommon(); } } } if (lclVar != nullptr) { impSpillLclRefs(lclVar->GetLclNum()); } } /* Append 'op1' to the list of statements */ impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); goto DONE_APPEND; APPEND: /* Append 'op1' to the list of statements */ impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); goto DONE_APPEND; DONE_APPEND: #ifdef DEBUG // Remember at which BC offset the tree was finished impNoteLastILoffs(); #endif break; case CEE_LDNULL: impPushNullObjRefOnStack(); break; case CEE_LDC_I4_M1: case CEE_LDC_I4_0: case CEE_LDC_I4_1: case CEE_LDC_I4_2: case CEE_LDC_I4_3: case CEE_LDC_I4_4: case CEE_LDC_I4_5: case CEE_LDC_I4_6: case CEE_LDC_I4_7: case CEE_LDC_I4_8: cval.intVal = (opcode - CEE_LDC_I4_0); assert(-1 <= cval.intVal && cval.intVal <= 8); goto PUSH_I4CON; case CEE_LDC_I4_S: cval.intVal = getI1LittleEndian(codeAddr); goto PUSH_I4CON; case CEE_LDC_I4: cval.intVal = getI4LittleEndian(codeAddr); goto PUSH_I4CON; PUSH_I4CON: JITDUMP(" %d", cval.intVal); impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT)); break; case CEE_LDC_I8: cval.lngVal = getI8LittleEndian(codeAddr); JITDUMP(" 0x%016llx", cval.lngVal); impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG)); break; case CEE_LDC_R8: cval.dblVal = getR8LittleEndian(codeAddr); JITDUMP(" %#.17g", cval.dblVal); impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE)); break; case CEE_LDC_R4: cval.dblVal = getR4LittleEndian(codeAddr); JITDUMP(" %#.17g", cval.dblVal); impPushOnStack(gtNewDconNode(cval.dblVal, TYP_FLOAT), typeInfo(TI_DOUBLE)); break; case CEE_LDSTR: val = getU4LittleEndian(codeAddr); JITDUMP(" %08X", val); impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal); break; case CEE_LDARG: lclNum = getU2LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case CEE_LDARG_S: lclNum = getU1LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case CEE_LDARG_0: case CEE_LDARG_1: case CEE_LDARG_2: case CEE_LDARG_3: lclNum = (opcode - CEE_LDARG_0); assert(lclNum >= 0 && lclNum < 4); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC: lclNum = getU2LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC_S: lclNum = getU1LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC_0: case CEE_LDLOC_1: case CEE_LDLOC_2: case CEE_LDLOC_3: lclNum = (opcode - CEE_LDLOC_0); assert(lclNum >= 0 && lclNum < 4); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_STARG: lclNum = getU2LittleEndian(codeAddr); goto STARG; case CEE_STARG_S: lclNum = getU1LittleEndian(codeAddr); STARG: JITDUMP(" %u", lclNum); if (compIsForInlining()) { op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo); 
noway_assert(op1->gtOper == GT_LCL_VAR); lclNum = op1->AsLclVar()->GetLclNum(); goto VAR_ST_VALID; } lclNum = compMapILargNum(lclNum); // account for possible hidden param assertImp(lclNum < numArgs); if (lclNum == info.compThisArg) { lclNum = lvaArg0Var; } // We should have seen this arg write in the prescan assert(lvaTable[lclNum].lvHasILStoreOp); goto VAR_ST; case CEE_STLOC: lclNum = getU2LittleEndian(codeAddr); isLocal = true; JITDUMP(" %u", lclNum); goto LOC_ST; case CEE_STLOC_S: lclNum = getU1LittleEndian(codeAddr); isLocal = true; JITDUMP(" %u", lclNum); goto LOC_ST; case CEE_STLOC_0: case CEE_STLOC_1: case CEE_STLOC_2: case CEE_STLOC_3: isLocal = true; lclNum = (opcode - CEE_STLOC_0); assert(lclNum >= 0 && lclNum < 4); LOC_ST: if (compIsForInlining()) { lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo; /* Have we allocated a temp for this local? */ lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp")); goto _PopValue; } lclNum += numArgs; VAR_ST: if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var) { BADCODE("Bad IL"); } VAR_ST_VALID: /* if it is a struct assignment, make certain we don't overflow the buffer */ assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd)); if (lvaTable[lclNum].lvNormalizeOnLoad()) { lclTyp = lvaGetRealType(lclNum); } else { lclTyp = lvaGetActualType(lclNum); } _PopValue: /* Pop the value being assigned */ { StackEntry se = impPopStack(); clsHnd = se.seTypeInfo.GetClassHandle(); op1 = se.val; tiRetVal = se.seTypeInfo; } #ifdef FEATURE_SIMD if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet())) { assert(op1->TypeGet() == TYP_STRUCT); op1->gtType = lclTyp; } #endif // FEATURE_SIMD op1 = impImplicitIorI4Cast(op1, lclTyp); #ifdef TARGET_64BIT // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT)) { op1 = gtNewCastNode(TYP_INT, op1, false, TYP_INT); } #endif // TARGET_64BIT // We had better assign it a value of the correct type assertImp( genActualType(lclTyp) == genActualType(op1->gtType) || (genActualType(lclTyp) == TYP_I_IMPL && op1->IsLocalAddrExpr() != nullptr) || (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) || (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) || (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) || ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF)); /* If op1 is "&var" then its type is the transient "*" and it can be used either as TYP_BYREF or TYP_I_IMPL */ if (op1->IsLocalAddrExpr() != nullptr) { assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF); /* When "&var" is created, we assume it is a byref. If it is being assigned to a TYP_I_IMPL var, change the type to prevent unnecessary GC info */ if (genActualType(lclTyp) == TYP_I_IMPL) { op1->gtType = TYP_I_IMPL; } } // If this is a local and the local is a ref type, see // if we can improve type information based on the // value being assigned. if (isLocal && (lclTyp == TYP_REF)) { // We should have seen a stloc in our IL prescan. assert(lvaTable[lclNum].lvHasILStoreOp); // Is there just one place this local is defined? const bool isSingleDefLocal = lvaTable[lclNum].lvSingleDef; // Conservative check that there is just one // definition that reaches this store. 
const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0); if (isSingleDefLocal && hasSingleReachingDef) { lvaUpdateClass(lclNum, op1, clsHnd); } } /* Filter out simple assignments to itself */ if (op1->gtOper == GT_LCL_VAR && lclNum == op1->AsLclVarCommon()->GetLclNum()) { if (opts.compDbgCode) { op1 = gtNewNothingNode(); goto SPILL_APPEND; } else { break; } } /* Create the assignment node */ op2 = gtNewLclvNode(lclNum, lclTyp DEBUGARG(opcodeOffs + sz + 1)); /* If the local is aliased or pinned, we need to spill calls and indirections from the stack. */ if ((lvaTable[lclNum].IsAddressExposed() || lvaTable[lclNum].lvHasLdAddrOp || lvaTable[lclNum].lvPinned) && (verCurrentState.esStackDepth > 0)) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased or is pinned")); } /* Spill any refs to the local from the stack */ impSpillLclRefs(lclNum); // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE // We insert a cast to the dest 'op2' type // if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)) { op1 = gtNewCastNode(op2->TypeGet(), op1, false, op2->TypeGet()); } if (varTypeIsStruct(lclTyp)) { op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL); } else { op1 = gtNewAssignNode(op2, op1); } goto SPILL_APPEND; case CEE_LDLOCA: lclNum = getU2LittleEndian(codeAddr); goto LDLOCA; case CEE_LDLOCA_S: lclNum = getU1LittleEndian(codeAddr); LDLOCA: JITDUMP(" %u", lclNum); if (compIsForInlining()) { // Get the local type lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo; /* Have we allocated a temp for this local? */ lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp")); assert(!lvaGetDesc(lclNum)->lvNormalizeOnLoad()); op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum)); goto _PUSH_ADRVAR; } lclNum += numArgs; assertImp(lclNum < info.compLocalsCount); goto ADRVAR; case CEE_LDARGA: lclNum = getU2LittleEndian(codeAddr); goto LDARGA; case CEE_LDARGA_S: lclNum = getU1LittleEndian(codeAddr); LDARGA: JITDUMP(" %u", lclNum); Verify(lclNum < info.compILargsCount, "bad arg num"); if (compIsForInlining()) { // In IL, LDARGA(_S) is used to load the byref managed pointer of struct argument, // followed by a ldfld to load the field. op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo); if (op1->gtOper != GT_LCL_VAR) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR); return; } assert(op1->gtOper == GT_LCL_VAR); goto _PUSH_ADRVAR; } lclNum = compMapILargNum(lclNum); // account for possible hidden param assertImp(lclNum < numArgs); if (lclNum == info.compThisArg) { lclNum = lvaArg0Var; } goto ADRVAR; ADRVAR: op1 = impCreateLocalNode(lclNum DEBUGARG(opcodeOffs + sz + 1)); _PUSH_ADRVAR: assert(op1->gtOper == GT_LCL_VAR); /* Note that this is supposed to create the transient type "*" which may be used as a TYP_I_IMPL. However we catch places where it is used as a TYP_I_IMPL and change the node if needed. Thus we are pessimistic and may report byrefs in the GC info where it was not absolutely needed, but it is safer this way. 
*/ op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1); // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does assert((op1->gtFlags & GTF_GLOB_REF) == 0); tiRetVal = lvaTable[lclNum].lvVerTypeInfo; impPushOnStack(op1, tiRetVal); break; case CEE_ARGLIST: if (!info.compIsVarArgs) { BADCODE("arglist in non-vararg method"); } assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG); /* The ARGLIST cookie is a hidden 'last' parameter, we have already adjusted the arg count because this is like fetching the last param */ assertImp(0 < numArgs); lclNum = lvaVarargsHandleArg; op1 = gtNewLclvNode(lclNum, TYP_I_IMPL DEBUGARG(opcodeOffs + sz + 1)); op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1); impPushOnStack(op1, tiRetVal); break; case CEE_ENDFINALLY: if (compIsForInlining()) { assert(!"Shouldn't have exception handlers in the inliner!"); compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY); return; } if (verCurrentState.esStackDepth > 0) { impEvalSideEffects(); } if (info.compXcptnsCount == 0) { BADCODE("endfinally outside finally"); } assert(verCurrentState.esStackDepth == 0); op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr); goto APPEND; case CEE_ENDFILTER: if (compIsForInlining()) { assert(!"Shouldn't have exception handlers in the inliner!"); compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER); return; } block->bbSetRunRarely(); // filters are rare if (info.compXcptnsCount == 0) { BADCODE("endfilter outside filter"); } op1 = impPopStack().val; assertImp(op1->gtType == TYP_INT); if (!bbInFilterILRange(block)) { BADCODE("EndFilter outside a filter handler"); } /* Mark current bb as end of filter */ assert(compCurBB->bbFlags & BBF_DONT_REMOVE); assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET); /* Mark catch handler as successor */ op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1); if (verCurrentState.esStackDepth != 0) { verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__) DEBUGARG(__LINE__)); } goto APPEND; case CEE_RET: prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it RET: if (!impReturnInstruction(prefixFlags, opcode)) { return; // abort } else { break; } case CEE_JMP: assert(!compIsForInlining()); if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex()) { /* CEE_JMP does not make sense in some "protected" regions. */ BADCODE("Jmp not allowed in protected region"); } if (opts.IsReversePInvoke()) { BADCODE("Jmp not allowed in reverse P/Invoke"); } if (verCurrentState.esStackDepth != 0) { BADCODE("Stack must be empty after CEE_JMPs"); } _impResolveToken(CORINFO_TOKENKIND_Method); JITDUMP(" %08X", resolvedToken.token); /* The signature of the target has to be identical to ours.
At least check that argCnt and returnType match */ eeGetMethodSig(resolvedToken.hMethod, &sig); if (sig.numArgs != info.compMethodInfo->args.numArgs || sig.retType != info.compMethodInfo->args.retType || sig.callConv != info.compMethodInfo->args.callConv) { BADCODE("Incompatible target for CEE_JMPs"); } op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod); /* Mark the basic block as being a JUMP instead of RETURN */ block->bbFlags |= BBF_HAS_JMP; /* Set this flag to make sure register arguments have a location assigned * even if we don't use them inside the method */ compJmpOpUsed = true; fgNoStructPromotion = true; goto APPEND; case CEE_LDELEMA: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); ldelemClsHnd = resolvedToken.hClass; // If it's a value class array we just do a simple address-of if (eeIsValueClass(ldelemClsHnd)) { CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd); if (cit == CORINFO_TYPE_UNDEF) { lclTyp = TYP_STRUCT; } else { lclTyp = JITtype2varType(cit); } goto ARR_LD_POST_VERIFY; } // Similarly, if its a readonly access, we can do a simple address-of // without doing a runtime type-check if (prefixFlags & PREFIX_READONLY) { lclTyp = TYP_REF; goto ARR_LD_POST_VERIFY; } // Otherwise we need the full helper function with run-time type check op1 = impTokenToHandle(&resolvedToken); if (op1 == nullptr) { // compDonotInline() return; } { GenTreeCall::Use* args = gtNewCallArgs(op1); // Type args = gtPrependNewCallArg(impPopStack().val, args); // index args = gtPrependNewCallArg(impPopStack().val, args); // array op1 = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, args); } impPushOnStack(op1, tiRetVal); break; // ldelem for reference and value types case CEE_LDELEM: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); ldelemClsHnd = resolvedToken.hClass; // If it's a reference type or generic variable type // then just generate code as though it's a ldelem.ref instruction if (!eeIsValueClass(ldelemClsHnd)) { lclTyp = TYP_REF; opcode = CEE_LDELEM_REF; } else { CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd); lclTyp = JITtype2varType(jitTyp); tiRetVal = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct tiRetVal.NormaliseForStack(); } goto ARR_LD_POST_VERIFY; case CEE_LDELEM_I1: lclTyp = TYP_BYTE; goto ARR_LD; case CEE_LDELEM_I2: lclTyp = TYP_SHORT; goto ARR_LD; case CEE_LDELEM_I: lclTyp = TYP_I_IMPL; goto ARR_LD; // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter // and treating it as TYP_INT avoids other asserts. 
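// (Added, illustrative IL only -- not from this file) A typical sequence handled by the ldelem cases below:
//   ldloc.0      // array reference
//   ldloc.1      // index
//   ldelem.i4    // imported as a TYP_INT GT_INDEX via gtNewIndexRef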
case CEE_LDELEM_U4: lclTyp = TYP_INT; goto ARR_LD; case CEE_LDELEM_I4: lclTyp = TYP_INT; goto ARR_LD; case CEE_LDELEM_I8: lclTyp = TYP_LONG; goto ARR_LD; case CEE_LDELEM_REF: lclTyp = TYP_REF; goto ARR_LD; case CEE_LDELEM_R4: lclTyp = TYP_FLOAT; goto ARR_LD; case CEE_LDELEM_R8: lclTyp = TYP_DOUBLE; goto ARR_LD; case CEE_LDELEM_U1: lclTyp = TYP_UBYTE; goto ARR_LD; case CEE_LDELEM_U2: lclTyp = TYP_USHORT; goto ARR_LD; ARR_LD: ARR_LD_POST_VERIFY: /* Pull the index value and array address */ op2 = impPopStack().val; op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF); /* Check for null pointer - in the inliner case we simply abort */ if (compIsForInlining()) { if (op1->gtOper == GT_CNS_INT) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM); return; } } /* Mark the block as containing an index expression */ if (op1->gtOper == GT_LCL_VAR) { if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD) { block->bbFlags |= BBF_HAS_IDX_LEN; optMethodFlags |= OMF_HAS_ARRAYREF; } } /* Create the index node and push it on the stack */ op1 = gtNewIndexRef(lclTyp, op1, op2); ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT); if ((opcode == CEE_LDELEMA) || ldstruct || (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd))) { assert(ldelemClsHnd != DUMMY_INIT(NULL)); // remember the element size if (lclTyp == TYP_REF) { op1->AsIndex()->gtIndElemSize = TARGET_POINTER_SIZE; } else { // If ldElemClass is precisely a primitive type, use that, otherwise, preserve the struct type. if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF) { op1->AsIndex()->gtStructElemClass = ldelemClsHnd; } assert(lclTyp != TYP_STRUCT || op1->AsIndex()->gtStructElemClass != nullptr); if (lclTyp == TYP_STRUCT) { size = info.compCompHnd->getClassSize(ldelemClsHnd); op1->AsIndex()->gtIndElemSize = size; op1->gtType = lclTyp; } } if ((opcode == CEE_LDELEMA) || ldstruct) { // wrap it in a & lclTyp = TYP_BYREF; op1 = gtNewOperNode(GT_ADDR, lclTyp, op1); } else { assert(lclTyp != TYP_STRUCT); } } if (ldstruct) { // Create an OBJ for the result op1 = gtNewObjNode(ldelemClsHnd, op1); op1->gtFlags |= GTF_EXCEPT; } impPushOnStack(op1, tiRetVal); break; // stelem for reference and value types case CEE_STELEM: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); stelemClsHnd = resolvedToken.hClass; // If it's a reference type just behave as though it's a stelem.ref instruction if (!eeIsValueClass(stelemClsHnd)) { goto STELEM_REF_POST_VERIFY; } // Otherwise extract the type { CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd); lclTyp = JITtype2varType(jitTyp); goto ARR_ST_POST_VERIFY; } case CEE_STELEM_REF: STELEM_REF_POST_VERIFY: if (opts.OptimizationEnabled()) { GenTree* array = impStackTop(2).val; GenTree* value = impStackTop().val; // Is this a case where we can skip the covariant store check? 
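// (Added note) Typical cases where the covariance check can be skipped include storing a null constant or storing into an array whose element type is known exactly (e.g. a sealed class); the precise conditions are implemented in impCanSkipCovariantStoreCheck.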
if (impCanSkipCovariantStoreCheck(value, array)) { lclTyp = TYP_REF; goto ARR_ST_POST_VERIFY; } } // Else call a helper function to do the assignment op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, impPopCallArgs(3, nullptr)); goto SPILL_APPEND; case CEE_STELEM_I1: lclTyp = TYP_BYTE; goto ARR_ST; case CEE_STELEM_I2: lclTyp = TYP_SHORT; goto ARR_ST; case CEE_STELEM_I: lclTyp = TYP_I_IMPL; goto ARR_ST; case CEE_STELEM_I4: lclTyp = TYP_INT; goto ARR_ST; case CEE_STELEM_I8: lclTyp = TYP_LONG; goto ARR_ST; case CEE_STELEM_R4: lclTyp = TYP_FLOAT; goto ARR_ST; case CEE_STELEM_R8: lclTyp = TYP_DOUBLE; goto ARR_ST; ARR_ST: ARR_ST_POST_VERIFY: /* The strict order of evaluation is LHS-operands, RHS-operands, range-check, and then assignment. However, codegen currently does the range-check before evaluation the RHS-operands. So to maintain strict ordering, we spill the stack. */ if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( "Strict ordering of exceptions for Array store")); } /* Pull the new value from the stack */ op2 = impPopStack().val; /* Pull the index value */ op1 = impPopStack().val; /* Pull the array address */ op3 = impPopStack().val; assertImp(op3->gtType == TYP_REF); if (op2->IsLocalAddrExpr() != nullptr) { op2->gtType = TYP_I_IMPL; } // Mark the block as containing an index expression if (op3->gtOper == GT_LCL_VAR) { if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD) { block->bbFlags |= BBF_HAS_IDX_LEN; optMethodFlags |= OMF_HAS_ARRAYREF; } } /* Create the index node */ op1 = gtNewIndexRef(lclTyp, op3, op1); /* Create the assignment node and append it */ if (lclTyp == TYP_STRUCT) { assert(stelemClsHnd != DUMMY_INIT(NULL)); op1->AsIndex()->gtStructElemClass = stelemClsHnd; op1->AsIndex()->gtIndElemSize = info.compCompHnd->getClassSize(stelemClsHnd); } if (varTypeIsStruct(op1)) { op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL); } else { op2 = impImplicitR4orR8Cast(op2, op1->TypeGet()); op1 = gtNewAssignNode(op1, op2); } /* Mark the expression as containing an assignment */ op1->gtFlags |= GTF_ASG; goto SPILL_APPEND; case CEE_ADD: oper = GT_ADD; goto MATH_OP2; case CEE_ADD_OVF: uns = false; goto ADD_OVF; case CEE_ADD_OVF_UN: uns = true; goto ADD_OVF; ADD_OVF: ovfl = true; callNode = false; oper = GT_ADD; goto MATH_OP2_FLAGS; case CEE_SUB: oper = GT_SUB; goto MATH_OP2; case CEE_SUB_OVF: uns = false; goto SUB_OVF; case CEE_SUB_OVF_UN: uns = true; goto SUB_OVF; SUB_OVF: ovfl = true; callNode = false; oper = GT_SUB; goto MATH_OP2_FLAGS; case CEE_MUL: oper = GT_MUL; goto MATH_MAYBE_CALL_NO_OVF; case CEE_MUL_OVF: uns = false; goto MUL_OVF; case CEE_MUL_OVF_UN: uns = true; goto MUL_OVF; MUL_OVF: ovfl = true; oper = GT_MUL; goto MATH_MAYBE_CALL_OVF; // Other binary math operations case CEE_DIV: oper = GT_DIV; goto MATH_MAYBE_CALL_NO_OVF; case CEE_DIV_UN: oper = GT_UDIV; goto MATH_MAYBE_CALL_NO_OVF; case CEE_REM: oper = GT_MOD; goto MATH_MAYBE_CALL_NO_OVF; case CEE_REM_UN: oper = GT_UMOD; goto MATH_MAYBE_CALL_NO_OVF; MATH_MAYBE_CALL_NO_OVF: ovfl = false; MATH_MAYBE_CALL_OVF: // Morpher has some complex logic about when to turn different // typed nodes on different platforms into helper calls. We // need to either duplicate that logic here, or just // pessimistically make all the nodes large enough to become // call nodes. Since call nodes aren't that much larger and // these opcodes are infrequent enough I chose the latter. 
callNode = true; goto MATH_OP2_FLAGS; case CEE_AND: oper = GT_AND; goto MATH_OP2; case CEE_OR: oper = GT_OR; goto MATH_OP2; case CEE_XOR: oper = GT_XOR; goto MATH_OP2; MATH_OP2: // For default values of 'ovfl' and 'callNode' ovfl = false; callNode = false; MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set /* Pull two values and push back the result */ op2 = impPopStack().val; op1 = impPopStack().val; /* Can't do arithmetic with references */ assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF); // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change if its a true byref, only // if it is in the stack) impBashVarAddrsToI(op1, op2); type = impGetByRefResultType(oper, uns, &op1, &op2); assert(!ovfl || !varTypeIsFloating(op1->gtType)); /* Special case: "int+0", "int-0", "int*1", "int/1" */ if (op2->gtOper == GT_CNS_INT) { if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) || (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV))) { impPushOnStack(op1, tiRetVal); break; } } // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand // if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)) { if (op1->TypeGet() != type) { // We insert a cast of op1 to 'type' op1 = gtNewCastNode(type, op1, false, type); } if (op2->TypeGet() != type) { // We insert a cast of op2 to 'type' op2 = gtNewCastNode(type, op2, false, type); } } if (callNode) { /* These operators can later be transformed into 'GT_CALL' */ assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]); #ifndef TARGET_ARM assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]); #endif // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying // that we'll need to transform into a general large node, but rather specifically // to a call: by doing it this way, things keep working if there are multiple sizes, // and a CALL is no longer the largest. // That said, as of now it *is* a large node, so we'll do this with an assert rather // than an "if". 
assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE); op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true)); } else { op1 = gtNewOperNode(oper, type, op1, op2); } /* Special case: integer/long division may throw an exception */ if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow(this)) { op1->gtFlags |= GTF_EXCEPT; } if (ovfl) { assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL); if (ovflType != TYP_UNKNOWN) { op1->gtType = ovflType; } op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW); if (uns) { op1->gtFlags |= GTF_UNSIGNED; } } impPushOnStack(op1, tiRetVal); break; case CEE_SHL: oper = GT_LSH; goto CEE_SH_OP2; case CEE_SHR: oper = GT_RSH; goto CEE_SH_OP2; case CEE_SHR_UN: oper = GT_RSZ; goto CEE_SH_OP2; CEE_SH_OP2: op2 = impPopStack().val; op1 = impPopStack().val; // operand to be shifted impBashVarAddrsToI(op1, op2); type = genActualType(op1->TypeGet()); op1 = gtNewOperNode(oper, type, op1, op2); impPushOnStack(op1, tiRetVal); break; case CEE_NOT: op1 = impPopStack().val; impBashVarAddrsToI(op1, nullptr); type = genActualType(op1->TypeGet()); impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal); break; case CEE_CKFINITE: op1 = impPopStack().val; type = op1->TypeGet(); op1 = gtNewOperNode(GT_CKFINITE, type, op1); op1->gtFlags |= GTF_EXCEPT; impPushOnStack(op1, tiRetVal); break; case CEE_LEAVE: val = getI4LittleEndian(codeAddr); // jump distance jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val); goto LEAVE; case CEE_LEAVE_S: val = getI1LittleEndian(codeAddr); // jump distance jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val); LEAVE: if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE); return; } JITDUMP(" %04X", jmpAddr); if (block->bbJumpKind != BBJ_LEAVE) { impResetLeaveBlock(block, jmpAddr); } assert(jmpAddr == block->bbJumpDest->bbCodeOffs); impImportLeave(block); impNoteBranchOffs(); break; case CEE_BR: case CEE_BR_S: jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr); if (compIsForInlining() && jmpDist == 0) { break; /* NOP */ } impNoteBranchOffs(); break; case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRFALSE_S: /* Pop the comparand (now there's a neat term) from the stack */ op1 = impPopStack().val; type = op1->TypeGet(); // Per Ecma-355, brfalse and brtrue are only specified for nint, ref, and byref. // // We've historically been a bit more permissive, so here we allow // any type that gtNewZeroConNode can handle. if (!varTypeIsArithmetic(type) && !varTypeIsGC(type)) { BADCODE("invalid type for brtrue/brfalse"); } if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext)) { block->bbJumpKind = BBJ_NONE; if (op1->gtFlags & GTF_GLOB_EFFECT) { op1 = gtUnusedValNode(op1); goto SPILL_APPEND; } else { break; } } if (op1->OperIsCompare()) { if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S) { // Flip the sense of the compare op1 = gtReverseCond(op1); } } else { // We'll compare against an equally-sized integer 0 // For small types, we always compare against int op2 = gtNewZeroConNode(genActualType(op1->gtType)); // Create the comparison operator and try to fold it oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? 
GT_NE : GT_EQ; op1 = gtNewOperNode(oper, TYP_INT, op1, op2); } // fall through COND_JUMP: /* Fold comparison if we can */ op1 = gtFoldExpr(op1); /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/ /* Don't make any blocks unreachable in import only mode */ if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly()) { /* gtFoldExpr() should prevent this as we don't want to make any blocks unreachable under compDbgCode */ assert(!opts.compDbgCode); BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->AsIntCon()->gtIconVal ? BBJ_ALWAYS : BBJ_NONE); assertImp((block->bbJumpKind == BBJ_COND) // normal case || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the // block for the second time block->bbJumpKind = foldedJumpKind; #ifdef DEBUG if (verbose) { if (op1->AsIntCon()->gtIconVal) { printf("\nThe conditional jump becomes an unconditional jump to " FMT_BB "\n", block->bbJumpDest->bbNum); } else { printf("\nThe block falls through into the next " FMT_BB "\n", block->bbNext->bbNum); } } #endif break; } op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1); /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt' in impImportBlock(block). For correct line numbers, spill stack. */ if (opts.compDbgCode && impCurStmtDI.IsValid()) { impSpillStackEnsure(true); } goto SPILL_APPEND; case CEE_CEQ: oper = GT_EQ; uns = false; goto CMP_2_OPs; case CEE_CGT_UN: oper = GT_GT; uns = true; goto CMP_2_OPs; case CEE_CGT: oper = GT_GT; uns = false; goto CMP_2_OPs; case CEE_CLT_UN: oper = GT_LT; uns = true; goto CMP_2_OPs; case CEE_CLT: oper = GT_LT; uns = false; goto CMP_2_OPs; CMP_2_OPs: op2 = impPopStack().val; op1 = impPopStack().val; // Recognize the IL idiom of CGT_UN(op1, 0) and normalize // it so that downstream optimizations don't have to. if ((opcode == CEE_CGT_UN) && op2->IsIntegralConst(0)) { oper = GT_NE; uns = false; } #ifdef TARGET_64BIT // TODO-Casts: create a helper that upcasts int32 -> native int when necessary. // See also identical code in impGetByRefResultType and STSFLD import. if (varTypeIsI(op1) && (genActualType(op2) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, TYP_I_IMPL); } else if (varTypeIsI(op2) && (genActualType(op1) == TYP_INT)) { op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, TYP_I_IMPL); } #endif // TARGET_64BIT assertImp(genActualType(op1) == genActualType(op2) || (varTypeIsI(op1) && varTypeIsI(op2)) || (varTypeIsFloating(op1) && varTypeIsFloating(op2))); // Create the comparison node. op1 = gtNewOperNode(oper, TYP_INT, op1, op2); // TODO: setting both flags when only one is appropriate. if (uns) { op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED; } // Fold result, if possible. 
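// (Added, illustrative) e.g. 'ldc.i4.2; ldc.i4.2; ceq' reaches this point as GT_EQ(2, 2) and folds to the constant 1.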
op1 = gtFoldExpr(op1); impPushOnStack(op1, tiRetVal); break; case CEE_BEQ_S: case CEE_BEQ: oper = GT_EQ; goto CMP_2_OPs_AND_BR; case CEE_BGE_S: case CEE_BGE: oper = GT_GE; goto CMP_2_OPs_AND_BR; case CEE_BGE_UN_S: case CEE_BGE_UN: oper = GT_GE; goto CMP_2_OPs_AND_BR_UN; case CEE_BGT_S: case CEE_BGT: oper = GT_GT; goto CMP_2_OPs_AND_BR; case CEE_BGT_UN_S: case CEE_BGT_UN: oper = GT_GT; goto CMP_2_OPs_AND_BR_UN; case CEE_BLE_S: case CEE_BLE: oper = GT_LE; goto CMP_2_OPs_AND_BR; case CEE_BLE_UN_S: case CEE_BLE_UN: oper = GT_LE; goto CMP_2_OPs_AND_BR_UN; case CEE_BLT_S: case CEE_BLT: oper = GT_LT; goto CMP_2_OPs_AND_BR; case CEE_BLT_UN_S: case CEE_BLT_UN: oper = GT_LT; goto CMP_2_OPs_AND_BR_UN; case CEE_BNE_UN_S: case CEE_BNE_UN: oper = GT_NE; goto CMP_2_OPs_AND_BR_UN; CMP_2_OPs_AND_BR_UN: uns = true; unordered = true; goto CMP_2_OPs_AND_BR_ALL; CMP_2_OPs_AND_BR: uns = false; unordered = false; goto CMP_2_OPs_AND_BR_ALL; CMP_2_OPs_AND_BR_ALL: /* Pull two values */ op2 = impPopStack().val; op1 = impPopStack().val; #ifdef TARGET_64BIT if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL); } else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT)) { op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) || (varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet())) || (varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))); if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext)) { block->bbJumpKind = BBJ_NONE; if (op1->gtFlags & GTF_GLOB_EFFECT) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( "Branch to next Optimization, op1 side effect")); impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } if (op2->gtFlags & GTF_GLOB_EFFECT) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( "Branch to next Optimization, op2 side effect")); impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } #ifdef DEBUG if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT) { impNoteLastILoffs(); } #endif break; } // We can generate an compare of different sized floating point op1 and op2 // We insert a cast // if (varTypeIsFloating(op1->TypeGet())) { if (op1->TypeGet() != op2->TypeGet()) { assert(varTypeIsFloating(op2->TypeGet())); // say op1=double, op2=float. To avoid loss of precision // while comparing, op2 is converted to double and double // comparison is done. 
if (op1->TypeGet() == TYP_DOUBLE) { // We insert a cast of op2 to TYP_DOUBLE op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE); } else if (op2->TypeGet() == TYP_DOUBLE) { // We insert a cast of op1 to TYP_DOUBLE op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE); } } } /* Create and append the operator */ op1 = gtNewOperNode(oper, TYP_INT, op1, op2); if (uns) { op1->gtFlags |= GTF_UNSIGNED; } if (unordered) { op1->gtFlags |= GTF_RELOP_NAN_UN; } goto COND_JUMP; case CEE_SWITCH: /* Pop the switch value off the stack */ op1 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op1->TypeGet())); /* We can create a switch node */ op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1); val = (int)getU4LittleEndian(codeAddr); codeAddr += 4 + val * 4; // skip over the switch-table goto SPILL_APPEND; /************************** Casting OPCODES ***************************/ case CEE_CONV_OVF_I1: lclTyp = TYP_BYTE; goto CONV_OVF; case CEE_CONV_OVF_I2: lclTyp = TYP_SHORT; goto CONV_OVF; case CEE_CONV_OVF_I: lclTyp = TYP_I_IMPL; goto CONV_OVF; case CEE_CONV_OVF_I4: lclTyp = TYP_INT; goto CONV_OVF; case CEE_CONV_OVF_I8: lclTyp = TYP_LONG; goto CONV_OVF; case CEE_CONV_OVF_U1: lclTyp = TYP_UBYTE; goto CONV_OVF; case CEE_CONV_OVF_U2: lclTyp = TYP_USHORT; goto CONV_OVF; case CEE_CONV_OVF_U: lclTyp = TYP_U_IMPL; goto CONV_OVF; case CEE_CONV_OVF_U4: lclTyp = TYP_UINT; goto CONV_OVF; case CEE_CONV_OVF_U8: lclTyp = TYP_ULONG; goto CONV_OVF; case CEE_CONV_OVF_I1_UN: lclTyp = TYP_BYTE; goto CONV_OVF_UN; case CEE_CONV_OVF_I2_UN: lclTyp = TYP_SHORT; goto CONV_OVF_UN; case CEE_CONV_OVF_I_UN: lclTyp = TYP_I_IMPL; goto CONV_OVF_UN; case CEE_CONV_OVF_I4_UN: lclTyp = TYP_INT; goto CONV_OVF_UN; case CEE_CONV_OVF_I8_UN: lclTyp = TYP_LONG; goto CONV_OVF_UN; case CEE_CONV_OVF_U1_UN: lclTyp = TYP_UBYTE; goto CONV_OVF_UN; case CEE_CONV_OVF_U2_UN: lclTyp = TYP_USHORT; goto CONV_OVF_UN; case CEE_CONV_OVF_U_UN: lclTyp = TYP_U_IMPL; goto CONV_OVF_UN; case CEE_CONV_OVF_U4_UN: lclTyp = TYP_UINT; goto CONV_OVF_UN; case CEE_CONV_OVF_U8_UN: lclTyp = TYP_ULONG; goto CONV_OVF_UN; CONV_OVF_UN: uns = true; goto CONV_OVF_COMMON; CONV_OVF: uns = false; goto CONV_OVF_COMMON; CONV_OVF_COMMON: ovfl = true; goto _CONV; case CEE_CONV_I1: lclTyp = TYP_BYTE; goto CONV; case CEE_CONV_I2: lclTyp = TYP_SHORT; goto CONV; case CEE_CONV_I: lclTyp = TYP_I_IMPL; goto CONV; case CEE_CONV_I4: lclTyp = TYP_INT; goto CONV; case CEE_CONV_I8: lclTyp = TYP_LONG; goto CONV; case CEE_CONV_U1: lclTyp = TYP_UBYTE; goto CONV; case CEE_CONV_U2: lclTyp = TYP_USHORT; goto CONV; #if (REGSIZE_BYTES == 8) case CEE_CONV_U: lclTyp = TYP_U_IMPL; goto CONV_UN; #else case CEE_CONV_U: lclTyp = TYP_U_IMPL; goto CONV; #endif case CEE_CONV_U4: lclTyp = TYP_UINT; goto CONV; case CEE_CONV_U8: lclTyp = TYP_ULONG; goto CONV_UN; case CEE_CONV_R4: lclTyp = TYP_FLOAT; goto CONV; case CEE_CONV_R8: lclTyp = TYP_DOUBLE; goto CONV; case CEE_CONV_R_UN: lclTyp = TYP_DOUBLE; goto CONV_UN; CONV_UN: uns = true; ovfl = false; goto _CONV; CONV: uns = false; ovfl = false; goto _CONV; _CONV: // only converts from FLOAT or DOUBLE to an integer type // and converts from ULONG (or LONG on ARM) to DOUBLE are morphed to calls if (varTypeIsFloating(lclTyp)) { callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl #ifdef TARGET_64BIT // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK? // TYP_BYREF could be used as TYP_I_IMPL which is long. 
// TODO-CQ: remove this when we lower casts long/ulong --> float/double // and generate SSE2 code instead of going through helper calls. || (impStackTop().val->TypeGet() == TYP_BYREF) #endif ; } else { callNode = varTypeIsFloating(impStackTop().val->TypeGet()); } op1 = impPopStack().val; impBashVarAddrsToI(op1); // Casts from floating point types must not have GTF_UNSIGNED set. if (varTypeIsFloating(op1)) { uns = false; } // At this point uns, ovf, callNode are all set. if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND) { op2 = op1->AsOp()->gtOp2; if (op2->gtOper == GT_CNS_INT) { ssize_t ival = op2->AsIntCon()->gtIconVal; ssize_t mask, umask; switch (lclTyp) { case TYP_BYTE: case TYP_UBYTE: mask = 0x00FF; umask = 0x007F; break; case TYP_USHORT: case TYP_SHORT: mask = 0xFFFF; umask = 0x7FFF; break; default: assert(!"unexpected type"); return; } if (((ival & umask) == ival) || ((ival & mask) == ival && uns)) { /* Toss the cast, it's a waste of time */ impPushOnStack(op1, tiRetVal); break; } else if (ival == mask) { /* Toss the masking, it's a waste of time, since we sign-extend from the small value anyways */ op1 = op1->AsOp()->gtOp1; } } } /* The 'op2' sub-operand of a cast is the 'real' type number, since the result of a cast to one of the 'small' integer types is an integer. */ type = genActualType(lclTyp); // If this is a no-op cast, just use op1. if (!ovfl && (type == op1->TypeGet()) && (genTypeSize(type) == genTypeSize(lclTyp))) { // Nothing needs to change } // Work is evidently required, add cast node else { if (callNode) { op1 = gtNewCastNodeL(type, op1, uns, lclTyp); } else { op1 = gtNewCastNode(type, op1, uns, lclTyp); } if (ovfl) { op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT); } if (op1->gtGetOp1()->OperIsConst() && opts.OptimizationEnabled()) { // Try and fold the introduced cast op1 = gtFoldExprConst(op1); } } impPushOnStack(op1, tiRetVal); break; case CEE_NEG: op1 = impPopStack().val; impBashVarAddrsToI(op1, nullptr); impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal); break; case CEE_POP: { /* Pull the top value from the stack */ StackEntry se = impPopStack(); clsHnd = se.seTypeInfo.GetClassHandle(); op1 = se.val; /* Get hold of the type of the value being duplicated */ lclTyp = genActualType(op1->gtType); /* Does the value have any side effects? */ if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode) { // Since we are throwing away the value, just normalize // it to its address. This is more efficient. if (varTypeIsStruct(op1)) { JITDUMP("\n ... CEE_POP struct ...\n"); DISPTREE(op1); #ifdef UNIX_AMD64_ABI // Non-calls, such as obj or ret_expr, have to go through this. // Calls with large struct return value have to go through this. // Helper calls with small struct return value also have to go // through this since they do not follow Unix calling convention. if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd, op1->AsCall()->GetUnmanagedCallConv()) || op1->AsCall()->gtCallType == CT_HELPER) #endif // UNIX_AMD64_ABI { // If the value being produced comes from loading // via an underlying address, just null check the address. if (op1->OperIs(GT_FIELD, GT_IND, GT_OBJ)) { gtChangeOperToNullCheck(op1, block); } else { op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false); } JITDUMP("\n ... optimized to ...\n"); DISPTREE(op1); } } // If op1 is non-overflow cast, throw it away since it is useless. 
// Another reason for throwing away the useless cast is in the context of // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)). // The cast gets added as part of importing GT_CALL, which gets in the way // of fgMorphCall() on the forms of tail call nodes that we assert. if ((op1->gtOper == GT_CAST) && !op1->gtOverflow()) { op1 = op1->AsOp()->gtOp1; } if (op1->gtOper != GT_CALL) { if ((op1->gtFlags & GTF_SIDE_EFFECT) != 0) { op1 = gtUnusedValNode(op1); } else { // Can't bash to NOP here because op1 can be referenced from `currentBlock->bbEntryState`, // if we ever need to reimport we need a valid LCL_VAR on it. op1 = gtNewNothingNode(); } } /* Append the value to the tree list */ goto SPILL_APPEND; } /* No side effects - just throw the <BEEP> thing away */ } break; case CEE_DUP: { StackEntry se = impPopStack(); GenTree* tree = se.val; tiRetVal = se.seTypeInfo; op1 = tree; // If the expression to dup is simple, just clone it. // Otherwise spill it to a temp, and reload the temp twice. bool cloneExpr = false; if (!opts.compDbgCode) { // Duplicate 0 and +0.0 if (op1->IsIntegralConst(0) || op1->IsFloatPositiveZero()) { cloneExpr = true; } // Duplicate locals and addresses of them else if (op1->IsLocal()) { cloneExpr = true; } else if (op1->TypeIs(TYP_BYREF) && op1->OperIs(GT_ADDR) && op1->gtGetOp1()->IsLocal() && (OPCODE)impGetNonPrefixOpcode(codeAddr + sz, codeEndp) != CEE_INITOBJ) { cloneExpr = true; } } else { // Always clone for debug mode cloneExpr = true; } if (!cloneExpr) { const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill")); impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); var_types type = genActualType(lvaTable[tmpNum].TypeGet()); op1 = gtNewLclvNode(tmpNum, type); // Propagate type info to the temp from the stack and the original tree if (type == TYP_REF) { assert(lvaTable[tmpNum].lvSingleDef == 0); lvaTable[tmpNum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def local\n", tmpNum); lvaSetClass(tmpNum, tree, tiRetVal.GetClassHandle()); } } op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("DUP instruction")); assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT)); impPushOnStack(op1, tiRetVal); impPushOnStack(op2, tiRetVal); } break; case CEE_STIND_I1: lclTyp = TYP_BYTE; goto STIND; case CEE_STIND_I2: lclTyp = TYP_SHORT; goto STIND; case CEE_STIND_I4: lclTyp = TYP_INT; goto STIND; case CEE_STIND_I8: lclTyp = TYP_LONG; goto STIND; case CEE_STIND_I: lclTyp = TYP_I_IMPL; goto STIND; case CEE_STIND_REF: lclTyp = TYP_REF; goto STIND; case CEE_STIND_R4: lclTyp = TYP_FLOAT; goto STIND; case CEE_STIND_R8: lclTyp = TYP_DOUBLE; goto STIND; STIND: op2 = impPopStack().val; // value to store op1 = impPopStack().val; // address to store to // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF); impBashVarAddrsToI(op1, op2); op2 = impImplicitR4orR8Cast(op2, lclTyp); #ifdef TARGET_64BIT // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType)) { op2->gtType = TYP_I_IMPL; } else { // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity // if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT)) { op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT); } // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity // if 
(varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL); } } #endif // TARGET_64BIT if (opcode == CEE_STIND_REF) { // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType)); lclTyp = genActualType(op2->TypeGet()); } // Check target type. #ifdef DEBUG if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF) { if (op2->gtType == TYP_BYREF) { assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL); } else if (lclTyp == TYP_BYREF) { assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType)); } } else { assertImp(genActualType(op2->gtType) == genActualType(lclTyp) || ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) || (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp))); } #endif op1 = gtNewOperNode(GT_IND, lclTyp, op1); // stind could point anywhere, example a boxed class static int op1->gtFlags |= GTF_IND_TGTANYWHERE; if (prefixFlags & PREFIX_VOLATILE) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered op1->gtFlags |= GTF_IND_VOLATILE; } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_IND_UNALIGNED; } op1 = gtNewAssignNode(op1, op2); op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF; // Spill side-effects AND global-data-accesses if (verCurrentState.esStackDepth > 0) { impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND")); } goto APPEND; case CEE_LDIND_I1: lclTyp = TYP_BYTE; goto LDIND; case CEE_LDIND_I2: lclTyp = TYP_SHORT; goto LDIND; case CEE_LDIND_U4: case CEE_LDIND_I4: lclTyp = TYP_INT; goto LDIND; case CEE_LDIND_I8: lclTyp = TYP_LONG; goto LDIND; case CEE_LDIND_REF: lclTyp = TYP_REF; goto LDIND; case CEE_LDIND_I: lclTyp = TYP_I_IMPL; goto LDIND; case CEE_LDIND_R4: lclTyp = TYP_FLOAT; goto LDIND; case CEE_LDIND_R8: lclTyp = TYP_DOUBLE; goto LDIND; case CEE_LDIND_U1: lclTyp = TYP_UBYTE; goto LDIND; case CEE_LDIND_U2: lclTyp = TYP_USHORT; goto LDIND; LDIND: op1 = impPopStack().val; // address to load from impBashVarAddrsToI(op1); #ifdef TARGET_64BIT // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity // if (genActualType(op1->gtType) == TYP_INT) { op1 = gtNewCastNode(TYP_I_IMPL, op1, false, TYP_I_IMPL); } #endif assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF); op1 = gtNewOperNode(GT_IND, lclTyp, op1); // ldind could point anywhere, example a boxed class static int op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE); if (prefixFlags & PREFIX_VOLATILE) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered op1->gtFlags |= GTF_IND_VOLATILE; } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_IND_UNALIGNED; } impPushOnStack(op1, tiRetVal); break; case CEE_UNALIGNED: assert(sz == 1); val = getU1LittleEndian(codeAddr); ++codeAddr; JITDUMP(" %u", val); if ((val != 1) && (val != 2) && (val != 4)) { BADCODE("Alignment unaligned. must be 1, 2, or 4"); } Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. 
prefixes"); prefixFlags |= PREFIX_UNALIGNED; impValidateMemoryAccessOpcode(codeAddr, codeEndp, false); PREFIX: opcode = (OPCODE)getU1LittleEndian(codeAddr); opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); codeAddr += sizeof(__int8); goto DECODE_OPCODE; case CEE_VOLATILE: Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes"); prefixFlags |= PREFIX_VOLATILE; impValidateMemoryAccessOpcode(codeAddr, codeEndp, true); assert(sz == 0); goto PREFIX; case CEE_LDFTN: { // Need to do a lookup here so that we perform an access check // and do a NOWAY if protections are violated _impResolveToken(CORINFO_TOKENKIND_Method); JITDUMP(" %08X", resolvedToken.token); eeGetCallInfo(&resolvedToken, (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr, combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN), &callInfo); // This check really only applies to intrinsic Array.Address methods if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE) { NO_WAY("Currently do not support LDFTN of Parameterized functions"); } // Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own. impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); DO_LDFTN: op1 = impMethodPointer(&resolvedToken, &callInfo); if (compDonotInline()) { return; } // Call info may have more precise information about the function than // the resolved token. CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken); assert(callInfo.hMethod != nullptr); heapToken->hMethod = callInfo.hMethod; impPushOnStack(op1, typeInfo(heapToken)); break; } case CEE_LDVIRTFTN: { /* Get the method token */ _impResolveToken(CORINFO_TOKENKIND_Method); JITDUMP(" %08X", resolvedToken.token); eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */, combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN), CORINFO_CALLINFO_CALLVIRT), &callInfo); // This check really only applies to intrinsic Array.Address methods if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE) { NO_WAY("Currently do not support LDFTN of Parameterized functions"); } mflags = callInfo.methodFlags; impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); if (compIsForInlining()) { if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL)) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL); return; } } CORINFO_SIG_INFO& ftnSig = callInfo.sig; /* Get the object-ref */ op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF); if (opts.IsReadyToRun()) { if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN) { if (op1->gtFlags & GTF_SIDE_EFFECT) { op1 = gtUnusedValNode(op1); impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } goto DO_LDFTN; } } else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL)) { if (op1->gtFlags & GTF_SIDE_EFFECT) { op1 = gtUnusedValNode(op1); impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } goto DO_LDFTN; } GenTree* fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo); if (compDonotInline()) { return; } CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken); assert(heapToken->tokenType == CORINFO_TOKENKIND_Method); assert(callInfo.hMethod != nullptr); heapToken->tokenType = CORINFO_TOKENKIND_Ldvirtftn; heapToken->hMethod = callInfo.hMethod; impPushOnStack(fptr, typeInfo(heapToken)); break; } case CEE_CONSTRAINED: assertImp(sz == sizeof(unsigned)); impResolveToken(codeAddr, &constrainedResolvedToken, 
CORINFO_TOKENKIND_Constrained); codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually JITDUMP(" (%08X) ", constrainedResolvedToken.token); Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes"); prefixFlags |= PREFIX_CONSTRAINED; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (actualOpcode != CEE_CALLVIRT && actualOpcode != CEE_CALL && actualOpcode != CEE_LDFTN) { BADCODE("constrained. has to be followed by callvirt, call or ldftn"); } } goto PREFIX; case CEE_READONLY: JITDUMP(" readonly."); Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes"); prefixFlags |= PREFIX_READONLY; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode)) { BADCODE("readonly. has to be followed by ldelema or call"); } } assert(sz == 0); goto PREFIX; case CEE_TAILCALL: JITDUMP(" tail."); Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes"); prefixFlags |= PREFIX_TAILCALL_EXPLICIT; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (!impOpcodeIsCallOpcode(actualOpcode)) { BADCODE("tailcall. has to be followed by call, callvirt or calli"); } } assert(sz == 0); goto PREFIX; case CEE_NEWOBJ: /* Since we will implicitly insert newObjThisPtr at the start of the argument list, spill any GTF_ORDER_SIDEEFF */ impSpillSpecialSideEff(); /* NEWOBJ does not respond to TAIL */ prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT; /* NEWOBJ does not respond to CONSTRAINED */ prefixFlags &= ~PREFIX_CONSTRAINED; _impResolveToken(CORINFO_TOKENKIND_NewObj); eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/, combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM), &callInfo); mflags = callInfo.methodFlags; if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0) { BADCODE("newobj on static or abstract method"); } // Insert the security callout before any actual code is generated impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); // There are three different cases for new // Object size is variable (depends on arguments) // 1) Object is an array (arrays treated specially by the EE) // 2) Object is some other variable sized object (e.g. String) // 3) Class Size can be determined beforehand (normal case) // In the first case, we need to call a NEWOBJ helper (multinewarray) // in the second case we call the constructor with a '0' this pointer // In the third case we alloc the memory, then call the constuctor clsFlags = callInfo.classFlags; if (clsFlags & CORINFO_FLG_ARRAY) { // Arrays need to call the NEWOBJ helper. assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE); impImportNewObjArray(&resolvedToken, &callInfo); if (compDonotInline()) { return; } callTyp = TYP_REF; break; } // At present this can only be String else if (clsFlags & CORINFO_FLG_VAROBJSIZE) { // Skip this thisPtr argument newObjThisPtr = nullptr; /* Remember that this basic block contains 'new' of an object */ block->bbFlags |= BBF_HAS_NEWOBJ; optMethodFlags |= OMF_HAS_NEWOBJ; } else { // This is the normal case where the size of the object is // fixed. Allocate the memory and call the constructor. 
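// (Added summary) For the fixed-size case a temp local is grabbed below: value classes pass the temp's address as the 'this' pointer, while reference classes allocate via GT_ALLOCOBJ, assign the result to the temp, and pass the temp as 'this'.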
// Note: We cannot add a peep to avoid use of temp here // becase we don't have enough interference info to detect when // sources and destination interfere, example: s = new S(ref); // TODO: We find the correct place to introduce a general // reverse copy prop for struct return values from newobj or // any function returning structs. /* get a temporary for the new object */ lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp")); if (compDonotInline()) { // Fail fast if lvaGrabTemp fails with CALLSITE_TOO_MANY_LOCALS. assert(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS); return; } // In the value class case we only need clsHnd for size calcs. // // The lookup of the code pointer will be handled by CALL in this case if (clsFlags & CORINFO_FLG_VALUECLASS) { if (compIsForInlining()) { // If value class has GC fields, inform the inliner. It may choose to // bail out on the inline. DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass); if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0) { compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT); if (compInlineResult->IsFailure()) { return; } // Do further notification in the case where the call site is rare; // some policies do not track the relative hotness of call sites for // "always" inline cases. if (impInlineInfo->iciBlock->isRunRarely()) { compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT); if (compInlineResult->IsFailure()) { return; } } } } CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass); if (impIsPrimitive(jitTyp)) { lvaTable[lclNum].lvType = JITtype2varType(jitTyp); } else { // The local variable itself is the allocated space. // Here we need unsafe value cls check, since the address of struct is taken for further use // and potentially exploitable. lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */); } bool bbInALoop = impBlockIsInALoop(block); bool bbIsReturn = (block->bbJumpKind == BBJ_RETURN) && (!compIsForInlining() || (impInlineInfo->iciBlock->bbJumpKind == BBJ_RETURN)); LclVarDsc* const lclDsc = lvaGetDesc(lclNum); if (fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn)) { // Append a tree to zero-out the temp newObjThisPtr = gtNewLclvNode(lclNum, lclDsc->TypeGet()); newObjThisPtr = gtNewBlkOpNode(newObjThisPtr, // Dest gtNewIconNode(0), // Value false, // isVolatile false); // not copyBlock impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } else { JITDUMP("\nSuppressing zero-init for V%02u -- expect to zero in prolog\n", lclNum); lclDsc->lvSuppressedZeroInit = 1; compSuppressedZeroInit = true; } // Obtain the address of the temp newObjThisPtr = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet())); } else { // If we're newing up a finalizable object, spill anything that can cause exceptions. // bool hasSideEffects = false; CorInfoHelpFunc newHelper = info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd, &hasSideEffects); if (hasSideEffects) { JITDUMP("\nSpilling stack for finalizable newobj\n"); impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("finalizable newobj spill")); } const bool useParent = true; op1 = gtNewAllocObjNode(&resolvedToken, useParent); if (op1 == nullptr) { return; } // Remember that this basic block contains 'new' of an object block->bbFlags |= BBF_HAS_NEWOBJ; optMethodFlags |= OMF_HAS_NEWOBJ; // Append the assignment to the temp/local. 
Dont need to spill // at all as we are just calling an EE-Jit helper which can only // cause an (async) OutOfMemoryException. // We assign the newly allocated object (by a GT_ALLOCOBJ node) // to a temp. Note that the pattern "temp = allocObj" is required // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes // without exhaustive walk over all expressions. impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE); assert(lvaTable[lclNum].lvSingleDef == 0); lvaTable[lclNum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def local\n", lclNum); lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */); newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF); } } goto CALL; case CEE_CALLI: /* CALLI does not respond to CONSTRAINED */ prefixFlags &= ~PREFIX_CONSTRAINED; FALLTHROUGH; case CEE_CALLVIRT: case CEE_CALL: // We can't call getCallInfo on the token from a CALLI, but we need it in // many other places. We unfortunately embed that knowledge here. if (opcode != CEE_CALLI) { _impResolveToken(CORINFO_TOKENKIND_Method); eeGetCallInfo(&resolvedToken, (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr, // this is how impImportCall invokes getCallInfo combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS), (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT : CORINFO_CALLINFO_NONE), &callInfo); } else { // Suppress uninitialized use warning. memset(&resolvedToken, 0, sizeof(resolvedToken)); memset(&callInfo, 0, sizeof(callInfo)); resolvedToken.token = getU4LittleEndian(codeAddr); resolvedToken.tokenContext = impTokenLookupContextHandle; resolvedToken.tokenScope = info.compScopeHnd; } CALL: // memberRef should be set. // newObjThisPtr should be set for CEE_NEWOBJ JITDUMP(" %08X", resolvedToken.token); constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0; bool newBBcreatedForTailcallStress; bool passedStressModeValidation; newBBcreatedForTailcallStress = false; passedStressModeValidation = true; if (compIsForInlining()) { if (compDonotInline()) { return; } // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks. assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0); } else { if (compTailCallStress()) { // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()? // Tail call stress only recognizes call+ret patterns and forces them to be // explicit tail prefixed calls. Also fgMakeBasicBlocks() under tail call stress // doesn't import 'ret' opcode following the call into the basic block containing // the call instead imports it to a new basic block. Note that fgMakeBasicBlocks() // is already checking that there is an opcode following call and hence it is // safe here to read next opcode without bounds check. newBBcreatedForTailcallStress = impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't // make it jump to RET. (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET bool hasTailPrefix = (prefixFlags & PREFIX_TAILCALL_EXPLICIT); if (newBBcreatedForTailcallStress && !hasTailPrefix) { // Do a more detailed evaluation of legality const bool returnFalseIfInvalid = true; const bool passedConstraintCheck = verCheckTailCallConstraint(opcode, &resolvedToken, constraintCall ? 
&constrainedResolvedToken : nullptr, returnFalseIfInvalid); if (passedConstraintCheck) { // Now check with the runtime CORINFO_METHOD_HANDLE declaredCalleeHnd = callInfo.hMethod; bool isVirtual = (callInfo.kind == CORINFO_VIRTUALCALL_STUB) || (callInfo.kind == CORINFO_VIRTUALCALL_VTABLE); CORINFO_METHOD_HANDLE exactCalleeHnd = isVirtual ? nullptr : declaredCalleeHnd; if (info.compCompHnd->canTailCall(info.compMethodHnd, declaredCalleeHnd, exactCalleeHnd, hasTailPrefix)) // Is it legal to do tailcall? { // Stress the tailcall. JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)"); prefixFlags |= PREFIX_TAILCALL_EXPLICIT; prefixFlags |= PREFIX_TAILCALL_STRESS; } else { // Runtime disallows this tail call JITDUMP(" (Tailcall stress: runtime preventing tailcall)"); passedStressModeValidation = false; } } else { // Constraints disallow this tail call JITDUMP(" (Tailcall stress: constraint check failed)"); passedStressModeValidation = false; } } } } // This is split up to avoid goto flow warnings. bool isRecursive; isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd); // If we've already disqualified this call as a tail call under tail call stress, // don't consider it for implicit tail calling either. // // When not running under tail call stress, we may mark this call as an implicit // tail call candidate. We'll do an "equivalent" validation during impImportCall. // // Note that when running under tail call stress, a call marked as explicit // tail prefixed will not be considered for implicit tail calling. if (passedStressModeValidation && impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive)) { if (compIsForInlining()) { #if FEATURE_TAILCALL_OPT_SHARED_RETURN // Are we inlining at an implicit tail call site? If so the we can flag // implicit tail call sites in the inline body. These call sites // often end up in non BBJ_RETURN blocks, so only flag them when // we're able to handle shared returns. if (impInlineInfo->iciCall->IsImplicitTailCall()) { JITDUMP("\n (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)"); prefixFlags |= PREFIX_TAILCALL_IMPLICIT; } #endif // FEATURE_TAILCALL_OPT_SHARED_RETURN } else { JITDUMP("\n (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)"); prefixFlags |= PREFIX_TAILCALL_IMPLICIT; } } // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call). explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0; readonlyCall = (prefixFlags & PREFIX_READONLY) != 0; if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ) { // All calls and delegates need a security callout. // For delegates, this is the call to the delegate constructor, not the access check on the // LD(virt)FTN. impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); } callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr, newObjThisPtr, prefixFlags, &callInfo, opcodeOffs); if (compDonotInline()) { // We do not check fails after lvaGrabTemp. It is covered with CoreCLR_13272 issue. assert((callTyp == TYP_UNDEF) || (compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS)); return; } if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we // have created a new BB after the "call" // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless. 
{ assert(!compIsForInlining()); goto RET; } break; case CEE_LDFLD: case CEE_LDSFLD: case CEE_LDFLDA: case CEE_LDSFLDA: { bool isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA); bool isLoadStatic = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA); /* Get the CP_Fieldref index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Field); JITDUMP(" %08X", resolvedToken.token); int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET; GenTree* obj = nullptr; typeInfo* tiObj = nullptr; CORINFO_CLASS_HANDLE objType = nullptr; // used for fields if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA) { tiObj = &impStackTop().seTypeInfo; StackEntry se = impPopStack(); objType = se.seTypeInfo.GetClassHandle(); obj = se.val; if (impIsThis(obj)) { aflags |= CORINFO_ACCESS_THIS; } } eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo); // Figure out the type of the member. We always call canAccessField, so you always need this // handle CorInfoType ciType = fieldInfo.fieldType; clsHnd = fieldInfo.structType; lclTyp = JITtype2varType(ciType); if (compIsForInlining()) { switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_STATIC_TLS: compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER); return; case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: /* We may be able to inline the field accessors in specific instantiations of generic * methods */ compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER); return; default: break; } if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT && clsHnd) { if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) && !(info.compFlags & CORINFO_FLG_FORCEINLINE)) { // Loading a static valuetype field usually will cause a JitHelper to be called // for the static base. This will bloat the code. compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS); if (compInlineResult->IsFailure()) { return; } } } } tiRetVal = verMakeTypeInfo(ciType, clsHnd); if (isLoadAddress) { tiRetVal.MakeByRef(); } else { tiRetVal.NormaliseForStack(); } // Perform this check always to ensure that we get field access exceptions even with // SkipVerification. impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper); // Raise InvalidProgramException if static load accesses non-static field if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0)) { BADCODE("static access on an instance field"); } // We are using ldfld/a on a static field. We allow it, but need to get side-effect from obj. if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr) { if (obj->gtFlags & GTF_SIDE_EFFECT) { obj = gtUnusedValNode(obj); impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } obj = nullptr; } /* Preserve 'small' int types */ if (!varTypeIsSmall(lclTyp)) { lclTyp = genActualType(lclTyp); } bool usesHelper = false; switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE: #ifdef FEATURE_READYTORUN case CORINFO_FIELD_INSTANCE_WITH_BASE: #endif { // If the object is a struct, what we really want is // for the field to operate on the address of the struct. 
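// E.g. for 'ldfld' on a struct value sitting on the stack, the struct's address is taken via
// impGetStructAddr below and the FIELD node is then built over that address.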
if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj)) { assert(opcode == CEE_LDFLD && objType != nullptr); obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true); } /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset); #ifdef FEATURE_READYTORUN if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE) { op1->AsField()->gtFieldLookup = fieldInfo.fieldLookup; } #endif op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT); if (fgAddrCouldBeNull(obj)) { op1->gtFlags |= GTF_EXCEPT; } // If the object is a BYREF then our target is a value class and // it could point anywhere, example a boxed class static int if (obj->gtType == TYP_BYREF) { op1->gtFlags |= GTF_IND_TGTANYWHERE; } DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass); if (StructHasOverlappingFields(typeFlags)) { op1->AsField()->gtFldMayOverlap = true; } // wrap it in a address of operator if necessary if (isLoadAddress) { op1 = gtNewOperNode(GT_ADDR, (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1); } else { if (compIsForInlining() && impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, nullptr, obj, impInlineInfo->inlArgInfo)) { impInlineInfo->thisDereferencedFirst = true; } } } break; case CORINFO_FIELD_STATIC_TLS: #ifdef TARGET_X86 // Legacy TLS access is implemented as intrinsic on x86 only /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset); op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation if (isLoadAddress) { op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1); } break; #else fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER; FALLTHROUGH; #endif case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp, clsHnd, nullptr); usesHelper = true; break; case CORINFO_FIELD_STATIC_ADDRESS: // Replace static read-only fields with constant if possible if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) && !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) && (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp))) { CorInfoInitClassResult initClassResult = info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd, impTokenLookupContextHandle); if (initClassResult & CORINFO_INITCLASS_INITIALIZED) { void** pFldAddr = nullptr; void* fldAddr = info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr); // We should always be able to access this static's address directly // assert(pFldAddr == nullptr); op1 = impImportStaticReadOnlyField(fldAddr, lclTyp); // Widen small types since we're propagating the value // instead of producing an indir. 
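// E.g. a 'static readonly' integral or floating field whose class is already initialized can be
// imported here as a plain constant node (a sketch; impImportStaticReadOnlyField reads the value
// from fldAddr), and the widening below keeps small types at their stack type.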
// op1->gtType = genActualType(lclTyp); goto FIELD_DONE; } } FALLTHROUGH; case CORINFO_FIELD_STATIC_RVA_ADDRESS: case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER: case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp); break; case CORINFO_FIELD_INTRINSIC_ZERO: { assert(aflags & CORINFO_ACCESS_GET); // Widen to stack type lclTyp = genActualType(lclTyp); op1 = gtNewIconNode(0, lclTyp); goto FIELD_DONE; } break; case CORINFO_FIELD_INTRINSIC_EMPTY_STRING: { assert(aflags & CORINFO_ACCESS_GET); // Import String.Empty as "" (GT_CNS_STR with a fake SconCPX = 0) op1 = gtNewSconNode(EMPTY_STRING_SCON, nullptr); goto FIELD_DONE; } break; case CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN: { assert(aflags & CORINFO_ACCESS_GET); // Widen to stack type lclTyp = genActualType(lclTyp); #if BIGENDIAN op1 = gtNewIconNode(0, lclTyp); #else op1 = gtNewIconNode(1, lclTyp); #endif goto FIELD_DONE; } break; default: assert(!"Unexpected fieldAccessor"); } if (!isLoadAddress) { if (prefixFlags & PREFIX_VOLATILE) { op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered if (!usesHelper) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) || (op1->OperGet() == GT_OBJ)); op1->gtFlags |= GTF_IND_VOLATILE; } } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { if (!usesHelper) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) || (op1->OperGet() == GT_OBJ)); op1->gtFlags |= GTF_IND_UNALIGNED; } } } /* Check if the class needs explicit initialization */ if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { GenTree* helperNode = impInitClass(&resolvedToken); if (compDonotInline()) { return; } if (helperNode != nullptr) { op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1); } } FIELD_DONE: impPushOnStack(op1, tiRetVal); } break; case CEE_STFLD: case CEE_STSFLD: { bool isStoreStatic = (opcode == CEE_STSFLD); CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type) /* Get the CP_Fieldref index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Field); JITDUMP(" %08X", resolvedToken.token); int aflags = CORINFO_ACCESS_SET; GenTree* obj = nullptr; typeInfo* tiObj = nullptr; typeInfo tiVal; /* Pull the value from the stack */ StackEntry se = impPopStack(); op2 = se.val; tiVal = se.seTypeInfo; clsHnd = tiVal.GetClassHandle(); if (opcode == CEE_STFLD) { tiObj = &impStackTop().seTypeInfo; obj = impPopStack().val; if (impIsThis(obj)) { aflags |= CORINFO_ACCESS_THIS; } } eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo); // Figure out the type of the member. We always call canAccessField, so you always need this // handle CorInfoType ciType = fieldInfo.fieldType; fieldClsHnd = fieldInfo.structType; lclTyp = JITtype2varType(ciType); if (compIsForInlining()) { /* Is this a 'special' (COM) field? or a TLS ref static field?, field stored int GC heap? or * per-inst static? 
*/ switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_STATIC_TLS: compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER); return; case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: /* We may be able to inline the field accessors in specific instantiations of generic * methods */ compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER); return; default: break; } } impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper); // Raise InvalidProgramException if static store accesses non-static field if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0)) { BADCODE("static access on an instance field"); } // We are using stfld on a static field. // We allow it, but need to eval any side-effects for obj if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr) { if (obj->gtFlags & GTF_SIDE_EFFECT) { obj = gtUnusedValNode(obj); impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } obj = nullptr; } /* Preserve 'small' int types */ if (!varTypeIsSmall(lclTyp)) { lclTyp = genActualType(lclTyp); } switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE: #ifdef FEATURE_READYTORUN case CORINFO_FIELD_INSTANCE_WITH_BASE: #endif { /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset); DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass); if (StructHasOverlappingFields(typeFlags)) { op1->AsField()->gtFldMayOverlap = true; } #ifdef FEATURE_READYTORUN if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE) { op1->AsField()->gtFieldLookup = fieldInfo.fieldLookup; } #endif op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT); if (fgAddrCouldBeNull(obj)) { op1->gtFlags |= GTF_EXCEPT; } // If object is a BYREF then our target is a value class and // it could point anywhere, example a boxed class static int if (obj->gtType == TYP_BYREF) { op1->gtFlags |= GTF_IND_TGTANYWHERE; } if (compIsForInlining() && impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, nullptr, obj, impInlineInfo->inlArgInfo)) { impInlineInfo->thisDereferencedFirst = true; } } break; case CORINFO_FIELD_STATIC_TLS: #ifdef TARGET_X86 // Legacy TLS access is implemented as intrinsic on x86 only /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset); op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation break; #else fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER; FALLTHROUGH; #endif case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp, clsHnd, op2); goto SPILL_APPEND; case CORINFO_FIELD_STATIC_ADDRESS: case CORINFO_FIELD_STATIC_RVA_ADDRESS: case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER: case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp); break; default: assert(!"Unexpected fieldAccessor"); } // Create the member assignment, unless we have a TYP_STRUCT. 
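// E.g. a store to a primitive-typed instance field becomes ASG(FIELD(obj, field), value) and is
// appended below; struct-typed stores are instead routed through impAssignStruct after spilling.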
bool deferStructAssign = (lclTyp == TYP_STRUCT); if (!deferStructAssign) { if (prefixFlags & PREFIX_VOLATILE) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND)); op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered op1->gtFlags |= GTF_IND_VOLATILE; } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND)); op1->gtFlags |= GTF_IND_UNALIGNED; } /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full trust apps). The reason this works is that JIT stores an i4 constant in Gentree union during importation and reads from the union as if it were a long during code generation. Though this can potentially read garbage, one can get lucky to have this working correctly. This code pattern is generated by Dev10 MC++ compiler while storing to fields when compiled with /O2 switch (default when compiling retail configs in Dev10) and a customer app has taken a dependency on it. To be backward compatible, we will explicitly add an upward cast here so that it works correctly always. Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT for V4.0. */ CLANG_FORMAT_COMMENT_ANCHOR; #ifndef TARGET_64BIT // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be // generated for ARM as well as x86, so the following IR will be accepted: // STMTx (IL 0x... ???) // * ASG long // +--* CLS_VAR long // \--* CNS_INT int 2 if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) && varTypeIsLong(op1->TypeGet())) { op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet()); } #endif #ifdef TARGET_64BIT // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType)) { op2->gtType = TYP_I_IMPL; } else { // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity // if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT)) { op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT); } // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity // if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL); } } #endif // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE // We insert a cast to the dest 'op1' type // if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)) { op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet()); } op1 = gtNewAssignNode(op1, op2); /* Mark the expression as containing an assignment */ op1->gtFlags |= GTF_ASG; } /* Check if the class needs explicit initialization */ if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { GenTree* helperNode = impInitClass(&resolvedToken); if (compDonotInline()) { return; } if (helperNode != nullptr) { op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1); } } /* stfld can interfere with value classes (consider the sequence ldloc, ldloca, ..., stfld, stloc). We will be conservative and spill all value class references from the stack. */ if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL))) { assert(tiObj); // If we can resolve the field to be within some local, // then just spill that local. 
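// E.g. if obj is ADDR(LCL_VAR V03), only stack references to V03 need to be spilled, rather than
// conservatively spilling every value class (or the whole evaluation stack).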
// GenTreeLclVarCommon* const lcl = obj->IsLocalAddrExpr(); if (lcl != nullptr) { impSpillLclRefs(lcl->GetLclNum()); } else if (impIsValueType(tiObj)) { impSpillEvalStack(); } else { impSpillValueClasses(); } } /* Spill any refs to the same member from the stack */ impSpillLclRefs((ssize_t)resolvedToken.hField); /* stsfld also interferes with indirect accesses (for aliased statics) and calls. But don't need to spill other statics as we have explicitly spilled this particular static field. */ impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD")); if (deferStructAssign) { op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL); } } goto APPEND; case CEE_NEWARR: { /* Get the class type index operand */ _impResolveToken(CORINFO_TOKENKIND_Newarr); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { // Need to restore array classes before creating array objects on the heap op1 = impTokenToHandle(&resolvedToken, nullptr, true /*mustRestoreHandle*/); if (op1 == nullptr) { // compDonotInline() return; } } tiRetVal = verMakeTypeInfo(resolvedToken.hClass); accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); /* Form the arglist: array class handle, size */ op2 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op2->gtType)); #ifdef TARGET_64BIT // The array helper takes a native int for array length. // So if we have an int, explicitly extend it to be a native int. if (genActualType(op2->TypeGet()) != TYP_I_IMPL) { if (op2->IsIntegralConst()) { op2->gtType = TYP_I_IMPL; } else { bool isUnsigned = false; op2 = gtNewCastNode(TYP_I_IMPL, op2, isUnsigned, TYP_I_IMPL); } } #endif // TARGET_64BIT #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF, gtNewCallArgs(op2)); usingReadyToRunHelper = (op1 != nullptr); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the newarr call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub // 3) Allocate the new array // Reason: performance (today, we'll always use the slow helper for the R2R generics case) // Need to restore array classes before creating array objects on the heap op1 = impTokenToHandle(&resolvedToken, nullptr, true /*mustRestoreHandle*/); if (op1 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { GenTreeCall::Use* args = gtNewCallArgs(op1, op2); /* Create a call to 'new' */ // Note that this only works for shared generic code because the same helper is used for all // reference array types op1 = gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, args); } op1->AsCall()->compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass; /* Remember that this basic block contains 'new' of an sd array */ block->bbFlags |= BBF_HAS_NEWARRAY; optMethodFlags |= OMF_HAS_NEWARRAY; /* Push the result of the call on the stack */ impPushOnStack(op1, tiRetVal); callTyp = TYP_REF; } break; case CEE_LOCALLOC: // We don't allow locallocs inside handlers if (block->hasHndIndex()) { BADCODE("Localloc can't be inside handler"); } // Get the size to allocate op2 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op2->gtType)); 
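// A sketch of the conversion handled below: a small, constant-sized localloc (e.g. C#'s
// 'stackalloc byte[64]') outside a loop is turned into a TYP_BLK local of that size and its
// address is pushed, so no GT_LCLHEAP is needed; otherwise a real GT_LCLHEAP node is created.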
if (verCurrentState.esStackDepth != 0) { BADCODE("Localloc can only be used when the stack is empty"); } // If the localloc is not in a loop and its size is a small constant, // create a new local var of TYP_BLK and return its address. { bool convertedToLocal = false; // Need to aggressively fold here, as even fixed-size locallocs // will have casts in the way. op2 = gtFoldExpr(op2); if (op2->IsIntegralConst()) { const ssize_t allocSize = op2->AsIntCon()->IconValue(); bool bbInALoop = impBlockIsInALoop(block); if (allocSize == 0) { // Result is nullptr JITDUMP("Converting stackalloc of 0 bytes to push null unmanaged pointer\n"); op1 = gtNewIconNode(0, TYP_I_IMPL); convertedToLocal = true; } else if ((allocSize > 0) && !bbInALoop) { // Get the size threshold for local conversion ssize_t maxSize = DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE; #ifdef DEBUG // Optionally allow this to be modified maxSize = JitConfig.JitStackAllocToLocalSize(); #endif // DEBUG if (allocSize <= maxSize) { const unsigned stackallocAsLocal = lvaGrabTemp(false DEBUGARG("stackallocLocal")); JITDUMP("Converting stackalloc of %zd bytes to new local V%02u\n", allocSize, stackallocAsLocal); lvaTable[stackallocAsLocal].lvType = TYP_BLK; lvaTable[stackallocAsLocal].lvExactSize = (unsigned)allocSize; lvaTable[stackallocAsLocal].lvIsUnsafeBuffer = true; op1 = gtNewLclvNode(stackallocAsLocal, TYP_BLK); op1 = gtNewOperNode(GT_ADDR, TYP_I_IMPL, op1); convertedToLocal = true; if (!this->opts.compDbgEnC) { // Ensure we have stack security for this method. // Reorder layout since the converted localloc is treated as an unsafe buffer. setNeedsGSSecurityCookie(); compGSReorderStackLayout = true; } } } } if (!convertedToLocal) { // Bail out if inlining and the localloc was not converted. // // Note we might consider allowing the inline, if the call // site is not in a loop. if (compIsForInlining()) { InlineObservation obs = op2->IsIntegralConst() ? InlineObservation::CALLEE_LOCALLOC_TOO_LARGE : InlineObservation::CALLSITE_LOCALLOC_SIZE_UNKNOWN; compInlineResult->NoteFatal(obs); return; } op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2); // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd. op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE); // Ensure we have stack security for this method. setNeedsGSSecurityCookie(); /* The FP register may not be back to the original value at the end of the method, even if the frame size is 0, as localloc may have modified it. So we will HAVE to reset it */ compLocallocUsed = true; } else { compLocallocOptimized = true; } } impPushOnStack(op1, tiRetVal); break; case CEE_ISINST: { /* Get the type token */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Casting); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, false); if (optTree != nullptr) { impPushOnStack(optTree, tiRetVal); } else { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { GenTreeCall* opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF, gtNewCallArgs(op1)); usingReadyToRunHelper = (opLookup != nullptr); op1 = (usingReadyToRunHelper ? 
opLookup : op1); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the isinstanceof_any call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate // stub // 3) Perform the 'is instance' check on the input object // Reason: performance (today, we'll always use the slow helper for the R2R generics case) op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false, opcodeOffs); } if (compDonotInline()) { return; } impPushOnStack(op1, tiRetVal); } break; } case CEE_REFANYVAL: // get the class handle and make a ICON node out of it _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = impTokenToHandle(&resolvedToken); if (op2 == nullptr) { // compDonotInline() return; } op1 = impPopStack().val; // make certain it is normalized; op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL); // Call helper GETREFANY(classHandle, op1); op1 = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, gtNewCallArgs(op2, op1)); impPushOnStack(op1, tiRetVal); break; case CEE_REFANYTYPE: op1 = impPopStack().val; // make certain it is normalized; op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL); if (op1->gtOper == GT_OBJ) { // Get the address of the refany op1 = op1->AsOp()->gtOp1; // Fetch the type from the correct slot op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL)); op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1); } else { assertImp(op1->gtOper == GT_MKREFANY); // The pointer may have side-effects if (op1->AsOp()->gtOp1->gtFlags & GTF_SIDE_EFFECT) { impAppendTree(op1->AsOp()->gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); #ifdef DEBUG impNoteLastILoffs(); #endif } // We already have the class handle op1 = op1->AsOp()->gtOp2; } // convert native TypeHandle to RuntimeTypeHandle { GenTreeCall::Use* helperArgs = gtNewCallArgs(op1); op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL, TYP_STRUCT, helperArgs); CORINFO_CLASS_HANDLE classHandle = impGetTypeHandleClass(); // The handle struct is returned in register op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType(); op1->AsCall()->gtRetClsHnd = classHandle; #if FEATURE_MULTIREG_RET op1->AsCall()->InitializeStructReturnType(this, classHandle, op1->AsCall()->GetUnmanagedCallConv()); #endif tiRetVal = typeInfo(TI_STRUCT, classHandle); } impPushOnStack(op1, tiRetVal); break; case CEE_LDTOKEN: { /* Get the Class index */ assertImp(sz == sizeof(unsigned)); lastLoadToken = codeAddr; _impResolveToken(CORINFO_TOKENKIND_Ldtoken); tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken); op1 = impTokenToHandle(&resolvedToken, nullptr, true); if (op1 == nullptr) { // compDonotInline() return; } helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE; assert(resolvedToken.hClass != nullptr); if (resolvedToken.hMethod != nullptr) { helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD; } else if (resolvedToken.hField != nullptr) { helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD; } GenTreeCall::Use* helperArgs = gtNewCallArgs(op1); op1 = gtNewHelperCallNode(helper, TYP_STRUCT, helperArgs); // The handle struct is returned in register and // it could be 
consumed both as `TYP_STRUCT` and `TYP_REF`. op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType(); #if FEATURE_MULTIREG_RET op1->AsCall()->InitializeStructReturnType(this, tokenType, op1->AsCall()->GetUnmanagedCallConv()); #endif op1->AsCall()->gtRetClsHnd = tokenType; tiRetVal = verMakeTypeInfo(tokenType); impPushOnStack(op1, tiRetVal); } break; case CEE_UNBOX: case CEE_UNBOX_ANY: { /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); bool runtimeLookup; op2 = impTokenToHandle(&resolvedToken, &runtimeLookup); if (op2 == nullptr) { assert(compDonotInline()); return; } // Run this always so we can get access exceptions even with SkipVerification. accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass)) { JITDUMP("\n Importing UNBOX.ANY(refClass) as CASTCLASS\n"); op1 = impPopStack().val; goto CASTCLASS; } /* Pop the object and create the unbox helper call */ /* You might think that for UNBOX_ANY we need to push a different */ /* (non-byref) type, but here we're making the tiRetVal that is used */ /* for the intermediate pointer which we then transfer onto the OBJ */ /* instruction. OBJ then creates the appropriate tiRetVal. */ op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF); helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass); assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE); // Check legality and profitability of inline expansion for unboxing. const bool canExpandInline = (helper == CORINFO_HELP_UNBOX); const bool shouldExpandInline = !compCurBB->isRunRarely() && opts.OptimizationEnabled(); if (canExpandInline && shouldExpandInline) { // See if we know anything about the type of op1, the object being unboxed. bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE clsHnd = gtGetClassHandle(op1, &isExact, &isNonNull); // We can skip the "exact" bit here as we are comparing to a value class. // compareTypesForEquality should bail on comparisions for shared value classes. if (clsHnd != NO_CLASS_HANDLE) { const TypeCompareState compare = info.compCompHnd->compareTypesForEquality(resolvedToken.hClass, clsHnd); if (compare == TypeCompareState::Must) { JITDUMP("\nOptimizing %s (%s) -- type test will succeed\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", eeGetClassName(clsHnd)); // For UNBOX, null check (if necessary), and then leave the box payload byref on the stack. 
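// Roughly: the pushed value is COMMA(NULLCHECK(box), ADD(box, TARGET_POINTER_SIZE)), i.e. a
// byref just past the box's method table pointer.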
if (opcode == CEE_UNBOX) { GenTree* cloneOperand; op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("optimized unbox clone")); GenTree* boxPayloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* boxPayloadAddress = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, boxPayloadOffset); GenTree* nullcheck = gtNewNullCheck(op1, block); GenTree* result = gtNewOperNode(GT_COMMA, TYP_BYREF, nullcheck, boxPayloadAddress); impPushOnStack(result, tiRetVal); break; } // For UNBOX.ANY load the struct from the box payload byref (the load will nullcheck) assert(opcode == CEE_UNBOX_ANY); GenTree* boxPayloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* boxPayloadAddress = gtNewOperNode(GT_ADD, TYP_BYREF, op1, boxPayloadOffset); impPushOnStack(boxPayloadAddress, tiRetVal); oper = GT_OBJ; goto OBJ; } else { JITDUMP("\nUnable to optimize %s -- can't resolve type comparison\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY"); } } else { JITDUMP("\nUnable to optimize %s -- class for [%06u] not known\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", dspTreeID(op1)); } JITDUMP("\n Importing %s as inline sequence\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY"); // we are doing normal unboxing // inline the common case of the unbox helper // UNBOX(exp) morphs into // clone = pop(exp); // ((*clone == typeToken) ? nop : helper(clone, typeToken)); // push(clone + TARGET_POINTER_SIZE) // GenTree* cloneOperand; op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("inline UNBOX clone1")); op1 = gtNewMethodTableLookup(op1); GenTree* condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2); op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("inline UNBOX clone2")); op2 = impTokenToHandle(&resolvedToken); if (op2 == nullptr) { // compDonotInline() return; } op1 = gtNewHelperCallNode(helper, TYP_VOID, gtNewCallArgs(op2, op1)); op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1); op1 = gtNewQmarkNode(TYP_VOID, condBox, op1->AsColon()); // QMARK nodes cannot reside on the evaluation stack. Because there // may be other trees on the evaluation stack that side-effect the // sources of the UNBOX operation we must spill the stack. impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); // Create the address-expression to reference past the object header // to the beginning of the value-type. Today this means adjusting // past the base of the objects vtable field which is pointer sized. op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2); } else { JITDUMP("\n Importing %s as helper call because %s\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal"); // Don't optimize, just call the helper and be done with it op1 = gtNewHelperCallNode(helper, (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT), gtNewCallArgs(op2, op1)); if (op1->gtType == TYP_STRUCT) { op1->AsCall()->gtRetClsHnd = resolvedToken.hClass; } } assert((helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF) || // Unbox helper returns a byref. (helper == CORINFO_HELP_UNBOX_NULLABLE && varTypeIsStruct(op1)) // UnboxNullable helper returns a struct. 
); /* ---------------------------------------------------------------------- | \ helper | | | | \ | | | | \ | CORINFO_HELP_UNBOX | CORINFO_HELP_UNBOX_NULLABLE | | \ | (which returns a BYREF) | (which returns a STRUCT) | | | opcode \ | | | |--------------------------------------------------------------------- | UNBOX | push the BYREF | spill the STRUCT to a local, | | | | push the BYREF to this local | |--------------------------------------------------------------------- | UNBOX_ANY | push a GT_OBJ of | push the STRUCT | | | the BYREF | For Linux when the | | | | struct is returned in two | | | | registers create a temp | | | | which address is passed to | | | | the unbox_nullable helper. | |--------------------------------------------------------------------- */ if (opcode == CEE_UNBOX) { if (helper == CORINFO_HELP_UNBOX_NULLABLE) { // Unbox nullable helper returns a struct type. // We need to spill it to a temp so than can take the address of it. // Here we need unsafe value cls check, since the address of struct is taken to be used // further along and potetially be exploitable. unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable")); lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */); op2 = gtNewLclvNode(tmp, TYP_STRUCT); op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp. op2 = gtNewLclvNode(tmp, TYP_STRUCT); op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2); op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2); } assert(op1->gtType == TYP_BYREF); } else { assert(opcode == CEE_UNBOX_ANY); if (helper == CORINFO_HELP_UNBOX) { // Normal unbox helper returns a TYP_BYREF. impPushOnStack(op1, tiRetVal); oper = GT_OBJ; goto OBJ; } assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!"); #if FEATURE_MULTIREG_RET if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass, CorInfoCallConvExtension::Managed)) { // Unbox nullable helper returns a TYP_STRUCT. // For the multi-reg case we need to spill it to a temp so that // we can pass the address to the unbox_nullable jit helper. unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable")); lvaTable[tmp].lvIsMultiRegArg = true; lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */); op2 = gtNewLclvNode(tmp, TYP_STRUCT); op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp. op2 = gtNewLclvNode(tmp, TYP_STRUCT); op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2); op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2); // In this case the return value of the unbox helper is TYP_BYREF. // Make sure the right type is placed on the operand type stack. impPushOnStack(op1, tiRetVal); // Load the struct. oper = GT_OBJ; assert(op1->gtType == TYP_BYREF); goto OBJ; } else #endif // !FEATURE_MULTIREG_RET { // If non register passable struct we have it materialized in the RetBuf. 
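// In this path the struct-typed helper call itself is pushed; no extra spill temp is created
// here (presumably the return-buffer plumbing is added when the call is morphed).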
assert(op1->gtType == TYP_STRUCT); tiRetVal = verMakeTypeInfo(resolvedToken.hClass); assert(tiRetVal.IsValueClass()); } } impPushOnStack(op1, tiRetVal); } break; case CEE_BOX: { /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Box); JITDUMP(" %08X", resolvedToken.token); accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); // Note BOX can be used on things that are not value classes, in which // case we get a NOP. However the verifier's view of the type on the // stack changes (in generic code a 'T' becomes a 'boxed T') if (!eeIsValueClass(resolvedToken.hClass)) { JITDUMP("\n Importing BOX(refClass) as NOP\n"); verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal; break; } // Look ahead for box idioms int matched = impBoxPatternMatch(&resolvedToken, codeAddr + sz, codeEndp); if (matched >= 0) { // Skip the matched IL instructions sz += matched; break; } impImportAndPushBox(&resolvedToken); if (compDonotInline()) { return; } } break; case CEE_SIZEOF: /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass)); impPushOnStack(op1, tiRetVal); break; case CEE_CASTCLASS: /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Casting); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; /* Pop the address and create the 'checked cast' helper call */ // At this point we expect typeRef to contain the token, op1 to contain the value being cast, // and op2 to contain code that creates the type handle corresponding to typeRef CASTCLASS: { GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, true); if (optTree != nullptr) { impPushOnStack(optTree, tiRetVal); } else { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { GenTreeCall* opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST, TYP_REF, gtNewCallArgs(op1)); usingReadyToRunHelper = (opLookup != nullptr); op1 = (usingReadyToRunHelper ? opLookup : op1); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the chkcastany call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate // stub // 3) Check the object on the stack for the type-cast // Reason: performance (today, we'll always use the slow helper for the R2R generics case) op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true, opcodeOffs); } if (compDonotInline()) { return; } /* Push the result back on the stack */ impPushOnStack(op1, tiRetVal); } } break; case CEE_THROW: // Any block with a throw is rarely executed. 
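// The throw is imported as a call to the CORINFO_HELP_THROW helper taking the popped exception
// object; any remaining stack entries are then evaluated for side effects only (EVAL_APPEND),
// since the helper does not return.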
block->bbSetRunRarely(); // Pop the exception object and create the 'throw' helper call op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, gtNewCallArgs(impPopStack().val)); // Fall through to clear out the eval stack. EVAL_APPEND: if (verCurrentState.esStackDepth > 0) { impEvalSideEffects(); } assert(verCurrentState.esStackDepth == 0); goto APPEND; case CEE_RETHROW: assert(!compIsForInlining()); if (info.compXcptnsCount == 0) { BADCODE("rethrow outside catch"); } /* Create the 'rethrow' helper call */ op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID); goto EVAL_APPEND; case CEE_INITOBJ: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = gtNewIconNode(0); // Value op1 = impPopStack().val; // Dest if (eeIsValueClass(resolvedToken.hClass)) { op1 = gtNewStructVal(resolvedToken.hClass, op1); if (op1->OperIs(GT_OBJ)) { gtSetObjGcInfo(op1->AsObj()); } } else { size = info.compCompHnd->getClassSize(resolvedToken.hClass); assert(size == TARGET_POINTER_SIZE); op1 = gtNewBlockVal(op1, size); } op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false); goto SPILL_APPEND; case CEE_INITBLK: op3 = impPopStack().val; // Size op2 = impPopStack().val; // Value op1 = impPopStack().val; // Dst addr if (op3->IsCnsIntOrI()) { size = (unsigned)op3->AsIntConCommon()->IconValue(); op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size)); op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false); } else { if (!op2->IsIntegralConst(0)) { op2 = gtNewOperNode(GT_INIT_VAL, TYP_INT, op2); } op1 = new (this, GT_STORE_DYN_BLK) GenTreeStoreDynBlk(op1, op2, op3); size = 0; if ((prefixFlags & PREFIX_VOLATILE) != 0) { op1->gtFlags |= GTF_BLK_VOLATILE; } } goto SPILL_APPEND; case CEE_CPBLK: op3 = impPopStack().val; // Size op2 = impPopStack().val; // Src addr op1 = impPopStack().val; // Dst addr if (op2->OperGet() == GT_ADDR) { op2 = op2->AsOp()->gtOp1; } else { op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2); } if (op3->IsCnsIntOrI()) { size = (unsigned)op3->AsIntConCommon()->IconValue(); op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size)); op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, true); } else { op1 = new (this, GT_STORE_DYN_BLK) GenTreeStoreDynBlk(op1, op2, op3); size = 0; if ((prefixFlags & PREFIX_VOLATILE) != 0) { op1->gtFlags |= GTF_BLK_VOLATILE; } } goto SPILL_APPEND; case CEE_CPOBJ: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); if (!eeIsValueClass(resolvedToken.hClass)) { op1 = impPopStack().val; // address to load from impBashVarAddrsToI(op1); assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF); op1 = gtNewOperNode(GT_IND, TYP_REF, op1); op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF; impPushOnStack(op1, typeInfo()); opcode = CEE_STIND_REF; lclTyp = TYP_REF; goto STIND; } op2 = impPopStack().val; // Src op1 = impPopStack().val; // Dest op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0)); goto SPILL_APPEND; case CEE_STOBJ: { assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); if (eeIsValueClass(resolvedToken.hClass)) { lclTyp = TYP_STRUCT; } else { lclTyp = TYP_REF; } if (lclTyp == TYP_REF) { opcode = CEE_STIND_REF; goto STIND; } CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass); if 
(impIsPrimitive(jitTyp)) { lclTyp = JITtype2varType(jitTyp); goto STIND; } op2 = impPopStack().val; // Value op1 = impPopStack().val; // Ptr assertImp(varTypeIsStruct(op2)); op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED)) { op1->gtFlags |= GTF_BLK_UNALIGNED; } goto SPILL_APPEND; } case CEE_MKREFANY: assert(!compIsForInlining()); // Being lazy here. Refanys are tricky in terms of gc tracking. // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany. JITDUMP("disabling struct promotion because of mkrefany\n"); fgNoStructPromotion = true; oper = GT_MKREFANY; assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = impTokenToHandle(&resolvedToken, nullptr, true); if (op2 == nullptr) { // compDonotInline() return; } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec. // But JIT32 allowed it, so we continue to allow it. assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT); // MKREFANY returns a struct. op2 is the class token. op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2); impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass())); break; case CEE_LDOBJ: { oper = GT_OBJ; assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); OBJ: tiRetVal = verMakeTypeInfo(resolvedToken.hClass); if (eeIsValueClass(resolvedToken.hClass)) { lclTyp = TYP_STRUCT; } else { lclTyp = TYP_REF; opcode = CEE_LDIND_REF; goto LDIND; } op1 = impPopStack().val; assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL); CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass); if (impIsPrimitive(jitTyp)) { op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1); // Could point anywhere, example a boxed class static int op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF; assertImp(varTypeIsArithmetic(op1->gtType)); } else { // OBJ returns a struct // and an inline argument which is the class token of the loaded obj op1 = gtNewObjNode(resolvedToken.hClass, op1); } op1->gtFlags |= GTF_EXCEPT; if (prefixFlags & PREFIX_UNALIGNED) { op1->gtFlags |= GTF_IND_UNALIGNED; } impPushOnStack(op1, tiRetVal); break; } case CEE_LDLEN: op1 = impPopStack().val; if (opts.OptimizationEnabled()) { /* Use GT_ARR_LENGTH operator so rng check opts see this */ GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_Array__length, block); op1 = arrLen; } else { /* Create the expression "*(array_addr + ArrLenOffs)" */ op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(OFFSETOF__CORINFO_Array__length, TYP_I_IMPL)); op1 = gtNewIndir(TYP_INT, op1); } /* Push the result back on the stack */ impPushOnStack(op1, tiRetVal); break; case CEE_BREAK: op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID); goto SPILL_APPEND; case CEE_NOP: if (opts.compDbgCode) { op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID); goto SPILL_APPEND; } break; /******************************** NYI *******************************/ case 0xCC: OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n"); FALLTHROUGH; case CEE_ILLEGAL: case CEE_MACRO_END: default: if (compIsForInlining()) 
{ compInlineResult->NoteFatal(InlineObservation::CALLEE_COMPILATION_ERROR); return; } BADCODE3("unknown opcode", ": %02X", (int)opcode); } codeAddr += sz; prevOpcode = opcode; prefixFlags = 0; } return; #undef _impResolveToken } #ifdef _PREFAST_ #pragma warning(pop) #endif // Push a local/argument treeon the operand stack void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal) { tiRetVal.NormaliseForStack(); if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr()) { tiRetVal.SetUninitialisedObjRef(); } impPushOnStack(op, tiRetVal); } //------------------------------------------------------------------------ // impCreateLocal: create a GT_LCL_VAR node to access a local that might need to be normalized on load // // Arguments: // lclNum -- The index into lvaTable // offset -- The offset to associate with the node // // Returns: // The node // GenTreeLclVar* Compiler::impCreateLocalNode(unsigned lclNum DEBUGARG(IL_OFFSET offset)) { var_types lclTyp; if (lvaTable[lclNum].lvNormalizeOnLoad()) { lclTyp = lvaGetRealType(lclNum); } else { lclTyp = lvaGetActualType(lclNum); } return gtNewLclvNode(lclNum, lclTyp DEBUGARG(offset)); } // Load a local/argument on the operand stack // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, const typeInfo& tiRetVal) { impPushVar(impCreateLocalNode(lclNum DEBUGARG(offset)), tiRetVal); } // Load an argument on the operand stack // Shared by the various CEE_LDARG opcodes // ilArgNum is the argument index as specified in IL. // It will be mapped to the correct lvaTable index void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset) { Verify(ilArgNum < info.compILargsCount, "bad arg num"); if (compIsForInlining()) { if (ilArgNum >= info.compArgsCount) { compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER); return; } impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo), impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo); } else { if (ilArgNum >= info.compArgsCount) { BADCODE("Bad IL"); } unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param if (lclNum == info.compThisArg) { lclNum = lvaArg0Var; } impLoadVar(lclNum, offset); } } // Load a local on the operand stack // Shared by the various CEE_LDLOC opcodes // ilLclNum is the local index as specified in IL. // It will be mapped to the correct lvaTable index void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset) { if (compIsForInlining()) { if (ilLclNum >= info.compMethodInfo->locals.numArgs) { compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER); return; } // Get the local type var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo; typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo; /* Have we allocated a temp for this local? 
*/ unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp")); // All vars of inlined methods should be !lvNormalizeOnLoad() assert(!lvaTable[lclNum].lvNormalizeOnLoad()); lclTyp = genActualType(lclTyp); impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal); } else { if (ilLclNum >= info.compMethodInfo->locals.numArgs) { BADCODE("Bad IL"); } unsigned lclNum = info.compArgsCount + ilLclNum; impLoadVar(lclNum, offset); } } #ifdef TARGET_ARM /************************************************************************************** * * When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the * dst struct, because struct promotion will turn it into a float/double variable while * the rhs will be an int/long variable. We don't code generate assignment of int into * a float, but there is nothing that might prevent us from doing so. The tree however * would like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int)) * * tmpNum - the lcl dst variable num that is a struct. * src - the src tree assigned to the dest that is a struct/int (when varargs call.) * hClass - the type handle for the struct variable. * * TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play, * however, we could do a codegen of transferring from int to float registers * (transfer, not a cast.) * */ void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* src, CORINFO_CLASS_HANDLE hClass) { if (src->gtOper == GT_CALL && src->AsCall()->IsVarargs() && IsHfa(hClass)) { int hfaSlots = GetHfaCount(hClass); var_types hfaType = GetHfaType(hClass); // If we have varargs we morph the method's return type to be "int" irrespective of its original // type: struct/float at importer because the ABI calls out return in integer registers. // We don't want struct promotion to replace an expression like this: // lclFld_int = callvar_int() into lclFld_float = callvar_int(); // This means an int is getting assigned to a float without a cast. Prevent the promotion. if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) || (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES)) { // Make sure this struct type stays as struct so we can receive the call in a struct. lvaTable[tmpNum].lvIsMultiRegRet = true; } } } #endif // TARGET_ARM #if FEATURE_MULTIREG_RET //------------------------------------------------------------------------ // impAssignMultiRegTypeToVar: ensure calls that return structs in multiple // registers return values to suitable temps. // // Arguments: // op -- call returning a struct in registers // hClass -- class handle for struct // // Returns: // Tree with reference to struct local to use as call return value. GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv)) { unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return")); impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL); GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType); // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. ret->gtFlags |= GTF_DONT_CSE; assert(IsMultiRegReturnedType(hClass, callConv)); // Mark the var so that fields are not promoted and stay together. 
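// E.g. a struct returned in a register pair must be stored into this temp as a unit, so its
// fields must not be promoted into independent locals.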
lvaTable[tmpNum].lvIsMultiRegRet = true; return ret; } #endif // FEATURE_MULTIREG_RET //------------------------------------------------------------------------ // impReturnInstruction: import a return or an explicit tail call // // Arguments: // prefixFlags -- active IL prefixes // opcode -- [in, out] IL opcode // // Returns: // True if import was successful (may fail for some inlinees) // bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) { const bool isTailCall = (prefixFlags & PREFIX_TAILCALL) != 0; #ifdef DEBUG // If we are importing an inlinee and have GC ref locals we always // need to have a spill temp for the return value. This temp // should have been set up in advance, over in fgFindBasicBlocks. if (compIsForInlining() && impInlineInfo->HasGcRefLocals() && (info.compRetType != TYP_VOID)) { assert(lvaInlineeReturnSpillTemp != BAD_VAR_NUM); } #endif // DEBUG GenTree* op2 = nullptr; GenTree* op1 = nullptr; CORINFO_CLASS_HANDLE retClsHnd = nullptr; if (info.compRetType != TYP_VOID) { StackEntry se = impPopStack(); retClsHnd = se.seTypeInfo.GetClassHandle(); op2 = se.val; if (!compIsForInlining()) { impBashVarAddrsToI(op2); op2 = impImplicitIorI4Cast(op2, info.compRetType); op2 = impImplicitR4orR8Cast(op2, info.compRetType); // Note that we allow TYP_I_IMPL<->TYP_BYREF transformation, but only TYP_I_IMPL<-TYP_REF. assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) || ((op2->TypeGet() == TYP_I_IMPL) && TypeIs(info.compRetType, TYP_BYREF)) || (op2->TypeIs(TYP_BYREF, TYP_REF) && (info.compRetType == TYP_I_IMPL)) || (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) || (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType))); #ifdef DEBUG if (!isTailCall && opts.compGcChecks && (info.compRetType == TYP_REF)) { // DDB 3483 : JIT Stress: early termination of GC ref's life time in exception code path // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with // one-return BB. assert(op2->gtType == TYP_REF); // confirm that the argument is a GC pointer (for debugging (GC stress)) GenTreeCall::Use* args = gtNewCallArgs(op2); op2 = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, args); if (verbose) { printf("\ncompGcChecks tree:\n"); gtDispTree(op2); } } #endif } else { if (verCurrentState.esStackDepth != 0) { assert(compIsForInlining()); JITDUMP("CALLSITE_COMPILATION_ERROR: inlinee's stack is not empty."); compInlineResult->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR); return false; } #ifdef DEBUG if (verbose) { printf("\n\n Inlinee Return expression (before normalization) =>\n"); gtDispTree(op2); } #endif // Make sure the type matches the original call. var_types returnType = genActualType(op2->gtType); var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType; if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT)) { originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass); } if (returnType != originalCallType) { // Allow TYP_BYREF to be returned as TYP_I_IMPL and vice versa. // Allow TYP_REF to be returned as TYP_I_IMPL and NOT vice verse. 
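// I.e. a byref or object reference may be returned where native int was expected (and a native
// int where a byref was expected), but a native int is never accepted where TYP_REF was
// expected, presumably because that would manufacture a GC reference from an arbitrary integer.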
if ((TypeIs(returnType, TYP_BYREF, TYP_REF) && (originalCallType == TYP_I_IMPL)) || ((returnType == TYP_I_IMPL) && TypeIs(originalCallType, TYP_BYREF))) { JITDUMP("Allowing return type mismatch: have %s, needed %s\n", varTypeName(returnType), varTypeName(originalCallType)); } else { JITDUMP("Return type mismatch: have %s, needed %s\n", varTypeName(returnType), varTypeName(originalCallType)); compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH); return false; } } // Below, we are going to set impInlineInfo->retExpr to the tree with the return // expression. At this point, retExpr could already be set if there are multiple // return blocks (meaning fgNeedReturnSpillTemp() == true) and one of // the other blocks already set it. If there is only a single return block, // retExpr shouldn't be set. However, this is not true if we reimport a block // with a return. In that case, retExpr will be set, then the block will be // reimported, but retExpr won't get cleared as part of setting the block to // be reimported. The reimported retExpr value should be the same, so even if // we don't unconditionally overwrite it, it shouldn't matter. if (info.compRetNativeType != TYP_STRUCT) { // compRetNativeType is not TYP_STRUCT. // This implies it could be either a scalar type or SIMD vector type or // a struct type that can be normalized to a scalar type. if (varTypeIsStruct(info.compRetType)) { noway_assert(info.compRetBuffArg == BAD_VAR_NUM); // adjust the type away from struct to integral // and no normalizing op2 = impFixupStructReturnType(op2, retClsHnd, info.compCallConv); } else { // Do we have to normalize? var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType); if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) && fgCastNeeded(op2, fncRealRetType)) { // Small-typed return values are normalized by the callee op2 = gtNewCastNode(TYP_INT, op2, false, fncRealRetType); } } if (fgNeedReturnSpillTemp()) { assert(info.compRetNativeType != TYP_VOID && (fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals())); // If this method returns a ref type, track the actual types seen // in the returns. if (info.compRetType == TYP_REF) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE returnClsHnd = gtGetClassHandle(op2, &isExact, &isNonNull); if (impInlineInfo->retExpr == nullptr) { // This is the first return, so best known type is the type // of this return value. impInlineInfo->retExprClassHnd = returnClsHnd; impInlineInfo->retExprClassHndIsExact = isExact; } else if (impInlineInfo->retExprClassHnd != returnClsHnd) { // This return site type differs from earlier seen sites, // so reset the info and we'll fall back to using the method's // declared return type for the return spill temp. impInlineInfo->retExprClassHnd = nullptr; impInlineInfo->retExprClassHndIsExact = false; } } impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); var_types lclRetType = lvaGetDesc(lvaInlineeReturnSpillTemp)->lvType; GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, lclRetType); op2 = tmpOp2; #ifdef DEBUG if (impInlineInfo->retExpr) { // Some other block(s) have seen the CEE_RET first. // Better they spilled to the same temp. 
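// I.e. every return site of a multi-return inlinee must funnel its value through the single
// lvaInlineeReturnSpillTemp local, so any previously recorded retExpr must be that same LCL_VAR.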
assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR); assert(impInlineInfo->retExpr->AsLclVarCommon()->GetLclNum() == op2->AsLclVarCommon()->GetLclNum()); } #endif } #ifdef DEBUG if (verbose) { printf("\n\n Inlinee Return expression (after normalization) =>\n"); gtDispTree(op2); } #endif // Report the return expression impInlineInfo->retExpr = op2; } else { // compRetNativeType is TYP_STRUCT. // This implies that struct return via RetBuf arg or multi-reg struct return GenTreeCall* iciCall = impInlineInfo->iciCall->AsCall(); // Assign the inlinee return into a spill temp. // spill temp only exists if there are multiple return points if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM) { // in this case we have to insert multiple struct copies to the temp // and the retexpr is just the temp. assert(info.compRetNativeType != TYP_VOID); assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()); impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); } #if defined(TARGET_ARM) || defined(UNIX_AMD64_ABI) #if defined(TARGET_ARM) // TODO-ARM64-NYI: HFA // TODO-AMD64-Unix and TODO-ARM once the ARM64 functionality is implemented the // next ifdefs could be refactored in a single method with the ifdef inside. if (IsHfa(retClsHnd)) { // Same as !IsHfa but just don't bother with impAssignStructPtr. #else // defined(UNIX_AMD64_ABI) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { // If single eightbyte, the return type would have been normalized and there won't be a temp var. // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes - // max allowed.) assert(retRegCount == MAX_RET_REG_COUNT); // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr. CLANG_FORMAT_COMMENT_ANCHOR; #endif // defined(UNIX_AMD64_ABI) if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { #if defined(TARGET_ARM) impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType); #else // defined(UNIX_AMD64_ABI) // The inlinee compiler has figured out the type of the temp already. Use it here. impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); #endif // defined(UNIX_AMD64_ABI) } } else { impInlineInfo->retExpr = op2; } } else #elif defined(TARGET_ARM64) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { assert(!iciCall->HasRetBufArg()); assert(retRegCount >= 2); if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { // The inlinee compiler has figured out the type of the temp already. Use it here. impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); } } else { impInlineInfo->retExpr = op2; } } else #elif defined(TARGET_X86) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { assert(!iciCall->HasRetBufArg()); assert(retRegCount == MAX_RET_REG_COUNT); if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { // The inlinee compiler has figured out the type of the temp already. Use it here. 
impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); } } else { impInlineInfo->retExpr = op2; } } else #endif // defined(TARGET_ARM64) { assert(iciCall->HasRetBufArg()); GenTree* dest = gtCloneExpr(iciCall->gtCallArgs->GetNode()); // spill temp only exists if there are multiple return points if (fgNeedReturnSpillTemp()) { // if this is the first return we have seen set the retExpr if (!impInlineInfo->retExpr) { impInlineInfo->retExpr = impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType), retClsHnd, (unsigned)CHECK_SPILL_ALL); } } else { impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL); } } } if (impInlineInfo->retExpr != nullptr) { impInlineInfo->retBB = compCurBB; } } } if (compIsForInlining()) { return true; } if (info.compRetType == TYP_VOID) { // return void op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID); } else if (info.compRetBuffArg != BAD_VAR_NUM) { // Assign value to return buff (first param) GenTree* retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF DEBUGARG(impCurStmtDI.GetLocation().GetOffset())); op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL); impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX). CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_AMD64) // x64 (System V and Win64) calling convention requires to // return the implicit return buffer explicitly (in RAX). // Change the return type to be BYREF. op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); #else // !defined(TARGET_AMD64) // In case of non-AMD64 targets the profiler hook requires to return the implicit RetBuf explicitly (in RAX). // In such case the return value of the function is changed to BYREF. // If profiler hook is not needed the return type of the function is TYP_VOID. if (compIsProfilerHookNeeded()) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #if defined(TARGET_ARM64) // On ARM64, the native instance calling convention variant // requires the implicit ByRef to be explicitly returned. else if (TargetOS::IsWindows && callConvIsInstanceMethodCallConv(info.compCallConv)) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #endif #if defined(TARGET_X86) else if (info.compCallConv != CorInfoCallConvExtension::Managed) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #endif else { // return void op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID); } #endif // !defined(TARGET_AMD64) } else if (varTypeIsStruct(info.compRetType)) { #if !FEATURE_MULTIREG_RET // For both ARM architectures the HFA native types are maintained as structs. // Also on System V AMD64 the multireg structs returns are also left as structs. 
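        // So when FEATURE_MULTIREG_RET is not available, any struct return must already
        // have been normalized to a primitive type by this point.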
        noway_assert(info.compRetNativeType != TYP_STRUCT);
#endif

        op2 = impFixupStructReturnType(op2, retClsHnd, info.compCallConv);
        // return op2
        var_types returnType = info.compRetType;
        op1 = gtNewOperNode(GT_RETURN, genActualType(returnType), op2);
    }
    else
    {
        // return op2
        op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
    }

    // We must have imported a tailcall and jumped to RET
    if (isTailCall)
    {
        assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));

        opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES

        // impImportCall() would have already appended TYP_VOID calls
        if (info.compRetType == TYP_VOID)
        {
            return true;
        }
    }

    impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
#ifdef DEBUG
    // Remember at which BC offset the tree was finished
    impNoteLastILoffs();
#endif
    return true;
}

/*****************************************************************************
 *  Mark the block as unimported.
 *  Note that the caller is responsible for calling impImportBlockPending(),
 *  with the appropriate stack-state
 */

inline void Compiler::impReimportMarkBlock(BasicBlock* block)
{
#ifdef DEBUG
    if (verbose && (block->bbFlags & BBF_IMPORTED))
    {
        printf("\n" FMT_BB " will be reimported\n", block->bbNum);
    }
#endif

    block->bbFlags &= ~BBF_IMPORTED;
}

/*****************************************************************************
 *  Mark the successors of the given block as unimported.
 *  Note that the caller is responsible for calling impImportBlockPending()
 *  for all the successors, with the appropriate stack-state.
 */

void Compiler::impReimportMarkSuccessors(BasicBlock* block)
{
    for (BasicBlock* const succBlock : block->Succs())
    {
        impReimportMarkBlock(succBlock);
    }
}

/*****************************************************************************
 *
 *  Exception filter wrapper that handles only the verification exception
 *  code; any other exception continues the search for an outer handler.
 */

LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
{
    if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
    {
        return EXCEPTION_EXECUTE_HANDLER;
    }

    return EXCEPTION_CONTINUE_SEARCH;
}

void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
{
    assert(block->hasTryIndex());
    assert(!compIsForInlining());

    unsigned  tryIndex = block->getTryIndex();
    EHblkDsc* HBtab    = ehGetDsc(tryIndex);

    if (isTryStart)
    {
        assert(block->bbFlags & BBF_TRY_BEG);

        // The Stack must be empty
        //
        if (block->bbStkDepth != 0)
        {
            BADCODE("Evaluation stack must be empty on entry into a try block");
        }
    }

    // Save the stack contents; we'll need to restore them later
    //
    SavedStack blockState;
    impSaveStackState(&blockState, false);

    while (HBtab != nullptr)
    {
        if (isTryStart)
        {
            // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
            //  We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
            //
            if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
            {
                // We trigger an invalid program exception here unless we have a try/fault region.
                //
                if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
                {
                    BADCODE(
                        "The 'this' pointer of an instance constructor is not initialized upon entry to a try region");
                }
                else
                {
                    // Allow a try/fault region to proceed.
                    assert(HBtab->HasFaultHandler());
                }
            }
        }

        // Recursively process the handler block, if we haven't already done so.
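        // The handler is entered with an empty evaluation stack, except that catch and filter
        // handlers receive the caught exception object (pushed via impPushCatchArgOnStack below).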
BasicBlock* hndBegBB = HBtab->ebdHndBeg; if (((hndBegBB->bbFlags & BBF_IMPORTED) == 0) && (impGetPendingBlockMember(hndBegBB) == 0)) { // Construct the proper verification stack state // either empty or one that contains just // the Exception Object that we are dealing with // verCurrentState.esStackDepth = 0; if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp)) { CORINFO_CLASS_HANDLE clsHnd; if (HBtab->HasFilter()) { clsHnd = impGetObjectClass(); } else { CORINFO_RESOLVED_TOKEN resolvedToken; resolvedToken.tokenContext = impTokenLookupContextHandle; resolvedToken.tokenScope = info.compScopeHnd; resolvedToken.token = HBtab->ebdTyp; resolvedToken.tokenType = CORINFO_TOKENKIND_Class; info.compCompHnd->resolveToken(&resolvedToken); clsHnd = resolvedToken.hClass; } // push catch arg the stack, spill to a temp if necessary // Note: can update HBtab->ebdHndBeg! hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd, false); } // Queue up the handler for importing // impImportBlockPending(hndBegBB); } // Process the filter block, if we haven't already done so. if (HBtab->HasFilter()) { /* @VERIFICATION : Ideally the end of filter state should get propagated to the catch handler, this is an incompleteness, but is not a security/compliance issue, since the only interesting state is the 'thisInit' state. */ BasicBlock* filterBB = HBtab->ebdFilter; if (((filterBB->bbFlags & BBF_IMPORTED) == 0) && (impGetPendingBlockMember(filterBB) == 0)) { verCurrentState.esStackDepth = 0; // push catch arg the stack, spill to a temp if necessary // Note: can update HBtab->ebdFilter! const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB); filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter); impImportBlockPending(filterBB); } } // This seems redundant ....?? if (verTrackObjCtorInitState && HBtab->HasFaultHandler()) { /* Recursively process the handler block */ verCurrentState.esStackDepth = 0; // Queue up the fault handler for importing // impImportBlockPending(HBtab->ebdHndBeg); } // Now process our enclosing try index (if any) // tryIndex = HBtab->ebdEnclosingTryIndex; if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX) { HBtab = nullptr; } else { HBtab = ehGetDsc(tryIndex); } } // Restore the stack contents impRestoreStackState(&blockState); } //*************************************************************** // Import the instructions for the given basic block. Perform // verification, throwing an exception on failure. Push any successor blocks that are enabled for the first // time, or whose verification pre-state is changed. #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif void Compiler::impImportBlock(BasicBlock* block) { // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to // handle them specially. In particular, there is no IL to import for them, but we do need // to mark them as imported and put their successors on the pending import list. 
if (block->bbFlags & BBF_INTERNAL) { JITDUMP("Marking BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", block->bbNum); block->bbFlags |= BBF_IMPORTED; for (BasicBlock* const succBlock : block->Succs()) { impImportBlockPending(succBlock); } return; } bool markImport; assert(block); /* Make the block globaly available */ compCurBB = block; #ifdef DEBUG /* Initialize the debug variables */ impCurOpcName = "unknown"; impCurOpcOffs = block->bbCodeOffs; #endif /* Set the current stack state to the merged result */ verResetCurrentState(block, &verCurrentState); /* Now walk the code and import the IL into GenTrees */ struct FilterVerificationExceptionsParam { Compiler* pThis; BasicBlock* block; }; FilterVerificationExceptionsParam param; param.pThis = this; param.block = block; PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param) { /* @VERIFICATION : For now, the only state propagation from try to it's handler is "thisInit" state (stack is empty at start of try). In general, for state that we track in verification, we need to model the possibility that an exception might happen at any IL instruction, so we really need to merge all states that obtain between IL instructions in a try block into the start states of all handlers. However we do not allow the 'this' pointer to be uninitialized when entering most kinds try regions (only try/fault are allowed to have an uninitialized this pointer on entry to the try) Fortunately, the stack is thrown away when an exception leads to a handler, so we don't have to worry about that. We DO, however, have to worry about the "thisInit" state. But only for the try/fault case. The only allowed transition is from TIS_Uninit to TIS_Init. So for a try/fault region for the fault handler block we will merge the start state of the try begin and the post-state of each block that is part of this try region */ // merge the start state of the try begin // if (pParam->block->bbFlags & BBF_TRY_BEG) { pParam->pThis->impVerifyEHBlock(pParam->block, true); } pParam->pThis->impImportBlockCode(pParam->block); // As discussed above: // merge the post-state of each block that is part of this try region // if (pParam->block->hasTryIndex()) { pParam->pThis->impVerifyEHBlock(pParam->block, false); } } PAL_EXCEPT_FILTER(FilterVerificationExceptions) { verHandleVerificationFailure(block DEBUGARG(false)); } PAL_ENDTRY if (compDonotInline()) { return; } assert(!compDonotInline()); markImport = false; SPILLSTACK: unsigned baseTmp = NO_BASE_TMP; // input temps assigned to successor blocks bool reimportSpillClique = false; BasicBlock* tgtBlock = nullptr; /* If the stack is non-empty, we might have to spill its contents */ if (verCurrentState.esStackDepth != 0) { impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something // on the stack, its lifetime is hard to determine, simply // don't reuse such temps. Statement* addStmt = nullptr; /* Do the successors of 'block' have any other predecessors ? We do not want to do some of the optimizations related to multiRef if we can reimport blocks */ unsigned multRef = impCanReimport ? unsigned(~0) : 0; switch (block->bbJumpKind) { case BBJ_COND: addStmt = impExtractLastStmt(); assert(addStmt->GetRootNode()->gtOper == GT_JTRUE); /* Note if the next block has more than one ancestor */ multRef |= block->bbNext->bbRefs; /* Does the next block have temps assigned? 
*/ baseTmp = block->bbNext->bbStkTempsIn; tgtBlock = block->bbNext; if (baseTmp != NO_BASE_TMP) { break; } /* Try the target of the jump then */ multRef |= block->bbJumpDest->bbRefs; baseTmp = block->bbJumpDest->bbStkTempsIn; tgtBlock = block->bbJumpDest; break; case BBJ_ALWAYS: multRef |= block->bbJumpDest->bbRefs; baseTmp = block->bbJumpDest->bbStkTempsIn; tgtBlock = block->bbJumpDest; break; case BBJ_NONE: multRef |= block->bbNext->bbRefs; baseTmp = block->bbNext->bbStkTempsIn; tgtBlock = block->bbNext; break; case BBJ_SWITCH: addStmt = impExtractLastStmt(); assert(addStmt->GetRootNode()->gtOper == GT_SWITCH); for (BasicBlock* const tgtBlock : block->SwitchTargets()) { multRef |= tgtBlock->bbRefs; // Thanks to spill cliques, we should have assigned all or none assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn)); baseTmp = tgtBlock->bbStkTempsIn; if (multRef > 1) { break; } } break; case BBJ_CALLFINALLY: case BBJ_EHCATCHRET: case BBJ_RETURN: case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: case BBJ_THROW: NO_WAY("can't have 'unreached' end of BB with non-empty stack"); break; default: noway_assert(!"Unexpected bbJumpKind"); break; } assert(multRef >= 1); /* Do we have a base temp number? */ bool newTemps = (baseTmp == NO_BASE_TMP); if (newTemps) { /* Grab enough temps for the whole stack */ baseTmp = impGetSpillTmpBase(block); } /* Spill all stack entries into temps */ unsigned level, tempNum; JITDUMP("\nSpilling stack entries into temps\n"); for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++) { GenTree* tree = verCurrentState.esStack[level].val; /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from the other. This should merge to a byref in unverifiable code. However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the successor would be imported assuming there was a TYP_I_IMPL on the stack. Thus the value would not get GC-tracked. Hence, change the temp to TYP_BYREF and reimport the successors. Note: We should only allow this in unverifiable code. */ if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL) { lvaTable[tempNum].lvType = TYP_BYREF; impReimportMarkSuccessors(block); markImport = true; } #ifdef TARGET_64BIT if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT) { // Some other block in the spill clique set this to "int", but now we have "native int". // Change the type and go back to re-import any blocks that used the wrong type. lvaTable[tempNum].lvType = TYP_I_IMPL; reimportSpillClique = true; } else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL) { // Spill clique has decided this should be "native int", but this block only pushes an "int". // Insert a sign-extension to "native int" so we match the clique. verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); } // Consider the case where one branch left a 'byref' on the stack and the other leaves // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64 // behavior instead of asserting and then generating bad code (where we save/restore the // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been // imported already, we need to change the type of the local and reimport the spill clique. 
// If the 'byref' side has imported, we insert a cast from int to 'native int' to match // the 'byref' size. if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT) { // Some other block in the spill clique set this to "int", but now we have "byref". // Change the type and go back to re-import any blocks that used the wrong type. lvaTable[tempNum].lvType = TYP_BYREF; reimportSpillClique = true; } else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF) { // Spill clique has decided this should be "byref", but this block only pushes an "int". // Insert a sign-extension to "native int" so we match the clique size. verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); } #endif // TARGET_64BIT if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT) { // Some other block in the spill clique set this to "float", but now we have "double". // Change the type and go back to re-import any blocks that used the wrong type. lvaTable[tempNum].lvType = TYP_DOUBLE; reimportSpillClique = true; } else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE) { // Spill clique has decided this should be "double", but this block only pushes a "float". // Insert a cast to "double" so we match the clique. verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, false, TYP_DOUBLE); } /* If addStmt has a reference to tempNum (can only happen if we are spilling to the temps already used by a previous block), we need to spill addStmt */ if (addStmt != nullptr && !newTemps && gtHasRef(addStmt->GetRootNode(), tempNum)) { GenTree* addTree = addStmt->GetRootNode(); if (addTree->gtOper == GT_JTRUE) { GenTree* relOp = addTree->AsOp()->gtOp1; assert(relOp->OperIsCompare()); var_types type = genActualType(relOp->AsOp()->gtOp1->TypeGet()); if (gtHasRef(relOp->AsOp()->gtOp1, tempNum)) { unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1")); impAssignTempGen(temp, relOp->AsOp()->gtOp1, level); type = genActualType(lvaTable[temp].TypeGet()); relOp->AsOp()->gtOp1 = gtNewLclvNode(temp, type); } if (gtHasRef(relOp->AsOp()->gtOp2, tempNum)) { unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2")); impAssignTempGen(temp, relOp->AsOp()->gtOp2, level); type = genActualType(lvaTable[temp].TypeGet()); relOp->AsOp()->gtOp2 = gtNewLclvNode(temp, type); } } else { assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->AsOp()->gtOp1->TypeGet())); unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH")); impAssignTempGen(temp, addTree->AsOp()->gtOp1, level); addTree->AsOp()->gtOp1 = gtNewLclvNode(temp, genActualType(addTree->AsOp()->gtOp1->TypeGet())); } } /* Spill the stack entry, and replace with the temp */ if (!impSpillStackEntry(level, tempNum #ifdef DEBUG , true, "Spill Stack Entry" #endif )) { if (markImport) { BADCODE("bad stack state"); } // Oops. Something went wrong when spilling. Bad code. 
verHandleVerificationFailure(block DEBUGARG(true)); goto SPILLSTACK; } } /* Put back the 'jtrue'/'switch' if we removed it earlier */ if (addStmt != nullptr) { impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE); } } // Some of the append/spill logic works on compCurBB assert(compCurBB == block); /* Save the tree list in the block */ impEndTreeList(block); // impEndTreeList sets BBF_IMPORTED on the block // We do *NOT* want to set it later than this because // impReimportSpillClique might clear it if this block is both a // predecessor and successor in the current spill clique assert(block->bbFlags & BBF_IMPORTED); // If we had a int/native int, or float/double collision, we need to re-import if (reimportSpillClique) { // This will re-import all the successors of block (as well as each of their predecessors) impReimportSpillClique(block); // For blocks that haven't been imported yet, we still need to mark them as pending import. for (BasicBlock* const succ : block->Succs()) { if ((succ->bbFlags & BBF_IMPORTED) == 0) { impImportBlockPending(succ); } } } else // the normal case { // otherwise just import the successors of block /* Does this block jump to any other blocks? */ for (BasicBlock* const succ : block->Succs()) { impImportBlockPending(succ); } } } #ifdef _PREFAST_ #pragma warning(pop) #endif /*****************************************************************************/ // // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in // impPendingBlockMembers). Merges the current verification state into the verification state of "block" // (its "pre-state"). void Compiler::impImportBlockPending(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\nimpImportBlockPending for " FMT_BB "\n", block->bbNum); } #endif // We will add a block to the pending set if it has not already been imported (or needs to be re-imported), // or if it has, but merging in a predecessor's post-state changes the block's pre-state. // (When we're doing verification, we always attempt the merge to detect verification errors.) // If the block has not been imported, add to pending set. bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0); // Initialize bbEntryState just the first time we try to add this block to the pending list // Just because bbEntryState is NULL, doesn't mean the pre-state wasn't previously set // We use NULL to indicate the 'common' state to avoid memory allocation if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) && (impGetPendingBlockMember(block) == 0)) { verInitBBEntryState(block, &verCurrentState); assert(block->bbStkDepth == 0); block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth); assert(addToPending); assert(impGetPendingBlockMember(block) == 0); } else { // The stack should have the same height on entry to the block from all its predecessors. 
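        // A mismatch here means the IL is malformed, so we bail out with NO_WAY below.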
if (block->bbStkDepth != verCurrentState.esStackDepth) { #ifdef DEBUG char buffer[400]; sprintf_s(buffer, sizeof(buffer), "Block at offset %4.4x to %4.4x in %0.200s entered with different stack depths.\n" "Previous depth was %d, current depth is %d", block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth, verCurrentState.esStackDepth); buffer[400 - 1] = 0; NO_WAY(buffer); #else NO_WAY("Block entered with different stack depths"); #endif } if (!addToPending) { return; } if (block->bbStkDepth > 0) { // We need to fix the types of any spill temps that might have changed: // int->native int, float->double, int->byref, etc. impRetypeEntryStateTemps(block); } // OK, we must add to the pending list, if it's not already in it. if (impGetPendingBlockMember(block) != 0) { return; } } // Get an entry to add to the pending list PendingDsc* dsc; if (impPendingFree) { // We can reuse one of the freed up dscs. dsc = impPendingFree; impPendingFree = dsc->pdNext; } else { // We have to create a new dsc dsc = new (this, CMK_Unknown) PendingDsc; } dsc->pdBB = block; dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth; dsc->pdThisPtrInit = verCurrentState.thisInitialized; // Save the stack trees for later if (verCurrentState.esStackDepth) { impSaveStackState(&dsc->pdSavedStack, false); } // Add the entry to the pending list dsc->pdNext = impPendingList; impPendingList = dsc; impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set. // Various assertions require us to now to consider the block as not imported (at least for // the final time...) block->bbFlags &= ~BBF_IMPORTED; #ifdef DEBUG if (verbose && 0) { printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum); } #endif } /*****************************************************************************/ // // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in // impPendingBlockMembers). Does *NOT* change the existing "pre-state" of the block. void Compiler::impReimportBlockPending(BasicBlock* block) { JITDUMP("\nimpReimportBlockPending for " FMT_BB, block->bbNum); assert(block->bbFlags & BBF_IMPORTED); // OK, we must add to the pending list, if it's not already in it. if (impGetPendingBlockMember(block) != 0) { return; } // Get an entry to add to the pending list PendingDsc* dsc; if (impPendingFree) { // We can reuse one of the freed up dscs. dsc = impPendingFree; impPendingFree = dsc->pdNext; } else { // We have to create a new dsc dsc = new (this, CMK_ImpStack) PendingDsc; } dsc->pdBB = block; if (block->bbEntryState) { dsc->pdThisPtrInit = block->bbEntryState->thisInitialized; dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth; dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack; } else { dsc->pdThisPtrInit = TIS_Bottom; dsc->pdSavedStack.ssDepth = 0; dsc->pdSavedStack.ssTrees = nullptr; } // Add the entry to the pending list dsc->pdNext = impPendingList; impPendingList = dsc; impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set. // Various assertions require us to now to consider the block as not imported (at least for // the final time...) 
block->bbFlags &= ~BBF_IMPORTED; #ifdef DEBUG if (verbose && 0) { printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum); } #endif } void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp) { if (comp->impBlockListNodeFreeList == nullptr) { return comp->getAllocator(CMK_BasicBlock).allocate<BlockListNode>(1); } else { BlockListNode* res = comp->impBlockListNodeFreeList; comp->impBlockListNodeFreeList = res->m_next; return res; } } void Compiler::FreeBlockListNode(Compiler::BlockListNode* node) { node->m_next = impBlockListNodeFreeList; impBlockListNodeFreeList = node; } void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback) { bool toDo = true; noway_assert(!fgComputePredsDone); if (!fgCheapPredsValid) { fgComputeCheapPreds(); } BlockListNode* succCliqueToDo = nullptr; BlockListNode* predCliqueToDo = new (this) BlockListNode(block); while (toDo) { toDo = false; // Look at the successors of every member of the predecessor to-do list. while (predCliqueToDo != nullptr) { BlockListNode* node = predCliqueToDo; predCliqueToDo = node->m_next; BasicBlock* blk = node->m_blk; FreeBlockListNode(node); for (BasicBlock* const succ : blk->Succs()) { // If it's not already in the clique, add it, and also add it // as a member of the successor "toDo" set. if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0) { callback->Visit(SpillCliqueSucc, succ); impSpillCliqueSetMember(SpillCliqueSucc, succ, 1); succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo); toDo = true; } } } // Look at the predecessors of every member of the successor to-do list. while (succCliqueToDo != nullptr) { BlockListNode* node = succCliqueToDo; succCliqueToDo = node->m_next; BasicBlock* blk = node->m_blk; FreeBlockListNode(node); for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next) { BasicBlock* predBlock = pred->block; // If it's not already in the clique, add it, and also add it // as a member of the predecessor "toDo" set. if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0) { callback->Visit(SpillCliquePred, predBlock); impSpillCliqueSetMember(SpillCliquePred, predBlock, 1); predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo); toDo = true; } } } } // If this fails, it means we didn't walk the spill clique properly and somehow managed // miss walking back to include the predecessor we started from. // This most likely cause: missing or out of date bbPreds assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0); } void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) { if (predOrSucc == SpillCliqueSucc) { assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor. blk->bbStkTempsIn = m_baseTmp; } else { assert(predOrSucc == SpillCliquePred); assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor. blk->bbStkTempsOut = m_baseTmp; } } void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) { // For Preds we could be a little smarter and just find the existing store // and re-type it/add a cast, but that is complicated and hopefully very rare, so // just re-import the whole block (just like we do for successors) if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0)) { // If we haven't imported this block and we're not going to (because it isn't on // the pending list) then just ignore it for now. 
// This block has either never been imported (EntryState == NULL) or it failed // verification. Neither state requires us to force it to be imported now. assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION)); return; } // For successors we have a valid verCurrentState, so just mark them for reimport // the 'normal' way // Unlike predecessors, we *DO* need to reimport the current block because the // initial import had the wrong entry state types. // Similarly, blocks that are currently on the pending list, still need to call // impImportBlockPending to fixup their entry state. if (predOrSucc == SpillCliqueSucc) { m_pComp->impReimportMarkBlock(blk); // Set the current stack state to that of the blk->bbEntryState m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState); assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry()); m_pComp->impImportBlockPending(blk); } else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0)) { // As described above, we are only visiting predecessors so they can // add the appropriate casts, since we have already done that for the current // block, it does not need to be reimported. // Nor do we need to reimport blocks that are still pending, but not yet // imported. // // For predecessors, we have no state to seed the EntryState, so we just have // to assume the existing one is correct. // If the block is also a successor, it will get the EntryState properly // updated when it is visited as a successor in the above "if" block. assert(predOrSucc == SpillCliquePred); m_pComp->impReimportBlockPending(blk); } } // Re-type the incoming lclVar nodes to match the varDsc. void Compiler::impRetypeEntryStateTemps(BasicBlock* blk) { if (blk->bbEntryState != nullptr) { EntryState* es = blk->bbEntryState; for (unsigned level = 0; level < es->esStackDepth; level++) { GenTree* tree = es->esStack[level].val; if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD)) { es->esStack[level].val->gtType = lvaGetDesc(tree->AsLclVarCommon())->TypeGet(); } } } } unsigned Compiler::impGetSpillTmpBase(BasicBlock* block) { if (block->bbStkTempsOut != NO_BASE_TMP) { return block->bbStkTempsOut; } #ifdef DEBUG if (verbose) { printf("\n*************** In impGetSpillTmpBase(" FMT_BB ")\n", block->bbNum); } #endif // DEBUG // Otherwise, choose one, and propagate to all members of the spill clique. // Grab enough temps for the whole stack. unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries")); SetSpillTempsBase callback(baseTmp); // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor // to one spill clique, and similarly can only be the successor to one spill clique impWalkSpillCliqueFromPred(block, &callback); return baseTmp; } void Compiler::impReimportSpillClique(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\n*************** In impReimportSpillClique(" FMT_BB ")\n", block->bbNum); } #endif // DEBUG // If we get here, it is because this block is already part of a spill clique // and one predecessor had an outgoing live stack slot of type int, and this // block has an outgoing live stack slot of type native int. // We need to reset these before traversal because they have already been set // by the previous walk to determine all the members of the spill clique. 
impInlineRoot()->impSpillCliquePredMembers.Reset(); impInlineRoot()->impSpillCliqueSuccMembers.Reset(); ReimportSpillClique callback(this); impWalkSpillCliqueFromPred(block, &callback); } // Set the pre-state of "block" (which should not have a pre-state allocated) to // a copy of "srcState", cloning tree pointers as required. void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState) { if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom) { block->bbEntryState = nullptr; return; } block->bbEntryState = getAllocator(CMK_Unknown).allocate<EntryState>(1); // block->bbEntryState.esRefcount = 1; block->bbEntryState->esStackDepth = srcState->esStackDepth; block->bbEntryState->thisInitialized = TIS_Bottom; if (srcState->esStackDepth > 0) { block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]); unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry); memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize); for (unsigned level = 0; level < srcState->esStackDepth; level++) { GenTree* tree = srcState->esStack[level].val; block->bbEntryState->esStack[level].val = gtCloneExpr(tree); } } if (verTrackObjCtorInitState) { verSetThisInit(block, srcState->thisInitialized); } return; } void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis) { assert(tis != TIS_Bottom); // Precondition. if (block->bbEntryState == nullptr) { block->bbEntryState = new (this, CMK_Unknown) EntryState(); } block->bbEntryState->thisInitialized = tis; } /* * Resets the current state to the state at the start of the basic block */ void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState) { if (block->bbEntryState == nullptr) { destState->esStackDepth = 0; destState->thisInitialized = TIS_Bottom; return; } destState->esStackDepth = block->bbEntryState->esStackDepth; if (destState->esStackDepth > 0) { unsigned stackSize = destState->esStackDepth * sizeof(StackEntry); memcpy(destState->esStack, block->bbStackOnEntry(), stackSize); } destState->thisInitialized = block->bbThisOnEntry(); return; } ThisInitState BasicBlock::bbThisOnEntry() const { return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom; } unsigned BasicBlock::bbStackDepthOnEntry() const { return (bbEntryState ? 
bbEntryState->esStackDepth : 0); } void BasicBlock::bbSetStack(void* stackBuffer) { assert(bbEntryState); assert(stackBuffer); bbEntryState->esStack = (StackEntry*)stackBuffer; } StackEntry* BasicBlock::bbStackOnEntry() const { assert(bbEntryState); return bbEntryState->esStack; } void Compiler::verInitCurrentState() { verTrackObjCtorInitState = false; verCurrentState.thisInitialized = TIS_Bottom; // initialize stack info verCurrentState.esStackDepth = 0; assert(verCurrentState.esStack != nullptr); // copy current state to entry state of first BB verInitBBEntryState(fgFirstBB, &verCurrentState); } Compiler* Compiler::impInlineRoot() { if (impInlineInfo == nullptr) { return this; } else { return impInlineInfo->InlineRoot; } } BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk) { if (predOrSucc == SpillCliquePred) { return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd()); } else { assert(predOrSucc == SpillCliqueSucc); return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd()); } } void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val) { if (predOrSucc == SpillCliquePred) { impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val); } else { assert(predOrSucc == SpillCliqueSucc); impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val); } } /***************************************************************************** * * Convert the instrs ("import") into our internal format (trees). The * basic flowgraph has already been constructed and is passed in. */ void Compiler::impImport() { #ifdef DEBUG if (verbose) { printf("*************** In impImport() for %s\n", info.compFullName); } #endif Compiler* inlineRoot = impInlineRoot(); if (info.compMaxStack <= SMALL_STACK_SIZE) { impStkSize = SMALL_STACK_SIZE; } else { impStkSize = info.compMaxStack; } if (this == inlineRoot) { // Allocate the stack contents verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize]; } else { // This is the inlinee compiler, steal the stack from the inliner compiler // (after ensuring that it is large enough). if (inlineRoot->impStkSize < impStkSize) { inlineRoot->impStkSize = impStkSize; inlineRoot->verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize]; } verCurrentState.esStack = inlineRoot->verCurrentState.esStack; } // initialize the entry state at start of method verInitCurrentState(); // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase). if (this == inlineRoot) // These are only used on the root of the inlining tree. { // We have initialized these previously, but to size 0. Make them larger. impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2); impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2); impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2); } inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2); inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2); inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2); impBlockListNodeFreeList = nullptr; #ifdef DEBUG impLastILoffsStmt = nullptr; impNestedStackSpill = false; #endif impBoxTemp = BAD_VAR_NUM; impPendingList = impPendingFree = nullptr; // Skip leading internal blocks. // These can arise from needing a leading scratch BB, from EH normalization, and from OSR entry redirects. 
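    // Internal fall-through (BBJ_NONE) blocks are simply skipped; under OSR the entry
    // redirect is a BBJ_ALWAYS whose target becomes the block we start importing from.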
// BasicBlock* entryBlock = fgFirstBB; while (entryBlock->bbFlags & BBF_INTERNAL) { JITDUMP("Marking leading BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", entryBlock->bbNum); entryBlock->bbFlags |= BBF_IMPORTED; if (entryBlock->bbJumpKind == BBJ_NONE) { entryBlock = entryBlock->bbNext; } else if (opts.IsOSR() && (entryBlock->bbJumpKind == BBJ_ALWAYS)) { entryBlock = entryBlock->bbJumpDest; } else { assert(!"unexpected bbJumpKind in entry sequence"); } } // Note for OSR we'd like to be able to verify this block must be // stack empty, but won't know that until we've imported...so instead // we'll BADCODE out if we mess up. // // (the concern here is that the runtime asks us to OSR a // different IL version than the one that matched the method that // triggered OSR). This should not happen but I might have the // IL versioning stuff wrong. // // TODO: we also currently expect this block to be a join point, // which we should verify over when we find jump targets. impImportBlockPending(entryBlock); /* Import blocks in the worker-list until there are no more */ while (impPendingList) { /* Remove the entry at the front of the list */ PendingDsc* dsc = impPendingList; impPendingList = impPendingList->pdNext; impSetPendingBlockMember(dsc->pdBB, 0); /* Restore the stack state */ verCurrentState.thisInitialized = dsc->pdThisPtrInit; verCurrentState.esStackDepth = dsc->pdSavedStack.ssDepth; if (verCurrentState.esStackDepth) { impRestoreStackState(&dsc->pdSavedStack); } /* Add the entry to the free list for reuse */ dsc->pdNext = impPendingFree; impPendingFree = dsc; /* Now import the block */ if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION) { verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true)); impEndTreeList(dsc->pdBB); } else { impImportBlock(dsc->pdBB); if (compDonotInline()) { return; } if (compIsForImportOnly()) { return; } } } #ifdef DEBUG if (verbose && info.compXcptnsCount) { printf("\nAfter impImport() added block for try,catch,finally"); fgDispBasicBlocks(); printf("\n"); } // Used in impImportBlockPending() for STRESS_CHK_REIMPORT for (BasicBlock* const block : Blocks()) { block->bbFlags &= ~BBF_VISITED; } #endif } // Checks if a typeinfo (usually stored in the type stack) is a struct. // The invariant here is that if it's not a ref or a method and has a class handle // it's a valuetype bool Compiler::impIsValueType(typeInfo* pTypeInfo) { if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd()) { return true; } else { return false; } } /***************************************************************************** * Check to see if the tree is the address of a local or the address of a field in a local. *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns true. */ bool Compiler::impIsAddressInLocal(const GenTree* tree, GenTree** lclVarTreeOut) { if (tree->gtOper != GT_ADDR) { return false; } GenTree* op = tree->AsOp()->gtOp1; while (op->gtOper == GT_FIELD) { op = op->AsField()->GetFldObj(); if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL. 
{ op = op->AsOp()->gtOp1; } else { return false; } } if (op->gtOper == GT_LCL_VAR) { if (lclVarTreeOut != nullptr) { *lclVarTreeOut = op; } return true; } else { return false; } } //------------------------------------------------------------------------ // impMakeDiscretionaryInlineObservations: make observations that help // determine the profitability of a discretionary inline // // Arguments: // pInlineInfo -- InlineInfo for the inline, or null for the prejit root // inlineResult -- InlineResult accumulating information about this inline // // Notes: // If inlining or prejitting the root, this method also makes // various observations about the method that factor into inline // decisions. It sets `compNativeSizeEstimate` as a side effect. void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult) { assert((pInlineInfo != nullptr && compIsForInlining()) || // Perform the actual inlining. (pInlineInfo == nullptr && !compIsForInlining()) // Calculate the static inlining hint for ngen. ); // If we're really inlining, we should just have one result in play. assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult)); // If this is a "forceinline" method, the JIT probably shouldn't have gone // to the trouble of estimating the native code size. Even if it did, it // shouldn't be relying on the result of this method. assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE); // Note if the caller contains NEWOBJ or NEWARR. Compiler* rootCompiler = impInlineRoot(); if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0) { inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY); } if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0) { inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ); } bool calleeIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0; bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0; if (isSpecialMethod) { if (calleeIsStatic) { inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR); } else { inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR); } } else if (!calleeIsStatic) { // Callee is an instance method. // // Check if the callee has the same 'this' as the root. if (pInlineInfo != nullptr) { GenTree* thisArg = pInlineInfo->iciCall->AsCall()->gtCallThisArg->GetNode(); assert(thisArg); bool isSameThis = impIsThis(thisArg); inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis); } } bool callsiteIsGeneric = (rootCompiler->info.compMethodInfo->args.sigInst.methInstCount != 0) || (rootCompiler->info.compMethodInfo->args.sigInst.classInstCount != 0); bool calleeIsGeneric = (info.compMethodInfo->args.sigInst.methInstCount != 0) || (info.compMethodInfo->args.sigInst.classInstCount != 0); if (!callsiteIsGeneric && calleeIsGeneric) { inlineResult->Note(InlineObservation::CALLSITE_NONGENERIC_CALLS_GENERIC); } // Inspect callee's arguments (and the actual values at the callsite for them) CORINFO_SIG_INFO sig = info.compMethodInfo->args; CORINFO_ARG_LIST_HANDLE sigArg = sig.args; GenTreeCall::Use* argUse = pInlineInfo == nullptr ? nullptr : pInlineInfo->iciCall->AsCall()->gtCallArgs; for (unsigned i = 0; i < info.compMethodInfo->args.numArgs; i++) { CORINFO_CLASS_HANDLE sigClass; CorInfoType corType = strip(info.compCompHnd->getArgType(&sig, sigArg, &sigClass)); GenTree* argNode = argUse == nullptr ? 
nullptr : argUse->GetNode()->gtSkipPutArgType(); if (corType == CORINFO_TYPE_CLASS) { sigClass = info.compCompHnd->getArgClass(&sig, sigArg); } else if (corType == CORINFO_TYPE_VALUECLASS) { inlineResult->Note(InlineObservation::CALLEE_ARG_STRUCT); } else if (corType == CORINFO_TYPE_BYREF) { sigClass = info.compCompHnd->getArgClass(&sig, sigArg); corType = info.compCompHnd->getChildType(sigClass, &sigClass); } if (argNode != nullptr) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE argCls = gtGetClassHandle(argNode, &isExact, &isNonNull); if (argCls != nullptr) { const bool isArgValueType = eeIsValueClass(argCls); // Exact class of the arg is known if (isExact && !isArgValueType) { inlineResult->Note(InlineObservation::CALLSITE_ARG_EXACT_CLS); if ((argCls != sigClass) && (sigClass != nullptr)) { // .. but the signature accepts a less concrete type. inlineResult->Note(InlineObservation::CALLSITE_ARG_EXACT_CLS_SIG_IS_NOT); } } // Arg is a reference type in the signature and a boxed value type was passed. else if (isArgValueType && (corType == CORINFO_TYPE_CLASS)) { inlineResult->Note(InlineObservation::CALLSITE_ARG_BOXED); } } if (argNode->OperIsConst()) { inlineResult->Note(InlineObservation::CALLSITE_ARG_CONST); } argUse = argUse->GetNext(); } sigArg = info.compCompHnd->getArgNext(sigArg); } // Note if the callee's return type is a value type if (info.compMethodInfo->args.retType == CORINFO_TYPE_VALUECLASS) { inlineResult->Note(InlineObservation::CALLEE_RETURNS_STRUCT); } // Note if the callee's class is a promotable struct if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0) { assert(structPromotionHelper != nullptr); if (structPromotionHelper->CanPromoteStructType(info.compClassHnd)) { inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE); } inlineResult->Note(InlineObservation::CALLEE_CLASS_VALUETYPE); } #ifdef FEATURE_SIMD // Note if this method is has SIMD args or return value if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn) { inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD); } #endif // FEATURE_SIMD // Roughly classify callsite frequency. InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED; // If this is a prejit root, or a maximally hot block... if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->isMaxBBWeight())) { frequency = InlineCallsiteFrequency::HOT; } // No training data. Look for loop-like things. // We consider a recursive call loop-like. Do not give the inlining boost to the method itself. // However, give it to things nearby. else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) && (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle)) { frequency = InlineCallsiteFrequency::LOOP; } else if (pInlineInfo->iciBlock->hasProfileWeight() && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT)) { frequency = InlineCallsiteFrequency::WARM; } // Now modify the multiplier based on where we're called from. else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR)) { frequency = InlineCallsiteFrequency::RARE; } else { frequency = InlineCallsiteFrequency::BORING; } // Also capture the block weight of the call site. // // In the prejit root case, assume at runtime there might be a hot call site // for this method, so we won't prematurely conclude this method should never // be inlined. 
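    // (That assumption is modeled by the prejitHotCallerWeight constant below.)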
// weight_t weight = 0; if (pInlineInfo != nullptr) { weight = pInlineInfo->iciBlock->bbWeight; } else { const weight_t prejitHotCallerWeight = 1000000.0; weight = prejitHotCallerWeight; } inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency)); inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, (int)(weight)); bool hasProfile = false; double profileFreq = 0.0; // If the call site has profile data, report the relative frequency of the site. // if ((pInlineInfo != nullptr) && rootCompiler->fgHaveSufficientProfileData()) { const weight_t callSiteWeight = pInlineInfo->iciBlock->bbWeight; const weight_t entryWeight = rootCompiler->fgFirstBB->bbWeight; profileFreq = fgProfileWeightsEqual(entryWeight, 0.0) ? 0.0 : callSiteWeight / entryWeight; hasProfile = true; assert(callSiteWeight >= 0); assert(entryWeight >= 0); } else if (pInlineInfo == nullptr) { // Simulate a hot callsite for PrejitRoot mode. hasProfile = true; profileFreq = 1.0; } inlineResult->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, hasProfile); inlineResult->NoteDouble(InlineObservation::CALLSITE_PROFILE_FREQUENCY, profileFreq); } /***************************************************************************** This method makes STATIC inlining decision based on the IL code. It should not make any inlining decision based on the context. If forceInline is true, then the inlining decision should not depend on performance heuristics (code size, etc.). */ void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle, CORINFO_METHOD_INFO* methInfo, bool forceInline, InlineResult* inlineResult) { unsigned codeSize = methInfo->ILCodeSize; // We shouldn't have made up our minds yet... assert(!inlineResult->IsDecided()); if (methInfo->EHcount) { inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH); return; } if ((methInfo->ILCode == nullptr) || (codeSize == 0)) { inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY); return; } // For now we don't inline varargs (import code can't handle it) if (methInfo->args.isVarArg()) { inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS); return; } // Reject if it has too many locals. // This is currently an implementation limit due to fixed-size arrays in the // inline info, rather than a performance heuristic. inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs); if (methInfo->locals.numArgs > MAX_INL_LCLS) { inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS); return; } // Make sure there aren't too many arguments. // This is currently an implementation limit due to fixed-size arrays in the // inline info, rather than a performance heuristic. 
inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs); if (methInfo->args.numArgs > MAX_INL_ARGS) { inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS); return; } // Note force inline state inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline); // Note IL code size inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize); if (inlineResult->IsFailure()) { return; } // Make sure maxstack is not too big inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack); if (inlineResult->IsFailure()) { return; } } /***************************************************************************** */ void Compiler::impCheckCanInline(GenTreeCall* call, CORINFO_METHOD_HANDLE fncHandle, unsigned methAttr, CORINFO_CONTEXT_HANDLE exactContextHnd, InlineCandidateInfo** ppInlineCandidateInfo, InlineResult* inlineResult) { // Either EE or JIT might throw exceptions below. // If that happens, just don't inline the method. struct Param { Compiler* pThis; GenTreeCall* call; CORINFO_METHOD_HANDLE fncHandle; unsigned methAttr; CORINFO_CONTEXT_HANDLE exactContextHnd; InlineResult* result; InlineCandidateInfo** ppInlineCandidateInfo; } param; memset(&param, 0, sizeof(param)); param.pThis = this; param.call = call; param.fncHandle = fncHandle; param.methAttr = methAttr; param.exactContextHnd = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle); param.result = inlineResult; param.ppInlineCandidateInfo = ppInlineCandidateInfo; bool success = eeRunWithErrorTrap<Param>( [](Param* pParam) { CorInfoInitClassResult initClassResult; #ifdef DEBUG const char* methodName; const char* className; methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className); if (JitConfig.JitNoInline()) { pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE); goto _exit; } #endif /* Try to get the code address/size for the method */ CORINFO_METHOD_INFO methInfo; if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo)) { pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO); goto _exit; } // Profile data allows us to avoid early "too many IL bytes" outs. pParam->result->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, pParam->pThis->fgHaveSufficientProfileData()); bool forceInline; forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE); pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result); if (pParam->result->IsFailure()) { assert(pParam->result->IsNever()); goto _exit; } // Speculatively check if initClass() can be done. // If it can be done, we will try to inline the method. initClassResult = pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */, pParam->exactContextHnd /* context */); if (initClassResult & CORINFO_INITCLASS_DONT_INLINE) { pParam->result->NoteFatal(InlineObservation::CALLSITE_CANT_CLASS_INIT); goto _exit; } // Given the EE the final say in whether to inline or not. 
// This should be last since for verifiable code, this can be expensive /* VM Inline check also ensures that the method is verifiable if needed */ CorInfoInline vmResult; vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle); if (vmResult == INLINE_FAIL) { pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE); } else if (vmResult == INLINE_NEVER) { pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE); } if (pParam->result->IsFailure()) { // Make sure not to report this one. It was already reported by the VM. pParam->result->SetReported(); goto _exit; } /* Get the method properties */ CORINFO_CLASS_HANDLE clsHandle; clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle); unsigned clsAttr; clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle); /* Get the return type */ var_types fncRetType; fncRetType = pParam->call->TypeGet(); #ifdef DEBUG var_types fncRealRetType; fncRealRetType = JITtype2varType(methInfo.args.retType); assert((genActualType(fncRealRetType) == genActualType(fncRetType)) || // <BUGNUM> VSW 288602 </BUGNUM> // In case of IJW, we allow to assign a native pointer to a BYREF. (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) || (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT))); #endif // Allocate an InlineCandidateInfo structure, // // Or, reuse the existing GuardedDevirtualizationCandidateInfo, // which was pre-allocated to have extra room. // InlineCandidateInfo* pInfo; if (pParam->call->IsGuardedDevirtualizationCandidate()) { pInfo = pParam->call->gtInlineCandidateInfo; } else { pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo; // Null out bits we don't use when we're just inlining pInfo->guardedClassHandle = nullptr; pInfo->guardedMethodHandle = nullptr; pInfo->guardedMethodUnboxedEntryHandle = nullptr; pInfo->likelihood = 0; pInfo->requiresInstMethodTableArg = false; } pInfo->methInfo = methInfo; pInfo->ilCallerHandle = pParam->pThis->info.compMethodHnd; pInfo->clsHandle = clsHandle; pInfo->exactContextHnd = pParam->exactContextHnd; pInfo->retExpr = nullptr; pInfo->preexistingSpillTemp = BAD_VAR_NUM; pInfo->clsAttr = clsAttr; pInfo->methAttr = pParam->methAttr; pInfo->initClassResult = initClassResult; pInfo->fncRetType = fncRetType; pInfo->exactContextNeedsRuntimeLookup = false; pInfo->inlinersContext = pParam->pThis->compInlineContext; // Note exactContextNeedsRuntimeLookup is reset later on, // over in impMarkInlineCandidate. *(pParam->ppInlineCandidateInfo) = pInfo; _exit:; }, &param); if (!success) { param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR); } } //------------------------------------------------------------------------ // impInlineRecordArgInfo: record information about an inline candidate argument // // Arguments: // pInlineInfo - inline info for the inline candidate // curArgVal - tree for the caller actual argument value // argNum - logical index of this argument // inlineResult - result of ongoing inline evaluation // // Notes: // // Checks for various inline blocking conditions and makes notes in // the inline info arg table about the properties of the actual. These // properties are used later by impInlineFetchArg to determine how best to // pass the argument into the inlinee. 
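//    Illustrative example (hypothetical call, not from the sources): for Foo(localInt, Bar() + 1), the first actual
//    would typically be recorded as argIsLclVar while the second gets argHasSideEff because of the nested call;
//    impInlineFetchArg later uses these flags to choose between direct substitution and a temp.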
void Compiler::impInlineRecordArgInfo(InlineInfo* pInlineInfo, GenTree* curArgVal, unsigned argNum, InlineResult* inlineResult) { InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum]; inlCurArgInfo->argNode = curArgVal; // Save the original tree, with PUT_ARG and RET_EXPR. curArgVal = curArgVal->gtSkipPutArgType(); curArgVal = curArgVal->gtRetExprVal(); if (curArgVal->gtOper == GT_MKREFANY) { inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY); return; } GenTree* lclVarTree; const bool isAddressInLocal = impIsAddressInLocal(curArgVal, &lclVarTree); if (isAddressInLocal && varTypeIsStruct(lclVarTree)) { inlCurArgInfo->argIsByRefToStructLocal = true; #ifdef FEATURE_SIMD if (lvaTable[lclVarTree->AsLclVarCommon()->GetLclNum()].lvSIMDType) { pInlineInfo->hasSIMDTypeArgLocalOrReturn = true; } #endif // FEATURE_SIMD } if (curArgVal->gtFlags & GTF_ALL_EFFECT) { inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0; inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0; } if (curArgVal->gtOper == GT_LCL_VAR) { inlCurArgInfo->argIsLclVar = true; /* Remember the "original" argument number */ INDEBUG(curArgVal->AsLclVar()->gtLclILoffs = argNum;) } if (curArgVal->IsInvariant()) { inlCurArgInfo->argIsInvariant = true; if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->AsIntCon()->gtIconVal == 0)) { // Abort inlining at this call site inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS); return; } } bool isExact = false; bool isNonNull = false; inlCurArgInfo->argIsExact = (gtGetClassHandle(curArgVal, &isExact, &isNonNull) != NO_CLASS_HANDLE) && isExact; // If the arg is a local that is address-taken, we can't safely // directly substitute it into the inlinee. // // Previously we'd accomplish this by setting "argHasLdargaOp" but // that has a stronger meaning: that the arg value can change in // the method body. Using that flag prevents type propagation, // which is safe in this case. // // Instead mark the arg as having a caller local ref. if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal)) { inlCurArgInfo->argHasCallerLocalRef = true; } #ifdef DEBUG if (verbose) { if (inlCurArgInfo->argIsThis) { printf("thisArg:"); } else { printf("\nArgument #%u:", argNum); } if (inlCurArgInfo->argIsLclVar) { printf(" is a local var"); } if (inlCurArgInfo->argIsInvariant) { printf(" is a constant"); } if (inlCurArgInfo->argHasGlobRef) { printf(" has global refs"); } if (inlCurArgInfo->argHasCallerLocalRef) { printf(" has caller local ref"); } if (inlCurArgInfo->argHasSideEff) { printf(" has side effects"); } if (inlCurArgInfo->argHasLdargaOp) { printf(" has ldarga effect"); } if (inlCurArgInfo->argHasStargOp) { printf(" has starg effect"); } if (inlCurArgInfo->argIsByRefToStructLocal) { printf(" is byref to a struct local"); } printf("\n"); gtDispTree(curArgVal); printf("\n"); } #endif } //------------------------------------------------------------------------ // impInlineInitVars: setup inline information for inlinee args and locals // // Arguments: // pInlineInfo - inline info for the inline candidate // // Notes: // This method primarily adds caller-supplied info to the inlArgInfo // and sets up the lclVarInfo table. // // For args, the inlArgInfo records properties of the actual argument // including the tree node that produces the arg value. 
This node is // usually the tree node present at the call, but may also differ in // various ways: // - when the call arg is a GT_RET_EXPR, we search back through the ret // expr chain for the actual node. Note this will either be the original // call (which will be a failed inline by this point), or the return // expression from some set of inlines. // - when argument type casting is needed the necessary casts are added // around the argument node. // - if an argument can be simplified by folding then the node here is the // folded value. // // The method may make observations that lead to marking this candidate as // a failed inline. If this happens the initialization is abandoned immediately // to try and reduce the jit time cost for a failed inline. void Compiler::impInlineInitVars(InlineInfo* pInlineInfo) { assert(!compIsForInlining()); GenTreeCall* call = pInlineInfo->iciCall; CORINFO_METHOD_INFO* methInfo = &pInlineInfo->inlineCandidateInfo->methInfo; unsigned clsAttr = pInlineInfo->inlineCandidateInfo->clsAttr; InlArgInfo* inlArgInfo = pInlineInfo->inlArgInfo; InlLclVarInfo* lclVarInfo = pInlineInfo->lclVarInfo; InlineResult* inlineResult = pInlineInfo->inlineResult; // Inlined methods always use the managed calling convention const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo, CorInfoCallConvExtension::Managed); /* init the argument stuct */ memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0])); GenTreeCall::Use* thisArg = call->gtCallThisArg; unsigned argCnt = 0; // Count of the arguments assert((methInfo->args.hasThis()) == (thisArg != nullptr)); if (thisArg != nullptr) { inlArgInfo[0].argIsThis = true; impInlineRecordArgInfo(pInlineInfo, thisArg->GetNode(), argCnt, inlineResult); if (inlineResult->IsFailure()) { return; } /* Increment the argument count */ argCnt++; } /* Record some information about each of the arguments */ bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0; #if USER_ARGS_COME_LAST unsigned typeCtxtArg = (thisArg != nullptr) ? 1 : 0; #else // USER_ARGS_COME_LAST unsigned typeCtxtArg = methInfo->args.totalILArgs(); #endif // USER_ARGS_COME_LAST for (GenTreeCall::Use& use : call->Args()) { if (hasRetBuffArg && (&use == call->gtCallArgs)) { continue; } // Ignore the type context argument if (hasTypeCtxtArg && (argCnt == typeCtxtArg)) { pInlineInfo->typeContextArg = typeCtxtArg; typeCtxtArg = 0xFFFFFFFF; continue; } GenTree* actualArg = gtFoldExpr(use.GetNode()); impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, inlineResult); if (inlineResult->IsFailure()) { return; } /* Increment the argument count */ argCnt++; } /* Make sure we got the arg number right */ assert(argCnt == methInfo->args.totalILArgs()); #ifdef FEATURE_SIMD bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn; #endif // FEATURE_SIMD /* We have typeless opcodes, get type information from the signature */ if (thisArg != nullptr) { lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle); lclVarInfo[0].lclHasLdlocaOp = false; #ifdef FEATURE_SIMD // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase // the inlining multiplier) for anything in that assembly. // But we only need to normalize it if it is a TYP_STRUCT // (which we need to do even if we have already set foundSIMDType). 
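// (isSIMDorHWSIMDClass matches both the System.Numerics vector types and the hardware intrinsic vector types, so a 'this' of either kind marks the inlinee as SIMD-interesting.)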
if (!foundSIMDType && isSIMDorHWSIMDClass(&(lclVarInfo[0].lclVerTypeInfo))) { foundSIMDType = true; } #endif // FEATURE_SIMD var_types sigType = ((clsAttr & CORINFO_FLG_VALUECLASS) != 0) ? TYP_BYREF : TYP_REF; lclVarInfo[0].lclTypeInfo = sigType; GenTree* thisArgNode = thisArg->GetNode(); assert(varTypeIsGC(thisArgNode->TypeGet()) || // "this" is managed ((thisArgNode->TypeGet() == TYP_I_IMPL) && // "this" is unmgd but the method's class doesnt care (clsAttr & CORINFO_FLG_VALUECLASS))); if (genActualType(thisArgNode->TypeGet()) != genActualType(sigType)) { if (sigType == TYP_REF) { /* The argument cannot be bashed into a ref (see bug 750871) */ inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF); return; } /* This can only happen with byrefs <-> ints/shorts */ assert(sigType == TYP_BYREF); assert((genActualType(thisArgNode->TypeGet()) == TYP_I_IMPL) || (thisArgNode->TypeGet() == TYP_BYREF)); lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL)); } } /* Init the types of the arguments and make sure the types * from the trees match the types in the signature */ CORINFO_ARG_LIST_HANDLE argLst; argLst = methInfo->args.args; unsigned i; for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst)) { var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args); lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst); #ifdef FEATURE_SIMD if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i].lclVerTypeInfo))) { // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've // found a SIMD type, even if this may not be a type we recognize (the assumption is that // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier). foundSIMDType = true; if (sigType == TYP_STRUCT) { var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle()); sigType = structType; } } #endif // FEATURE_SIMD lclVarInfo[i].lclTypeInfo = sigType; lclVarInfo[i].lclHasLdlocaOp = false; /* Does the tree type match the signature type? */ GenTree* inlArgNode = inlArgInfo[i].argNode; if ((sigType != inlArgNode->gtType) || inlArgNode->OperIs(GT_PUTARG_TYPE)) { assert(impCheckImplicitArgumentCoercion(sigType, inlArgNode->gtType)); assert(!varTypeIsStruct(inlArgNode->gtType) && !varTypeIsStruct(sigType)); /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints, but in bad IL cases with caller-callee signature mismatches we can see other types. Intentionally reject cases with mismatches so the jit is more flexible when encountering bad IL. */ bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) || (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) || (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType)); if (!isPlausibleTypeMatch) { inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE); return; } GenTree** pInlArgNode; if (inlArgNode->OperIs(GT_PUTARG_TYPE)) { // There was a widening or narrowing cast. GenTreeUnOp* putArgType = inlArgNode->AsUnOp(); pInlArgNode = &putArgType->gtOp1; inlArgNode = putArgType->gtOp1; } else { // The same size but different type of the arguments. pInlArgNode = &inlArgInfo[i].argNode; } /* Is it a narrowing or widening cast? 
* Widening casts are ok since the value computed is already * normalized to an int (on the IL stack) */ if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType)) { if (sigType == TYP_BYREF) { lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL)); } else if (inlArgNode->gtType == TYP_BYREF) { assert(varTypeIsIntOrI(sigType)); /* If possible bash the BYREF to an int */ if (inlArgNode->IsLocalAddrExpr() != nullptr) { inlArgNode->gtType = TYP_I_IMPL; lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL)); } else { /* Arguments 'int <- byref' cannot be changed */ inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT); return; } } else if (genTypeSize(sigType) < TARGET_POINTER_SIZE) { // Narrowing cast. if (inlArgNode->OperIs(GT_LCL_VAR)) { const unsigned lclNum = inlArgNode->AsLclVarCommon()->GetLclNum(); if (!lvaTable[lclNum].lvNormalizeOnLoad() && sigType == lvaGetRealType(lclNum)) { // We don't need to insert a cast here as the variable // was assigned a normalized value of the right type. continue; } } inlArgNode = gtNewCastNode(TYP_INT, inlArgNode, false, sigType); inlArgInfo[i].argIsLclVar = false; // Try to fold the node in case we have constant arguments. if (inlArgInfo[i].argIsInvariant) { inlArgNode = gtFoldExprConst(inlArgNode); assert(inlArgNode->OperIsConst()); } *pInlArgNode = inlArgNode; } #ifdef TARGET_64BIT else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType)) { // This should only happen for int -> native int widening inlArgNode = gtNewCastNode(genActualType(sigType), inlArgNode, false, sigType); inlArgInfo[i].argIsLclVar = false; /* Try to fold the node in case we have constant arguments */ if (inlArgInfo[i].argIsInvariant) { inlArgNode = gtFoldExprConst(inlArgNode); assert(inlArgNode->OperIsConst()); } *pInlArgNode = inlArgNode; } #endif // TARGET_64BIT } } } /* Init the types of the local variables */ CORINFO_ARG_LIST_HANDLE localsSig; localsSig = methInfo->locals.args; for (i = 0; i < methInfo->locals.numArgs; i++) { bool isPinned; var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned); lclVarInfo[i + argCnt].lclHasLdlocaOp = false; lclVarInfo[i + argCnt].lclTypeInfo = type; if (varTypeIsGC(type)) { if (isPinned) { JITDUMP("Inlinee local #%02u is pinned\n", i); lclVarInfo[i + argCnt].lclIsPinned = true; // Pinned locals may cause inlines to fail. inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS); if (inlineResult->IsFailure()) { return; } } pInlineInfo->numberOfGcRefLocals++; } else if (isPinned) { JITDUMP("Ignoring pin on inlinee local #%02u -- not a GC type\n", i); } lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig); // If this local is a struct type with GC fields, inform the inliner. It may choose to bail // out on the inline. if (type == TYP_STRUCT) { CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle(); DWORD typeFlags = info.compCompHnd->getClassAttribs(lclHandle); if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0) { inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT); if (inlineResult->IsFailure()) { return; } // Do further notification in the case where the call site is rare; some policies do // not track the relative hotness of call sites for "always" inline cases. 
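// (The CALLSITE_RARE_GC_STRUCT note below gives such policies a way to still discount GC structs when the call site is rarely run.)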
if (pInlineInfo->iciBlock->isRunRarely()) { inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT); if (inlineResult->IsFailure()) { return; } } } } localsSig = info.compCompHnd->getArgNext(localsSig); #ifdef FEATURE_SIMD if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo))) { foundSIMDType = true; if (supportSIMDTypes() && type == TYP_STRUCT) { var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle()); lclVarInfo[i + argCnt].lclTypeInfo = structType; } } #endif // FEATURE_SIMD } #ifdef FEATURE_SIMD if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDorHWSIMDClass(call->AsCall()->gtRetClsHnd)) { foundSIMDType = true; } pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType; #endif // FEATURE_SIMD } //------------------------------------------------------------------------ // impInlineFetchLocal: get a local var that represents an inlinee local // // Arguments: // lclNum -- number of the inlinee local // reason -- debug string describing purpose of the local var // // Returns: // Number of the local to use // // Notes: // This method is invoked only for locals actually used in the // inlinee body. // // Allocates a new temp if necessary, and copies key properties // over from the inlinee local var info. unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason)) { assert(compIsForInlining()); unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum]; if (tmpNum == BAD_VAR_NUM) { const InlLclVarInfo& inlineeLocal = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt]; const var_types lclTyp = inlineeLocal.lclTypeInfo; // The lifetime of this local might span multiple BBs. // So it is a long lifetime local. impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason)); // Copy over key info lvaTable[tmpNum].lvType = lclTyp; lvaTable[tmpNum].lvHasLdAddrOp = inlineeLocal.lclHasLdlocaOp; lvaTable[tmpNum].lvPinned = inlineeLocal.lclIsPinned; lvaTable[tmpNum].lvHasILStoreOp = inlineeLocal.lclHasStlocOp; lvaTable[tmpNum].lvHasMultipleILStoreOp = inlineeLocal.lclHasMultipleStlocOp; // Copy over class handle for ref types. Note this may be a // shared type -- someday perhaps we can get the exact // signature and pass in a more precise type. if (lclTyp == TYP_REF) { assert(lvaTable[tmpNum].lvSingleDef == 0); lvaTable[tmpNum].lvSingleDef = !inlineeLocal.lclHasMultipleStlocOp && !inlineeLocal.lclHasLdlocaOp; if (lvaTable[tmpNum].lvSingleDef) { JITDUMP("Marked V%02u as a single def temp\n", tmpNum); } lvaSetClass(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandleForObjRef()); } if (inlineeLocal.lclVerTypeInfo.IsStruct()) { if (varTypeIsStruct(lclTyp)) { lvaSetStruct(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */); } else { // This is a wrapped primitive. Make sure the verstate knows that lvaTable[tmpNum].lvVerTypeInfo = inlineeLocal.lclVerTypeInfo; } } #ifdef DEBUG // Sanity check that we're properly prepared for gc ref locals. if (varTypeIsGC(lclTyp)) { // Since there are gc locals we should have seen them earlier // and if there was a return value, set up the spill temp. assert(impInlineInfo->HasGcRefLocals()); assert((info.compRetNativeType == TYP_VOID) || fgNeedReturnSpillTemp()); } else { // Make sure all pinned locals count as gc refs. 
assert(!inlineeLocal.lclIsPinned); } #endif // DEBUG } return tmpNum; } //------------------------------------------------------------------------ // impInlineFetchArg: return tree node for argument value in an inlinee // // Arguments: // lclNum -- argument number in inlinee IL // inlArgInfo -- argument info for inlinee // lclVarInfo -- var info for inlinee // // Returns: // Tree for the argument's value. Often an inlinee-scoped temp // GT_LCL_VAR but can be other tree kinds, if the argument // expression from the caller can be directly substituted into the // inlinee body. // // Notes: // Must be used only for arguments -- use impInlineFetchLocal for // inlinee locals. // // Direct substitution is performed when the formal argument cannot // change value in the inlinee body (no starg or ldarga), and the // actual argument expression's value cannot be changed if it is // substituted it into the inlinee body. // // Even if an inlinee-scoped temp is returned here, it may later be // "bashed" to a caller-supplied tree when arguments are actually // passed (see fgInlinePrependStatements). Bashing can happen if // the argument ends up being single use and other conditions are // met. So the contents of the tree returned here may not end up // being the ones ultimately used for the argument. // // This method will side effect inlArgInfo. It should only be called // for actual uses of the argument in the inlinee. GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo) { // Cache the relevant arg and lcl info for this argument. // We will modify argInfo but not lclVarInfo. InlArgInfo& argInfo = inlArgInfo[lclNum]; const InlLclVarInfo& lclInfo = lclVarInfo[lclNum]; const bool argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp; const var_types lclTyp = lclInfo.lclTypeInfo; GenTree* op1 = nullptr; GenTree* argNode = argInfo.argNode->gtSkipPutArgType()->gtRetExprVal(); if (argInfo.argIsInvariant && !argCanBeModified) { // Directly substitute constants or addresses of locals // // Clone the constant. Note that we cannot directly use // argNode in the trees even if !argInfo.argIsUsed as this // would introduce aliasing between inlArgInfo[].argNode and // impInlineExpr. Then gtFoldExpr() could change it, causing // further references to the argument working off of the // bashed copy. op1 = gtCloneExpr(argNode); PREFIX_ASSUME(op1 != nullptr); argInfo.argTmpNum = BAD_VAR_NUM; // We may need to retype to ensure we match the callee's view of the type. // Otherwise callee-pass throughs of arguments can create return type // mismatches that block inlining. // // Note argument type mismatches that prevent inlining should // have been caught in impInlineInitVars. if (op1->TypeGet() != lclTyp) { op1->gtType = genActualType(lclTyp); } } else if (argInfo.argIsLclVar && !argCanBeModified && !argInfo.argHasCallerLocalRef) { // Directly substitute unaliased caller locals for args that cannot be modified // // Use the caller-supplied node if this is the first use. op1 = argNode; unsigned argLclNum = op1->AsLclVarCommon()->GetLclNum(); argInfo.argTmpNum = argLclNum; // Use an equivalent copy if this is the second or subsequent // use. // // Note argument type mismatches that prevent inlining should // have been caught in impInlineInitVars. If inlining is not prevented // but a cast is necessary, we similarly expect it to have been inserted then. // So here we may have argument type mismatches that are benign, for instance // passing a TYP_SHORT local (eg. 
normalized-on-load) as a TYP_INT arg. // The exception is when the inlining means we should start tracking the argument. if (argInfo.argIsUsed || ((lclTyp == TYP_BYREF) && (op1->TypeGet() != TYP_BYREF))) { assert(op1->gtOper == GT_LCL_VAR); assert(lclNum == op1->AsLclVar()->gtLclILoffs); // Create a new lcl var node - remember the argument lclNum op1 = impCreateLocalNode(argLclNum DEBUGARG(op1->AsLclVar()->gtLclILoffs)); // Start tracking things as a byref if the parameter is a byref. if (lclTyp == TYP_BYREF) { op1->gtType = TYP_BYREF; } } } else if (argInfo.argIsByRefToStructLocal && !argInfo.argHasStargOp) { /* Argument is a by-ref address to a struct, a normed struct, or its field. In these cases, don't spill the byref to a local, simply clone the tree and use it. This way we will increase the chance for this byref to be optimized away by a subsequent "dereference" operation. From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal. For example, if the caller is: ldloca.s V_1 // V_1 is a local struct call void Test.ILPart::RunLdargaOnPointerArg(int32*) and the callee being inlined has: .method public static void RunLdargaOnPointerArg(int32* ptrToInts) cil managed ldarga.s ptrToInts call void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**) then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR. */ assert(argNode->TypeGet() == TYP_BYREF || argNode->TypeGet() == TYP_I_IMPL); op1 = gtCloneExpr(argNode); } else { /* Argument is a complex expression - it must be evaluated into a temp */ if (argInfo.argHasTmp) { assert(argInfo.argIsUsed); assert(argInfo.argTmpNum < lvaCount); /* Create a new lcl var node - remember the argument lclNum */ op1 = gtNewLclvNode(argInfo.argTmpNum, genActualType(lclTyp)); /* This is the second or later use of the this argument, so we have to use the temp (instead of the actual arg) */ argInfo.argBashTmpNode = nullptr; } else { /* First time use */ assert(!argInfo.argIsUsed); /* Reserve a temp for the expression. * Use a large size node as we may change it later */ const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg")); lvaTable[tmpNum].lvType = lclTyp; // For ref types, determine the type of the temp. if (lclTyp == TYP_REF) { if (!argCanBeModified) { // If the arg can't be modified in the method // body, use the type of the value, if // known. Otherwise, use the declared type. assert(lvaTable[tmpNum].lvSingleDef == 0); lvaTable[tmpNum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tmpNum); lvaSetClass(tmpNum, argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef()); } else { // Arg might be modified, use the declared type of // the argument. lvaSetClass(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef()); } } assert(!lvaTable[tmpNum].IsAddressExposed()); if (argInfo.argHasLdargaOp) { lvaTable[tmpNum].lvHasLdAddrOp = 1; } if (lclInfo.lclVerTypeInfo.IsStruct()) { if (varTypeIsStruct(lclTyp)) { lvaSetStruct(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */); if (info.compIsVarArgs) { lvaSetStructUsedAsVarArg(tmpNum); } } else { // This is a wrapped primitive. 
Make sure the verstate knows that lvaTable[tmpNum].lvVerTypeInfo = lclInfo.lclVerTypeInfo; } } argInfo.argHasTmp = true; argInfo.argTmpNum = tmpNum; // If we require strict exception order, then arguments must // be evaluated in sequence before the body of the inlined method. // So we need to evaluate them to a temp. // Also, if arguments have global or local references, we need to // evaluate them to a temp before the inlined body as the // inlined body may be modifying the global ref. // TODO-1stClassStructs: We currently do not reuse an existing lclVar // if it is a struct, because it requires some additional handling. if ((!varTypeIsStruct(lclTyp) && !argInfo.argHasSideEff && !argInfo.argHasGlobRef && !argInfo.argHasCallerLocalRef)) { /* Get a *LARGE* LCL_VAR node */ op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp) DEBUGARG(lclNum)); /* Record op1 as the very first use of this argument. If there are no further uses of the arg, we may be able to use the actual arg node instead of the temp. If we do see any further uses, we will clear this. */ argInfo.argBashTmpNode = op1; } else { /* Get a small LCL_VAR node */ op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp)); /* No bashing of this argument */ argInfo.argBashTmpNode = nullptr; } } } // Mark this argument as used. argInfo.argIsUsed = true; return op1; } /****************************************************************************** Is this the original "this" argument to the call being inlined? Note that we do not inline methods with "starg 0", and so we do not need to worry about it. */ bool Compiler::impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo) { assert(compIsForInlining()); return (tree->gtOper == GT_LCL_VAR && tree->AsLclVarCommon()->GetLclNum() == inlArgInfo[0].argTmpNum); } //----------------------------------------------------------------------------- // impInlineIsGuaranteedThisDerefBeforeAnySideEffects: Check if a dereference in // the inlinee can guarantee that the "this" pointer is non-NULL. // // Arguments: // additionalTree - a tree to check for side effects // additionalCallArgs - a list of call args to check for side effects // dereferencedAddress - address expression being dereferenced // inlArgInfo - inlinee argument information // // Notes: // If we haven't hit a branch or a side effect, and we are dereferencing // from 'this' to access a field or make GTF_CALL_NULLCHECK call, // then we can avoid a separate null pointer check. // // The importer stack and current statement list are searched for side effects. // Trees that have been popped of the stack but haven't been appended to the // statement list and have to be checked for side effects may be provided via // additionalTree and additionalCallArgs. 
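//    Illustrative example (hypothetical inlinee): a simple getter such as 'int Get() { return this->m_field; }'
//    dereferences 'this' in the first block before any side effect, so when this method returns true the separate
//    null check on the inlined call can be omitted.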
// bool Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTree, GenTreeCall::Use* additionalCallArgs, GenTree* dereferencedAddress, InlArgInfo* inlArgInfo) { assert(compIsForInlining()); assert(opts.OptEnabled(CLFLG_INLINING)); BasicBlock* block = compCurBB; if (block != fgFirstBB) { return false; } if (!impInlineIsThis(dereferencedAddress, inlArgInfo)) { return false; } if ((additionalTree != nullptr) && GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTree->gtFlags)) { return false; } for (GenTreeCall::Use& use : GenTreeCall::UseList(additionalCallArgs)) { if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(use.GetNode()->gtFlags)) { return false; } } for (Statement* stmt : StatementList(impStmtList)) { GenTree* expr = stmt->GetRootNode(); if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags)) { return false; } } for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTreeFlags stackTreeFlags = verCurrentState.esStack[level].val->gtFlags; if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags)) { return false; } } return true; } //------------------------------------------------------------------------ // impMarkInlineCandidate: determine if this call can be subsequently inlined // // Arguments: // callNode -- call under scrutiny // exactContextHnd -- context handle for inlining // exactContextNeedsRuntimeLookup -- true if context required runtime lookup // callInfo -- call info from VM // // Notes: // Mostly a wrapper for impMarkInlineCandidateHelper that also undoes // guarded devirtualization for virtual calls where the method we'd // devirtualize to cannot be inlined. void Compiler::impMarkInlineCandidate(GenTree* callNode, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo) { GenTreeCall* call = callNode->AsCall(); // Do the actual evaluation impMarkInlineCandidateHelper(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo); // If this call is an inline candidate or is not a guarded devirtualization // candidate, we're done. if (call->IsInlineCandidate() || !call->IsGuardedDevirtualizationCandidate()) { return; } // If we can't inline the call we'd guardedly devirtualize to, // we undo the guarded devirtualization, as the benefit from // just guarded devirtualization alone is likely not worth the // extra jit time and code size. // // TODO: it is possibly interesting to allow this, but requires // fixes elsewhere too... JITDUMP("Revoking guarded devirtualization candidacy for call [%06u]: target method can't be inlined\n", dspTreeID(call)); call->ClearGuardedDevirtualizationCandidate(); } //------------------------------------------------------------------------ // impMarkInlineCandidateHelper: determine if this call can be subsequently // inlined // // Arguments: // callNode -- call under scrutiny // exactContextHnd -- context handle for inlining // exactContextNeedsRuntimeLookup -- true if context required runtime lookup // callInfo -- call info from VM // // Notes: // If callNode is an inline candidate, this method sets the flag // GTF_CALL_INLINE_CANDIDATE, and ensures that helper methods have // filled in the associated InlineCandidateInfo. // // If callNode is not an inline candidate, and the reason is // something that is inherent to the method being called, the // method may be marked as "noinline" to short-circuit any // future assessments of calls to this method. 
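//    The rejection checks below run roughly cheapest-first: debug codegen, disabled inlining, NextCallReturnAddress
//    use, explicit tail prefix, delegate Invoke, implicit recursive tail calls, virtual/helper/indirect calls,
//    catch/filter regions, noinline, synchronized, and PInvoke call-site legality are all screened before the more
//    expensive impCheckCanInline query at the end.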
void Compiler::impMarkInlineCandidateHelper(GenTreeCall* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo) { // Let the strategy know there's another call impInlineRoot()->m_inlineStrategy->NoteCall(); if (!opts.OptEnabled(CLFLG_INLINING)) { /* XXX Mon 8/18/2008 * This assert is misleading. The caller does not ensure that we have CLFLG_INLINING set before * calling impMarkInlineCandidate. However, if this assert trips it means that we're an inlinee and * CLFLG_MINOPT is set. That doesn't make a lot of sense. If you hit this assert, work back and * figure out why we did not set MAXOPT for this compile. */ assert(!compIsForInlining()); return; } if (compIsForImportOnly()) { // Don't bother creating the inline candidate during verification. // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification // that leads to the creation of multiple instances of Compiler. return; } InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate"); // Don't inline if not optimizing root method if (opts.compDbgCode) { inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN); return; } // Don't inline if inlining into this method is disabled. if (impInlineRoot()->m_inlineStrategy->IsInliningDisabled()) { inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE); return; } // Don't inline into callers that use the NextCallReturnAddress intrinsic. if (info.compHasNextCallRetAddr) { inlineResult.NoteFatal(InlineObservation::CALLER_USES_NEXT_CALL_RET_ADDR); return; } // Inlining candidate determination needs to honor only IL tail prefix. // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive). if (call->IsTailPrefixedCall()) { inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX); return; } // Delegate Invoke method doesn't have a body and gets special cased instead. // Don't even bother trying to inline it. if (call->IsDelegateInvoke()) { inlineResult.NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY); return; } // Tail recursion elimination takes precedence over inlining. // TODO: We may want to do some of the additional checks from fgMorphCall // here to reduce the chance we don't inline a call that won't be optimized // as a fast tail call or turned into a loop. if (gtIsRecursiveCall(call) && call->IsImplicitTailCall()) { inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL); return; } if (call->IsVirtual()) { // Allow guarded devirt calls to be treated as inline candidates, // but reject all other virtual calls. if (!call->IsGuardedDevirtualizationCandidate()) { inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT); return; } } /* Ignore helper calls */ if (call->gtCallType == CT_HELPER) { assert(!call->IsGuardedDevirtualizationCandidate()); inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER); return; } /* Ignore indirect calls */ if (call->gtCallType == CT_INDIRECT) { inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED); return; } /* I removed the check for BBJ_THROW. BBJ_THROW is usually marked as rarely run. This more or less * restricts the inliner to non-expanding inlines. I removed the check to allow for non-expanding * inlining in throw blocks. I should consider the same thing for catch and filter regions. 
*/ CORINFO_METHOD_HANDLE fncHandle; unsigned methAttr; if (call->IsGuardedDevirtualizationCandidate()) { if (call->gtGuardedDevirtualizationCandidateInfo->guardedMethodUnboxedEntryHandle != nullptr) { fncHandle = call->gtGuardedDevirtualizationCandidateInfo->guardedMethodUnboxedEntryHandle; } else { fncHandle = call->gtGuardedDevirtualizationCandidateInfo->guardedMethodHandle; } methAttr = info.compCompHnd->getMethodAttribs(fncHandle); } else { fncHandle = call->gtCallMethHnd; // Reuse method flags from the original callInfo if possible if (fncHandle == callInfo->hMethod) { methAttr = callInfo->methodFlags; } else { methAttr = info.compCompHnd->getMethodAttribs(fncHandle); } } #ifdef DEBUG if (compStressCompile(STRESS_FORCE_INLINE, 0)) { methAttr |= CORINFO_FLG_FORCEINLINE; } #endif // Check for COMPlus_AggressiveInlining if (compDoAggressiveInlining) { methAttr |= CORINFO_FLG_FORCEINLINE; } if (!(methAttr & CORINFO_FLG_FORCEINLINE)) { /* Don't bother inline blocks that are in the filter region */ if (bbInCatchHandlerILRange(compCurBB)) { #ifdef DEBUG if (verbose) { printf("\nWill not inline blocks that are in the catch handler region\n"); } #endif inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH); return; } if (bbInFilterILRange(compCurBB)) { #ifdef DEBUG if (verbose) { printf("\nWill not inline blocks that are in the filter region\n"); } #endif inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER); return; } } /* Check if we tried to inline this method before */ if (methAttr & CORINFO_FLG_DONT_INLINE) { inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE); return; } /* Cannot inline synchronized methods */ if (methAttr & CORINFO_FLG_SYNCH) { inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED); return; } /* Check legality of PInvoke callsite (for inlining of marshalling code) */ if (methAttr & CORINFO_FLG_PINVOKE) { // See comment in impCheckForPInvokeCall BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB; if (!impCanPInvokeInlineCallSite(block)) { inlineResult.NoteFatal(InlineObservation::CALLSITE_PINVOKE_EH); return; } } InlineCandidateInfo* inlineCandidateInfo = nullptr; impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult); if (inlineResult.IsFailure()) { return; } // The old value should be null OR this call should be a guarded devirtualization candidate. assert((call->gtInlineCandidateInfo == nullptr) || call->IsGuardedDevirtualizationCandidate()); // The new value should not be null. assert(inlineCandidateInfo != nullptr); inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup; call->gtInlineCandidateInfo = inlineCandidateInfo; // If we're in an inlinee compiler, and have a return spill temp, and this inline candidate // is also a tail call candidate, it can use the same return spill temp. // if (compIsForInlining() && call->CanTailCall() && (impInlineInfo->inlineCandidateInfo->preexistingSpillTemp != BAD_VAR_NUM)) { inlineCandidateInfo->preexistingSpillTemp = impInlineInfo->inlineCandidateInfo->preexistingSpillTemp; JITDUMP("Inline candidate [%06u] can share spill temp V%02u\n", dspTreeID(call), inlineCandidateInfo->preexistingSpillTemp); } // Mark the call node as inline candidate. call->gtFlags |= GTF_CALL_INLINE_CANDIDATE; // Let the strategy know there's another candidate. 
impInlineRoot()->m_inlineStrategy->NoteCandidate(); // Since we're not actually inlining yet, and this call site is // still just an inline candidate, there's nothing to report. inlineResult.SetReported(); } /******************************************************************************/ // Returns true if the given intrinsic will be implemented by target-specific // instructions bool Compiler::IsTargetIntrinsic(NamedIntrinsic intrinsicName) { #if defined(TARGET_XARCH) switch (intrinsicName) { // AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1 // instructions to directly compute round/ceiling/floor/truncate. case NI_System_Math_Abs: case NI_System_Math_Sqrt: return true; case NI_System_Math_Ceiling: case NI_System_Math_Floor: case NI_System_Math_Truncate: case NI_System_Math_Round: return compOpportunisticallyDependsOn(InstructionSet_SSE41); case NI_System_Math_FusedMultiplyAdd: return compOpportunisticallyDependsOn(InstructionSet_FMA); default: return false; } #elif defined(TARGET_ARM64) switch (intrinsicName) { case NI_System_Math_Abs: case NI_System_Math_Ceiling: case NI_System_Math_Floor: case NI_System_Math_Truncate: case NI_System_Math_Round: case NI_System_Math_Sqrt: case NI_System_Math_Max: case NI_System_Math_Min: return true; case NI_System_Math_FusedMultiplyAdd: return compOpportunisticallyDependsOn(InstructionSet_AdvSimd); default: return false; } #elif defined(TARGET_ARM) switch (intrinsicName) { case NI_System_Math_Abs: case NI_System_Math_Round: case NI_System_Math_Sqrt: return true; default: return false; } #else // TODO: This portion of logic is not implemented for other arch. // The reason for returning true is that on all other arch the only intrinsics // enabled are target intrinsics. return true; #endif } /******************************************************************************/ // Returns true if the given intrinsic will be implemented by calling System.Math // methods. bool Compiler::IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName) { // Currently, if a math intrinsic is not implemented by target-specific // instructions, it will be implemented by a System.Math call. In the // future, if we turn to implementing some of them with helper calls, // this predicate needs to be revisited.
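// For example (illustrative): on x86/x64 without SSE4.1, NI_System_Math_Floor is not reported as a target intrinsic above, so it ends up being emitted as a call to the System.Math method.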
return !IsTargetIntrinsic(intrinsicName); } bool Compiler::IsMathIntrinsic(NamedIntrinsic intrinsicName) { switch (intrinsicName) { case NI_System_Math_Abs: case NI_System_Math_Acos: case NI_System_Math_Acosh: case NI_System_Math_Asin: case NI_System_Math_Asinh: case NI_System_Math_Atan: case NI_System_Math_Atanh: case NI_System_Math_Atan2: case NI_System_Math_Cbrt: case NI_System_Math_Ceiling: case NI_System_Math_Cos: case NI_System_Math_Cosh: case NI_System_Math_Exp: case NI_System_Math_Floor: case NI_System_Math_FMod: case NI_System_Math_FusedMultiplyAdd: case NI_System_Math_ILogB: case NI_System_Math_Log: case NI_System_Math_Log2: case NI_System_Math_Log10: case NI_System_Math_Max: case NI_System_Math_Min: case NI_System_Math_Pow: case NI_System_Math_Round: case NI_System_Math_Sin: case NI_System_Math_Sinh: case NI_System_Math_Sqrt: case NI_System_Math_Tan: case NI_System_Math_Tanh: case NI_System_Math_Truncate: { assert((intrinsicName > NI_SYSTEM_MATH_START) && (intrinsicName < NI_SYSTEM_MATH_END)); return true; } default: { assert((intrinsicName < NI_SYSTEM_MATH_START) || (intrinsicName > NI_SYSTEM_MATH_END)); return false; } } } bool Compiler::IsMathIntrinsic(GenTree* tree) { return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->AsIntrinsic()->gtIntrinsicName); } //------------------------------------------------------------------------ // impDevirtualizeCall: Attempt to change a virtual vtable call into a // normal call // // Arguments: // call -- the call node to examine/modify // pResolvedToken -- [IN] the resolved token used to create the call. Used for R2R. // method -- [IN/OUT] the method handle for call. Updated iff call devirtualized. // methodFlags -- [IN/OUT] flags for the method to call. Updated iff call devirtualized. // pContextHandle -- [IN/OUT] context handle for the call. Updated iff call devirtualized. // pExactContextHandle -- [OUT] updated context handle iff call devirtualized // isLateDevirtualization -- if devirtualization is happening after importation // isExplicitTailCall -- [IN] true if we plan on using an explicit tail call // ilOffset -- IL offset of the call // // Notes: // Virtual calls in IL will always "invoke" the base class method. // // This transformation looks for evidence that the type of 'this' // in the call is exactly known, is a final class or would invoke // a final method, and if that and other safety checks pan out, // modifies the call and the call info to create a direct call. // // This transformation is initially done in the importer and not // in some subsequent optimization pass because we want it to be // upstream of inline candidate identification. // // However, later phases may supply improved type information that // can enable further devirtualization. We currently reinvoke this // code after inlining, if the return value of the inlined call is // the 'this obj' of a subsequent virtual call. // // If devirtualization succeeds and the call's this object is a // (boxed) value type, the jit will ask the EE for the unboxed entry // point. If this exists, the jit will invoke the unboxed entry // on the box payload. In addition, if the boxing operation is // visible to the jit and the call is the only consumer of the box, // the jit will try to analyze the box to see if the call can instead // be made on a local copy. If that is doable, the call is // updated to invoke the unboxed entry on the local copy and the // boxing operation is removed.
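//    Illustrative example (hypothetical types): given 'sealed class C : IFoo' and a 'this' whose type is known
//    exactly to be C, a 'callvirt' of IFoo::M can be rewritten as a direct call to C::M, which may in turn become
//    an inline candidate.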
// // When guarded devirtualization is enabled, this method will mark // calls as guarded devirtualization candidates, if the type of `this` // is not exactly known, and there is a plausible guess for the type. void Compiler::impDevirtualizeCall(GenTreeCall* call, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_METHOD_HANDLE* method, unsigned* methodFlags, CORINFO_CONTEXT_HANDLE* pContextHandle, CORINFO_CONTEXT_HANDLE* pExactContextHandle, bool isLateDevirtualization, bool isExplicitTailCall, IL_OFFSET ilOffset) { assert(call != nullptr); assert(method != nullptr); assert(methodFlags != nullptr); assert(pContextHandle != nullptr); // This should be a virtual vtable or virtual stub call. // assert(call->IsVirtual()); // Possibly instrument. Note for OSR+PGO we will instrument when // optimizing and (currently) won't devirtualize. We may want // to revisit -- if we can devirtualize we should be able to // suppress the probe. // // We strip BBINSTR from inlinees currently, so we'll only // do this for the root method calls. // if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { assert(opts.OptimizationDisabled() || opts.IsOSR()); assert(!compIsForInlining()); // During importation, optionally flag this block as one that // contains calls requiring class profiling. Ideally perhaps // we'd just keep track of the calls themselves, so we don't // have to search for them later. // if ((call->gtCallType != CT_INDIRECT) && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && (JitConfig.JitClassProfiling() > 0) && !isLateDevirtualization) { JITDUMP("\n ... marking [%06u] in " FMT_BB " for class profile instrumentation\n", dspTreeID(call), compCurBB->bbNum); ClassProfileCandidateInfo* pInfo = new (this, CMK_Inlining) ClassProfileCandidateInfo; // Record some info needed for the class profiling probe. // pInfo->ilOffset = ilOffset; pInfo->probeIndex = info.compClassProbeCount++; call->gtClassProfileCandidateInfo = pInfo; // Flag block as needing scrutiny // compCurBB->bbFlags |= BBF_HAS_CLASS_PROFILE; } return; } // Bail if optimizations are disabled. if (opts.OptimizationDisabled()) { return; } #if defined(DEBUG) // Bail if devirt is disabled. if (JitConfig.JitEnableDevirtualization() == 0) { return; } // Optionally, print info on devirtualization Compiler* const rootCompiler = impInlineRoot(); const bool doPrint = JitConfig.JitPrintDevirtualizedMethods().contains(rootCompiler->info.compMethodName, rootCompiler->info.compClassName, &rootCompiler->info.compMethodInfo->args); #endif // DEBUG // Fetch information about the virtual method we're calling. CORINFO_METHOD_HANDLE baseMethod = *method; unsigned baseMethodAttribs = *methodFlags; if (baseMethodAttribs == 0) { // For late devirt we may not have method attributes, so fetch them. baseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod); } else { #if defined(DEBUG) // Validate that callInfo has up to date method flags const DWORD freshBaseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod); // All the base method attributes should agree, save that // CORINFO_FLG_DONT_INLINE may have changed from 0 to 1 // because of concurrent jitting activity. // // Note we don't look at this particular flag bit below, and // later on (if we do try and inline) we will rediscover why // the method can't be inlined, so there's no danger here in // seeing this particular flag bit in different states between // the cached and fresh values. 
if ((freshBaseMethodAttribs & ~CORINFO_FLG_DONT_INLINE) != (baseMethodAttribs & ~CORINFO_FLG_DONT_INLINE)) { assert(!"mismatched method attributes"); } #endif // DEBUG } // In R2R mode, we might see virtual stub calls to // non-virtuals. For instance cases where the non-virtual method // is in a different assembly but is called via CALLVIRT. For // verison resilience we must allow for the fact that the method // might become virtual in some update. // // In non-R2R modes CALLVIRT <nonvirtual> will be turned into a // regular call+nullcheck upstream, so we won't reach this // point. if ((baseMethodAttribs & CORINFO_FLG_VIRTUAL) == 0) { assert(call->IsVirtualStub()); assert(opts.IsReadyToRun()); JITDUMP("\nimpDevirtualizeCall: [R2R] base method not virtual, sorry\n"); return; } // Fetch information about the class that introduced the virtual method. CORINFO_CLASS_HANDLE baseClass = info.compCompHnd->getMethodClass(baseMethod); const DWORD baseClassAttribs = info.compCompHnd->getClassAttribs(baseClass); // Is the call an interface call? const bool isInterface = (baseClassAttribs & CORINFO_FLG_INTERFACE) != 0; // See what we know about the type of 'this' in the call. GenTree* thisObj = call->gtCallThisArg->GetNode()->gtEffectiveVal(false); bool isExact = false; bool objIsNonNull = false; CORINFO_CLASS_HANDLE objClass = gtGetClassHandle(thisObj, &isExact, &objIsNonNull); // Bail if we know nothing. if (objClass == NO_CLASS_HANDLE) { JITDUMP("\nimpDevirtualizeCall: no type available (op=%s)\n", GenTree::OpName(thisObj->OperGet())); // Don't try guarded devirtualiztion when we're doing late devirtualization. // if (isLateDevirtualization) { JITDUMP("No guarded devirt during late devirtualization\n"); return; } considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass, pContextHandle DEBUGARG(objClass) DEBUGARG("unknown")); return; } // If the objClass is sealed (final), then we may be able to devirtualize. const DWORD objClassAttribs = info.compCompHnd->getClassAttribs(objClass); const bool objClassIsFinal = (objClassAttribs & CORINFO_FLG_FINAL) != 0; #if defined(DEBUG) const char* callKind = isInterface ? "interface" : "virtual"; const char* objClassNote = "[?]"; const char* objClassName = "?objClass"; const char* baseClassName = "?baseClass"; const char* baseMethodName = "?baseMethod"; if (verbose || doPrint) { objClassNote = isExact ? " [exact]" : objClassIsFinal ? " [final]" : ""; objClassName = info.compCompHnd->getClassName(objClass); baseClassName = info.compCompHnd->getClassName(baseClass); baseMethodName = eeGetMethodName(baseMethod, nullptr); if (verbose) { printf("\nimpDevirtualizeCall: Trying to devirtualize %s call:\n" " class for 'this' is %s%s (attrib %08x)\n" " base method is %s::%s\n", callKind, objClassName, objClassNote, objClassAttribs, baseClassName, baseMethodName); } } #endif // defined(DEBUG) // See if the jit's best type for `obj` is an interface. // See for instance System.ValueTuple`8::GetHashCode, where lcl 0 is System.IValueTupleInternal // IL_021d: ldloc.0 // IL_021e: callvirt instance int32 System.Object::GetHashCode() // // If so, we can't devirtualize, but we may be able to do guarded devirtualization. // if ((objClassAttribs & CORINFO_FLG_INTERFACE) != 0) { // Don't try guarded devirtualiztion when we're doing late devirtualization. 
// if (isLateDevirtualization) { JITDUMP("No guarded devirt during late devirtualization\n"); return; } considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass, pContextHandle DEBUGARG(objClass) DEBUGARG(objClassName)); return; } // If we get this far, the jit has a lower bound class type for the `this` object being used for dispatch. // It may or may not know enough to devirtualize... if (isInterface) { assert(call->IsVirtualStub()); JITDUMP("--- base class is interface\n"); } // Fetch the method that would be called based on the declared type of 'this', // and prepare to fetch the method attributes. // CORINFO_DEVIRTUALIZATION_INFO dvInfo; dvInfo.virtualMethod = baseMethod; dvInfo.objClass = objClass; dvInfo.context = *pContextHandle; dvInfo.detail = CORINFO_DEVIRTUALIZATION_UNKNOWN; dvInfo.pResolvedTokenVirtualMethod = pResolvedToken; info.compCompHnd->resolveVirtualMethod(&dvInfo); CORINFO_METHOD_HANDLE derivedMethod = dvInfo.devirtualizedMethod; CORINFO_CONTEXT_HANDLE exactContext = dvInfo.exactContext; CORINFO_CLASS_HANDLE derivedClass = NO_CLASS_HANDLE; CORINFO_RESOLVED_TOKEN* pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedMethod; if (derivedMethod != nullptr) { assert(exactContext != nullptr); assert(((size_t)exactContext & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS); derivedClass = (CORINFO_CLASS_HANDLE)((size_t)exactContext & ~CORINFO_CONTEXTFLAGS_MASK); } DWORD derivedMethodAttribs = 0; bool derivedMethodIsFinal = false; bool canDevirtualize = false; #if defined(DEBUG) const char* derivedClassName = "?derivedClass"; const char* derivedMethodName = "?derivedMethod"; const char* note = "inexact or not final"; #endif // If we failed to get a method handle, we can't directly devirtualize. // // This can happen when prejitting, if the devirtualization crosses // servicing bubble boundaries, or if objClass is a shared class. // if (derivedMethod == nullptr) { JITDUMP("--- no derived method: %s\n", devirtualizationDetailToString(dvInfo.detail)); } else { // Fetch method attributes to see if method is marked final. derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod); derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0); #if defined(DEBUG) if (isExact) { note = "exact"; } else if (objClassIsFinal) { note = "final class"; } else if (derivedMethodIsFinal) { note = "final method"; } if (verbose || doPrint) { derivedMethodName = eeGetMethodName(derivedMethod, nullptr); derivedClassName = eeGetClassName(derivedClass); if (verbose) { printf(" devirt to %s::%s -- %s\n", derivedClassName, derivedMethodName, note); gtDispTree(call); } } #endif // defined(DEBUG) canDevirtualize = isExact || objClassIsFinal || (!isInterface && derivedMethodIsFinal); } // We still might be able to do a guarded devirtualization. // Note the call might be an interface call or a virtual call. // if (!canDevirtualize) { JITDUMP(" Class not final or exact%s\n", isInterface ? "" : ", and method not final"); #if defined(DEBUG) // If we know the object type exactly, we generally expect we can devirtualize. // (don't when doing late devirt as we won't have an owner type (yet)) // if (!isLateDevirtualization && (isExact || objClassIsFinal) && JitConfig.JitNoteFailedExactDevirtualization()) { printf("@@@ Exact/Final devirt failure in %s at [%06u] $ %s\n", info.compFullName, dspTreeID(call), devirtualizationDetailToString(dvInfo.detail)); } #endif // Don't try guarded devirtualiztion if we're doing late devirtualization. 
// if (isLateDevirtualization) { JITDUMP("No guarded devirt during late devirtualization\n"); return; } considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass, pContextHandle DEBUGARG(objClass) DEBUGARG(objClassName)); return; } // All checks done. Time to transform the call. // // We should always have an exact class context. // // Note that wouldnt' be true if the runtime side supported array interface devirt, // the resulting method would be a generic method of the non-generic SZArrayHelper class. // assert(canDevirtualize); JITDUMP(" %s; can devirtualize\n", note); // Make the updates. call->gtFlags &= ~GTF_CALL_VIRT_VTABLE; call->gtFlags &= ~GTF_CALL_VIRT_STUB; call->gtCallMethHnd = derivedMethod; call->gtCallType = CT_USER_FUNC; call->gtCallMoreFlags |= GTF_CALL_M_DEVIRTUALIZED; // Virtual calls include an implicit null check, which we may // now need to make explicit. if (!objIsNonNull) { call->gtFlags |= GTF_CALL_NULLCHECK; } // Clear the inline candidate info (may be non-null since // it's a union field used for other things by virtual // stubs) call->gtInlineCandidateInfo = nullptr; #if defined(DEBUG) if (verbose) { printf("... after devirt...\n"); gtDispTree(call); } if (doPrint) { printf("Devirtualized %s call to %s:%s; now direct call to %s:%s [%s]\n", callKind, baseClassName, baseMethodName, derivedClassName, derivedMethodName, note); } // If we successfully devirtualized based on an exact or final class, // and we have dynamic PGO data describing the likely class, make sure they agree. // // If pgo source is not dynamic we may see likely classes from other versions of this code // where types had different properties. // // If method is an inlinee we may be specializing to a class that wasn't seen at runtime. // const bool canSensiblyCheck = (isExact || objClassIsFinal) && (fgPgoSource == ICorJitInfo::PgoSource::Dynamic) && !compIsForInlining(); if (JitConfig.JitCrossCheckDevirtualizationAndPGO() && canSensiblyCheck) { // We only can handle a single likely class for now const int maxLikelyClasses = 1; LikelyClassRecord likelyClasses[maxLikelyClasses]; UINT32 numberOfClasses = getLikelyClasses(likelyClasses, maxLikelyClasses, fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset); UINT32 likelihood = likelyClasses[0].likelihood; CORINFO_CLASS_HANDLE likelyClass = likelyClasses[0].clsHandle; if (numberOfClasses > 0) { // PGO had better agree the class we devirtualized to is plausible. // if (likelyClass != derivedClass) { // Managed type system may report different addresses for a class handle // at different times....? // // Also, AOT may have a more nuanced notion of class equality. // if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { bool mismatch = true; // derivedClass will be the introducer of derived method, so it's possible // likelyClass is a non-overriding subclass. Check up the hierarchy. 
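// (The loop below walks up likelyClass's inheritance chain via getParentType; finding derivedClass anywhere on that chain clears the mismatch, though the report/assert still fires if the profile saw more than one class or less than 100% likelihood.)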
// CORINFO_CLASS_HANDLE parentClass = likelyClass; while (parentClass != NO_CLASS_HANDLE) { if (parentClass == derivedClass) { mismatch = false; break; } parentClass = info.compCompHnd->getParentType(parentClass); } if (mismatch || (numberOfClasses != 1) || (likelihood != 100)) { printf("@@@ Likely %p (%s) != Derived %p (%s) [n=%u, l=%u, il=%u] in %s \n", likelyClass, eeGetClassName(likelyClass), derivedClass, eeGetClassName(derivedClass), numberOfClasses, likelihood, ilOffset, info.compFullName); } assert(!(mismatch || (numberOfClasses != 1) || (likelihood != 100))); } } } } #endif // defined(DEBUG) // If the 'this' object is a value class, see if we can rework the call to invoke the // unboxed entry. This effectively inlines the normally un-inlineable wrapper stub // and exposes the potentially inlinable unboxed entry method. // // We won't optimize explicit tail calls, as ensuring we get the right tail call info // is tricky (we'd need to pass an updated sig and resolved token back to some callers). // // Note we may not have a derived class in some cases (eg interface call on an array) // if (info.compCompHnd->isValueClass(derivedClass)) { if (isExplicitTailCall) { JITDUMP("Have a direct explicit tail call to boxed entry point; can't optimize further\n"); } else { JITDUMP("Have a direct call to boxed entry point. Trying to optimize to call an unboxed entry point\n"); // Note for some shared methods the unboxed entry point requires an extra parameter. bool requiresInstMethodTableArg = false; CORINFO_METHOD_HANDLE unboxedEntryMethod = info.compCompHnd->getUnboxedEntry(derivedMethod, &requiresInstMethodTableArg); if (unboxedEntryMethod != nullptr) { bool optimizedTheBox = false; // If the 'this' object is a local box, see if we can revise things // to not require boxing. // if (thisObj->IsBoxedValue() && !isExplicitTailCall) { // Since the call is the only consumer of the box, we know the box can't escape // since it is being passed an interior pointer. // // So, revise the box to simply create a local copy, use the address of that copy // as the this pointer, and update the entry point to the unboxed entry. // // Ideally, we then inline the boxed method and and if it turns out not to modify // the copy, we can undo the copy too. if (requiresInstMethodTableArg) { // Perform a trial box removal and ask for the type handle tree that fed the box. // JITDUMP("Unboxed entry needs method table arg...\n"); GenTree* methodTableArg = gtTryRemoveBoxUpstreamEffects(thisObj, BR_DONT_REMOVE_WANT_TYPE_HANDLE); if (methodTableArg != nullptr) { // If that worked, turn the box into a copy to a local var // JITDUMP("Found suitable method table arg tree [%06u]\n", dspTreeID(methodTableArg)); GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY); if (localCopyThis != nullptr) { // Pass the local var as this and the type handle as a new arg // JITDUMP("Success! invoking unboxed entry point on local copy, and passing method table " "arg\n"); call->gtCallThisArg = gtNewCallArgs(localCopyThis); call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; // Prepend for R2L arg passing or empty L2R passing // Append for non-empty L2R // if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr)) { // If there's a ret buf, the method table is the second arg. 
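// (Sketch of the resulting arg list on this path: [retBuf, methodTable, userArgs...] when a return
// buffer is present, otherwise [methodTable, userArgs...].)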
// if (call->HasRetBufArg()) { gtInsertNewCallArgAfter(methodTableArg, call->gtCallArgs); } else { call->gtCallArgs = gtPrependNewCallArg(methodTableArg, call->gtCallArgs); } } else { GenTreeCall::Use* beforeArg = call->gtCallArgs; while (beforeArg->GetNext() != nullptr) { beforeArg = beforeArg->GetNext(); } beforeArg->SetNext(gtNewCallArgs(methodTableArg)); } call->gtCallMethHnd = unboxedEntryMethod; derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; // Method attributes will differ because unboxed entry point is shared // const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod); JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs, unboxedMethodAttribs); derivedMethodAttribs = unboxedMethodAttribs; optimizedTheBox = true; } else { JITDUMP("Sorry, failed to undo the box -- can't convert to local copy\n"); } } else { JITDUMP("Sorry, failed to undo the box -- can't find method table arg\n"); } } else { JITDUMP("Found unboxed entry point, trying to simplify box to a local copy\n"); GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY); if (localCopyThis != nullptr) { JITDUMP("Success! invoking unboxed entry point on local copy\n"); call->gtCallThisArg = gtNewCallArgs(localCopyThis); call->gtCallMethHnd = unboxedEntryMethod; call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; optimizedTheBox = true; } else { JITDUMP("Sorry, failed to undo the box\n"); } } if (optimizedTheBox) { #if FEATURE_TAILCALL_OPT if (call->IsImplicitTailCall()) { JITDUMP("Clearing the implicit tail call flag\n"); // If set, we clear the implicit tail call flag // as we just introduced a new address taken local variable // call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL; } #endif // FEATURE_TAILCALL_OPT } } if (!optimizedTheBox) { // If we get here, we have a boxed value class that either wasn't boxed // locally, or was boxed locally but we were unable to remove the box for // various reasons. // // We can still update the call to invoke the unboxed entry, if the // boxed value is simple. // if (requiresInstMethodTableArg) { // Get the method table from the boxed object. // GenTree* const thisArg = call->gtCallThisArg->GetNode(); GenTree* const clonedThisArg = gtClone(thisArg); if (clonedThisArg == nullptr) { JITDUMP( "unboxed entry needs MT arg, but `this` was too complex to clone. Deferring update.\n"); } else { JITDUMP("revising call to invoke unboxed entry with additional method table arg\n"); GenTree* const methodTableArg = gtNewMethodTableLookup(clonedThisArg); // Update the 'this' pointer to refer to the box payload // GenTree* const payloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* const boxPayload = gtNewOperNode(GT_ADD, TYP_BYREF, thisArg, payloadOffset); call->gtCallThisArg = gtNewCallArgs(boxPayload); call->gtCallMethHnd = unboxedEntryMethod; call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; // Method attributes will differ because unboxed entry point is shared // const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod); JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs, unboxedMethodAttribs); derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; derivedMethodAttribs = unboxedMethodAttribs; // Add the method table argument. 
// // Prepend for R2L arg passing or empty L2R passing // Append for non-empty L2R // if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr)) { // If there's a ret buf, the method table is the second arg. // if (call->HasRetBufArg()) { gtInsertNewCallArgAfter(methodTableArg, call->gtCallArgs); } else { call->gtCallArgs = gtPrependNewCallArg(methodTableArg, call->gtCallArgs); } } else { GenTreeCall::Use* beforeArg = call->gtCallArgs; while (beforeArg->GetNext() != nullptr) { beforeArg = beforeArg->GetNext(); } beforeArg->SetNext(gtNewCallArgs(methodTableArg)); } } } else { JITDUMP("revising call to invoke unboxed entry\n"); GenTree* const thisArg = call->gtCallThisArg->GetNode(); GenTree* const payloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* const boxPayload = gtNewOperNode(GT_ADD, TYP_BYREF, thisArg, payloadOffset); call->gtCallThisArg = gtNewCallArgs(boxPayload); call->gtCallMethHnd = unboxedEntryMethod; call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; } } } else { // Many of the low-level methods on value classes won't have unboxed entries, // as they need access to the type of the object. // // Note this may be a cue for us to stack allocate the boxed object, since // we probably know that these objects don't escape. JITDUMP("Sorry, failed to find unboxed entry point\n"); } } } // Need to update call info too. // *method = derivedMethod; *methodFlags = derivedMethodAttribs; // Update context handle // *pContextHandle = MAKE_METHODCONTEXT(derivedMethod); // Update exact context handle. // if (pExactContextHandle != nullptr) { *pExactContextHandle = MAKE_CLASSCONTEXT(derivedClass); } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { // For R2R, getCallInfo triggers bookkeeping on the zap // side and acquires the actual symbol to call so we need to call it here. // Look up the new call info. CORINFO_CALL_INFO derivedCallInfo; eeGetCallInfo(pDerivedResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, &derivedCallInfo); // Update the call. call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT; call->gtCallMoreFlags &= ~GTF_CALL_M_R2R_REL_INDIRECT; call->setEntryPoint(derivedCallInfo.codePointerLookup.constLookup); } #endif // FEATURE_READYTORUN } //------------------------------------------------------------------------ // impGetSpecialIntrinsicExactReturnType: Look for special cases where a call // to an intrinsic returns an exact type // // Arguments: // methodHnd -- handle for the special intrinsic method // // Returns: // Exact class handle returned by the intrinsic call, if known. // Nullptr if not known, or not likely to lead to beneficial optimization. CORINFO_CLASS_HANDLE Compiler::impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE methodHnd) { JITDUMP("Special intrinsic: looking for exact type returned by %s\n", eeGetMethodFullName(methodHnd)); CORINFO_CLASS_HANDLE result = nullptr; // See what intrinisc we have... const NamedIntrinsic ni = lookupNamedIntrinsic(methodHnd); switch (ni) { case NI_System_Collections_Generic_Comparer_get_Default: case NI_System_Collections_Generic_EqualityComparer_get_Default: { // Expect one class generic parameter; figure out which it is. 
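// (E.g. for Comparer<string>.get_Default the single class instantiation argument is expected to be
// the handle for System.String; the concrete example is illustrative only.)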
CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(methodHnd, &sig); assert(sig.sigInst.classInstCount == 1); CORINFO_CLASS_HANDLE typeHnd = sig.sigInst.classInst[0]; assert(typeHnd != nullptr); // Lookup can incorrect when we have __Canon as it won't appear // to implement any interface types. // // And if we do not have a final type, devirt & inlining is // unlikely to result in much simplification. // // We can use CORINFO_FLG_FINAL to screen out both of these cases. const DWORD typeAttribs = info.compCompHnd->getClassAttribs(typeHnd); const bool isFinalType = ((typeAttribs & CORINFO_FLG_FINAL) != 0); if (isFinalType) { if (ni == NI_System_Collections_Generic_EqualityComparer_get_Default) { result = info.compCompHnd->getDefaultEqualityComparerClass(typeHnd); } else { assert(ni == NI_System_Collections_Generic_Comparer_get_Default); result = info.compCompHnd->getDefaultComparerClass(typeHnd); } JITDUMP("Special intrinsic for type %s: return type is %s\n", eeGetClassName(typeHnd), result != nullptr ? eeGetClassName(result) : "unknown"); } else { JITDUMP("Special intrinsic for type %s: type not final, so deferring opt\n", eeGetClassName(typeHnd)); } break; } default: { JITDUMP("This special intrinsic not handled, sorry...\n"); break; } } return result; } //------------------------------------------------------------------------ // impAllocateToken: create CORINFO_RESOLVED_TOKEN into jit-allocated memory and init it. // // Arguments: // token - init value for the allocated token. // // Return Value: // pointer to token into jit-allocated memory. CORINFO_RESOLVED_TOKEN* Compiler::impAllocateToken(const CORINFO_RESOLVED_TOKEN& token) { CORINFO_RESOLVED_TOKEN* memory = getAllocator(CMK_Unknown).allocate<CORINFO_RESOLVED_TOKEN>(1); *memory = token; return memory; } //------------------------------------------------------------------------ // SpillRetExprHelper: iterate through arguments tree and spill ret_expr to local variables. // class SpillRetExprHelper { public: SpillRetExprHelper(Compiler* comp) : comp(comp) { } void StoreRetExprResultsInArgs(GenTreeCall* call) { for (GenTreeCall::Use& use : call->Args()) { comp->fgWalkTreePre(&use.NodeRef(), SpillRetExprVisitor, this); } if (call->gtCallThisArg != nullptr) { comp->fgWalkTreePre(&call->gtCallThisArg->NodeRef(), SpillRetExprVisitor, this); } } private: static Compiler::fgWalkResult SpillRetExprVisitor(GenTree** pTree, Compiler::fgWalkData* fgWalkPre) { assert((pTree != nullptr) && (*pTree != nullptr)); GenTree* tree = *pTree; if ((tree->gtFlags & GTF_CALL) == 0) { // Trees with ret_expr are marked as GTF_CALL. 
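// (So a subtree without GTF_CALL set cannot contain a GT_RET_EXPR and can be skipped wholesale.)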
return Compiler::WALK_SKIP_SUBTREES; } if (tree->OperGet() == GT_RET_EXPR) { SpillRetExprHelper* walker = static_cast<SpillRetExprHelper*>(fgWalkPre->pCallbackData); walker->StoreRetExprAsLocalVar(pTree); } return Compiler::WALK_CONTINUE; } void StoreRetExprAsLocalVar(GenTree** pRetExpr) { GenTree* retExpr = *pRetExpr; assert(retExpr->OperGet() == GT_RET_EXPR); const unsigned tmp = comp->lvaGrabTemp(true DEBUGARG("spilling ret_expr")); JITDUMP("Storing return expression [%06u] to a local var V%02u.\n", comp->dspTreeID(retExpr), tmp); comp->impAssignTempGen(tmp, retExpr, (unsigned)Compiler::CHECK_SPILL_NONE); *pRetExpr = comp->gtNewLclvNode(tmp, retExpr->TypeGet()); if (retExpr->TypeGet() == TYP_REF) { assert(comp->lvaTable[tmp].lvSingleDef == 0); comp->lvaTable[tmp].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tmp); bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE retClsHnd = comp->gtGetClassHandle(retExpr, &isExact, &isNonNull); if (retClsHnd != nullptr) { comp->lvaSetClass(tmp, retClsHnd, isExact); } } } private: Compiler* comp; }; //------------------------------------------------------------------------ // addFatPointerCandidate: mark the call and the method, that they have a fat pointer candidate. // Spill ret_expr in the call node, because they can't be cloned. // // Arguments: // call - fat calli candidate // void Compiler::addFatPointerCandidate(GenTreeCall* call) { JITDUMP("Marking call [%06u] as fat pointer candidate\n", dspTreeID(call)); setMethodHasFatPointer(); call->SetFatPointerCandidate(); SpillRetExprHelper helper(this); helper.StoreRetExprResultsInArgs(call); } //------------------------------------------------------------------------ // considerGuardedDevirtualization: see if we can profitably guess at the // class involved in an interface or virtual call. // // Arguments: // // call - potential guarded devirtualization candidate // ilOffset - IL ofset of the call instruction // isInterface - true if this is an interface call // baseMethod - target method of the call // baseClass - class that introduced the target method // pContextHandle - context handle for the call // objClass - class of 'this' in the call // objClassName - name of the obj Class // // Notes: // Consults with VM to see if there's a likely class at runtime, // if so, adds a candidate for guarded devirtualization. // void Compiler::considerGuardedDevirtualization( GenTreeCall* call, IL_OFFSET ilOffset, bool isInterface, CORINFO_METHOD_HANDLE baseMethod, CORINFO_CLASS_HANDLE baseClass, CORINFO_CONTEXT_HANDLE* pContextHandle DEBUGARG(CORINFO_CLASS_HANDLE objClass) DEBUGARG(const char* objClassName)) { #if defined(DEBUG) const char* callKind = isInterface ? "interface" : "virtual"; #endif JITDUMP("Considering guarded devirtualization at IL offset %u (0x%x)\n", ilOffset, ilOffset); // We currently only get likely class guesses when there is PGO data // with class profiles. // if (fgPgoClassProfiles == 0) { JITDUMP("Not guessing for class: no class profile pgo data, or pgo disabled\n"); return; } // See if there's a likely guess for the class. // const unsigned likelihoodThreshold = isInterface ? 25 : 30; unsigned likelihood = 0; unsigned numberOfClasses = 0; CORINFO_CLASS_HANDLE likelyClass = NO_CLASS_HANDLE; bool doRandomDevirt = false; const int maxLikelyClasses = 32; LikelyClassRecord likelyClasses[maxLikelyClasses]; #ifdef DEBUG // Optional stress mode to pick a random known class, rather than // the most likely known class. 
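// (Guarded devirtualization keeps the original call as a fallback, so a bad guess here only costs
// performance, not correctness, which is what makes this stress mode safe.)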
// doRandomDevirt = JitConfig.JitRandomGuardedDevirtualization() != 0; if (doRandomDevirt) { // Reuse the random inliner's random state. // CLRRandom* const random = impInlineRoot()->m_inlineStrategy->GetRandom(JitConfig.JitRandomGuardedDevirtualization()); likelyClasses[0].clsHandle = getRandomClass(fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset, random); likelyClasses[0].likelihood = 100; if (likelyClasses[0].clsHandle != NO_CLASS_HANDLE) { numberOfClasses = 1; } } else #endif { numberOfClasses = getLikelyClasses(likelyClasses, maxLikelyClasses, fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset); } // For now we only use the most popular type likelihood = likelyClasses[0].likelihood; likelyClass = likelyClasses[0].clsHandle; if (numberOfClasses < 1) { JITDUMP("No likely class, sorry\n"); return; } assert(likelyClass != NO_CLASS_HANDLE); // Print all likely classes JITDUMP("%s classes for %p (%s):\n", doRandomDevirt ? "Random" : "Likely", dspPtr(objClass), objClassName) for (UINT32 i = 0; i < numberOfClasses; i++) { JITDUMP(" %u) %p (%s) [likelihood:%u%%]\n", i + 1, likelyClasses[i].clsHandle, eeGetClassName(likelyClasses[i].clsHandle), likelyClasses[i].likelihood); } // Todo: a more advanced heuristic using likelihood, number of // classes, and the profile count for this block. // // For now we will guess if the likelihood is at least 25%/30% (intfc/virt), as studies // have shown this transformation should pay off even if we guess wrong sometimes. // if (likelihood < likelihoodThreshold) { JITDUMP("Not guessing for class; likelihood is below %s call threshold %u\n", callKind, likelihoodThreshold); return; } uint32_t const likelyClassAttribs = info.compCompHnd->getClassAttribs(likelyClass); if ((likelyClassAttribs & CORINFO_FLG_ABSTRACT) != 0) { // We may see an abstract likely class, if we have a stale profile. // No point guessing for this. // JITDUMP("Not guessing for class; abstract (stale profile)\n"); return; } // Figure out which method will be called. // CORINFO_DEVIRTUALIZATION_INFO dvInfo; dvInfo.virtualMethod = baseMethod; dvInfo.objClass = likelyClass; dvInfo.context = *pContextHandle; dvInfo.exactContext = *pContextHandle; dvInfo.pResolvedTokenVirtualMethod = nullptr; const bool canResolve = info.compCompHnd->resolveVirtualMethod(&dvInfo); if (!canResolve) { JITDUMP("Can't figure out which method would be invoked, sorry\n"); return; } CORINFO_METHOD_HANDLE likelyMethod = dvInfo.devirtualizedMethod; JITDUMP("%s call would invoke method %s\n", callKind, eeGetMethodName(likelyMethod, nullptr)); // Add this as a potential candidate. // uint32_t const likelyMethodAttribs = info.compCompHnd->getMethodAttribs(likelyMethod); addGuardedDevirtualizationCandidate(call, likelyMethod, likelyClass, likelyMethodAttribs, likelyClassAttribs, likelihood); } //------------------------------------------------------------------------ // addGuardedDevirtualizationCandidate: potentially mark the call as a guarded // devirtualization candidate // // Notes: // // Call sites in rare or unoptimized code, and calls that require cookies are // not marked as candidates. // // As part of marking the candidate, the code spills GT_RET_EXPRs anywhere in any // child tree, because and we need to clone all these trees when we clone the call // as part of guarded devirtualization, and these IR nodes can't be cloned. 
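// The later expansion is, roughly: compare the object's method table against the guessed class,
// call the devirtualized (and hopefully inlineable) method on the hit path, and fall back to the
// original virtual or interface call otherwise. That is only a sketch; the actual IR is produced
// later by the indirect call transformer, not here.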
// // Arguments: // call - potential guarded devirtualization candidate // methodHandle - method that will be invoked if the class test succeeds // classHandle - class that will be tested for at runtime // methodAttr - attributes of the method // classAttr - attributes of the class // likelihood - odds that this class is the class seen at runtime // void Compiler::addGuardedDevirtualizationCandidate(GenTreeCall* call, CORINFO_METHOD_HANDLE methodHandle, CORINFO_CLASS_HANDLE classHandle, unsigned methodAttr, unsigned classAttr, unsigned likelihood) { // This transformation only makes sense for virtual calls assert(call->IsVirtual()); // Only mark calls if the feature is enabled. const bool isEnabled = JitConfig.JitEnableGuardedDevirtualization() > 0; if (!isEnabled) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- disabled by jit config\n", dspTreeID(call)); return; } // Bail if not optimizing or the call site is very likely cold if (compCurBB->isRunRarely() || opts.OptimizationDisabled()) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- rare / dbg / minopts\n", dspTreeID(call)); return; } // CT_INDIRECT calls may use the cookie, bail if so... // // If transforming these provides a benefit, we could save this off in the same way // we save the stub address below. if ((call->gtCallType == CT_INDIRECT) && (call->AsCall()->gtCallCookie != nullptr)) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- CT_INDIRECT with cookie\n", dspTreeID(call)); return; } #ifdef DEBUG // See if disabled by range // static ConfigMethodRange JitGuardedDevirtualizationRange; JitGuardedDevirtualizationRange.EnsureInit(JitConfig.JitGuardedDevirtualizationRange()); assert(!JitGuardedDevirtualizationRange.Error()); if (!JitGuardedDevirtualizationRange.Contains(impInlineRoot()->info.compMethodHash())) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- excluded by " "JitGuardedDevirtualizationRange", dspTreeID(call)); return; } #endif // We're all set, proceed with candidate creation. // JITDUMP("Marking call [%06u] as guarded devirtualization candidate; will guess for class %s\n", dspTreeID(call), eeGetClassName(classHandle)); setMethodHasGuardedDevirtualization(); call->SetGuardedDevirtualizationCandidate(); // Spill off any GT_RET_EXPR subtrees so we can clone the call. // SpillRetExprHelper helper(this); helper.StoreRetExprResultsInArgs(call); // Gather some information for later. Note we actually allocate InlineCandidateInfo // here, as the devirtualized half of this call will likely become an inline candidate. // GuardedDevirtualizationCandidateInfo* pInfo = new (this, CMK_Inlining) InlineCandidateInfo; pInfo->guardedMethodHandle = methodHandle; pInfo->guardedMethodUnboxedEntryHandle = nullptr; pInfo->guardedClassHandle = classHandle; pInfo->likelihood = likelihood; pInfo->requiresInstMethodTableArg = false; // If the guarded class is a value class, look for an unboxed entry point. // if ((classAttr & CORINFO_FLG_VALUECLASS) != 0) { JITDUMP(" ... class is a value class, looking for unboxed entry\n"); bool requiresInstMethodTableArg = false; CORINFO_METHOD_HANDLE unboxedEntryMethodHandle = info.compCompHnd->getUnboxedEntry(methodHandle, &requiresInstMethodTableArg); if (unboxedEntryMethodHandle != nullptr) { JITDUMP(" ... 
updating GDV candidate with unboxed entry info\n"); pInfo->guardedMethodUnboxedEntryHandle = unboxedEntryMethodHandle; pInfo->requiresInstMethodTableArg = requiresInstMethodTableArg; } } call->gtGuardedDevirtualizationCandidateInfo = pInfo; } void Compiler::addExpRuntimeLookupCandidate(GenTreeCall* call) { setMethodHasExpRuntimeLookup(); call->SetExpRuntimeLookup(); } //------------------------------------------------------------------------ // impIsClassExact: check if a class handle can only describe values // of exactly one class. // // Arguments: // classHnd - handle for class in question // // Returns: // true if class is final and not subject to special casting from // variance or similar. // // Note: // We are conservative on arrays of primitive types here. bool Compiler::impIsClassExact(CORINFO_CLASS_HANDLE classHnd) { DWORD flags = info.compCompHnd->getClassAttribs(classHnd); DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_VARIANCE | CORINFO_FLG_ARRAY; if ((flags & flagsMask) == CORINFO_FLG_FINAL) { return true; } if ((flags & flagsMask) == (CORINFO_FLG_FINAL | CORINFO_FLG_ARRAY)) { CORINFO_CLASS_HANDLE arrayElementHandle = nullptr; CorInfoType type = info.compCompHnd->getChildType(classHnd, &arrayElementHandle); if ((type == CORINFO_TYPE_CLASS) || (type == CORINFO_TYPE_VALUECLASS)) { return impIsClassExact(arrayElementHandle); } } return false; } //------------------------------------------------------------------------ // impCanSkipCovariantStoreCheck: see if storing a ref type value to an array // can skip the array store covariance check. // // Arguments: // value -- tree producing the value to store // array -- tree representing the array to store to // // Returns: // true if the store does not require a covariance check. // bool Compiler::impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array) { // We should only call this when optimizing. assert(opts.OptimizationEnabled()); // Check for assignment to same array, ie. arrLcl[i] = arrLcl[j] if (value->OperIs(GT_INDEX) && array->OperIs(GT_LCL_VAR)) { GenTree* valueIndex = value->AsIndex()->Arr(); if (valueIndex->OperIs(GT_LCL_VAR)) { unsigned valueLcl = valueIndex->AsLclVar()->GetLclNum(); unsigned arrayLcl = array->AsLclVar()->GetLclNum(); if ((valueLcl == arrayLcl) && !lvaGetDesc(arrayLcl)->IsAddressExposed()) { JITDUMP("\nstelem of ref from same array: skipping covariant store check\n"); return true; } } } // Check for assignment of NULL. if (value->OperIs(GT_CNS_INT)) { assert(value->gtType == TYP_REF); if (value->AsIntCon()->gtIconVal == 0) { JITDUMP("\nstelem of null: skipping covariant store check\n"); return true; } // Non-0 const refs can only occur with frozen objects assert(value->IsIconHandle(GTF_ICON_STR_HDL)); assert(doesMethodHaveFrozenString() || (compIsForInlining() && impInlineInfo->InlinerCompiler->doesMethodHaveFrozenString())); } // Try and get a class handle for the array if (value->gtType != TYP_REF) { return false; } bool arrayIsExact = false; bool arrayIsNonNull = false; CORINFO_CLASS_HANDLE arrayHandle = gtGetClassHandle(array, &arrayIsExact, &arrayIsNonNull); if (arrayHandle == NO_CLASS_HANDLE) { return false; } // There are some methods in corelib where we're storing to an array but the IL // doesn't reflect this (see SZArrayHelper). Avoid. 
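// (Presumably the class handle recovered for the 'array' tree is SZArrayHelper itself rather than
// an array class in those cases, which the CORINFO_FLG_ARRAY check below filters out.)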
DWORD attribs = info.compCompHnd->getClassAttribs(arrayHandle); if ((attribs & CORINFO_FLG_ARRAY) == 0) { return false; } CORINFO_CLASS_HANDLE arrayElementHandle = nullptr; CorInfoType arrayElemType = info.compCompHnd->getChildType(arrayHandle, &arrayElementHandle); // Verify array type handle is really an array of ref type assert(arrayElemType == CORINFO_TYPE_CLASS); // Check for exactly object[] if (arrayIsExact && (arrayElementHandle == impGetObjectClass())) { JITDUMP("\nstelem to (exact) object[]: skipping covariant store check\n"); return true; } const bool arrayTypeIsSealed = impIsClassExact(arrayElementHandle); if ((!arrayIsExact && !arrayTypeIsSealed) || (arrayElementHandle == NO_CLASS_HANDLE)) { // Bail out if we don't know array's exact type return false; } bool valueIsExact = false; bool valueIsNonNull = false; CORINFO_CLASS_HANDLE valueHandle = gtGetClassHandle(value, &valueIsExact, &valueIsNonNull); // Array's type is sealed and equals to value's type if (arrayTypeIsSealed && (valueHandle == arrayElementHandle)) { JITDUMP("\nstelem to T[] with T exact: skipping covariant store check\n"); return true; } // Array's type is not sealed but we know its exact type if (arrayIsExact && (valueHandle != NO_CLASS_HANDLE) && (info.compCompHnd->compareTypesForCast(valueHandle, arrayElementHandle) == TypeCompareState::Must)) { JITDUMP("\nstelem to T[] with T exact: skipping covariant store check\n"); return true; } return false; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Importer XX XX XX XX Imports the given method and converts it to semantic trees XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "corexcep.h" #define Verify(cond, msg) \ do \ { \ if (!(cond)) \ { \ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \ } \ } while (0) #define VerifyOrReturn(cond, msg) \ do \ { \ if (!(cond)) \ { \ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \ return; \ } \ } while (0) #define VerifyOrReturnSpeculative(cond, msg, speculative) \ do \ { \ if (speculative) \ { \ if (!(cond)) \ { \ return false; \ } \ } \ else \ { \ if (!(cond)) \ { \ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \ return false; \ } \ } \ } while (0) /*****************************************************************************/ void Compiler::impInit() { impStmtList = impLastStmt = nullptr; #ifdef DEBUG impInlinedCodeSize = 0; #endif // DEBUG } /***************************************************************************** * * Pushes the given tree on the stack. */ void Compiler::impPushOnStack(GenTree* tree, typeInfo ti) { /* Check for overflow. If inlining, we may be using a bigger stack */ if ((verCurrentState.esStackDepth >= info.compMaxStack) && (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0))) { BADCODE("stack overflow"); } #ifdef DEBUG // If we are pushing a struct, make certain we know the precise type! 
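// (That is, the typeInfo pushed with the tree must be a TI_STRUCT carrying a valid class handle;
// the asserts below check exactly this.)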
if (tree->TypeGet() == TYP_STRUCT) { assert(ti.IsType(TI_STRUCT)); CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle(); assert(clsHnd != NO_CLASS_HANDLE); } #endif // DEBUG verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti; verCurrentState.esStack[verCurrentState.esStackDepth++].val = tree; if ((tree->gtType == TYP_LONG) && (compLongUsed == false)) { compLongUsed = true; } else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false)) { compFloatingPointUsed = true; } } inline void Compiler::impPushNullObjRefOnStack() { impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL)); } // This method gets called when we run into unverifiable code // (and we are verifying the method) inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file) DEBUGARG(unsigned line)) { #ifdef DEBUG const char* tail = strrchr(file, '\\'); if (tail) { file = tail + 1; } if (JitConfig.JitBreakOnUnsafeCode()) { assert(!"Unsafe code detected"); } #endif JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs)); if (compIsForImportOnly()) { JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs)); verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line)); } } inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file) DEBUGARG(unsigned line)) { JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs)); #ifdef DEBUG // BreakIfDebuggerPresent(); if (getBreakOnBadCode()) { assert(!"Typechecking error"); } #endif RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr); UNREACHABLE(); } // helper function that will tell us if the IL instruction at the addr passed // by param consumes an address at the top of the stack. We use it to save // us lvAddrTaken bool Compiler::impILConsumesAddr(const BYTE* codeAddr) { assert(!compIsForInlining()); OPCODE opcode; opcode = (OPCODE)getU1LittleEndian(codeAddr); switch (opcode) { // case CEE_LDFLDA: We're taking this one out as if you have a sequence // like // // ldloca.0 // ldflda whatever // // of a primitivelike struct, you end up after morphing with addr of a local // that's not marked as addrtaken, which is wrong. Also ldflda is usually used // for structs that contain other structs, which isnt a case we handle very // well now for other reasons. case CEE_LDFLD: { // We won't collapse small fields. This is probably not the right place to have this // check, but we're only using the function for this purpose, and is easy to factor // out if we need to do so. 
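// (So an ldloca feeding an ldfld of a small-typed field still reports "does not consume the
// address", and the local stays conservatively marked as address-taken.)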
CORINFO_RESOLVED_TOKEN resolvedToken; impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field); var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField)); // Preserve 'small' int types if (!varTypeIsSmall(lclTyp)) { lclTyp = genActualType(lclTyp); } if (varTypeIsSmall(lclTyp)) { return false; } return true; } default: break; } return false; } void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind) { pResolvedToken->tokenContext = impTokenLookupContextHandle; pResolvedToken->tokenScope = info.compScopeHnd; pResolvedToken->token = getU4LittleEndian(addr); pResolvedToken->tokenType = kind; info.compCompHnd->resolveToken(pResolvedToken); } /***************************************************************************** * * Pop one tree from the stack. */ StackEntry Compiler::impPopStack() { if (verCurrentState.esStackDepth == 0) { BADCODE("stack underflow"); } return verCurrentState.esStack[--verCurrentState.esStackDepth]; } /***************************************************************************** * * Peep at n'th (0-based) tree on the top of the stack. */ StackEntry& Compiler::impStackTop(unsigned n) { if (verCurrentState.esStackDepth <= n) { BADCODE("stack underflow"); } return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1]; } unsigned Compiler::impStackHeight() { return verCurrentState.esStackDepth; } /***************************************************************************** * Some of the trees are spilled specially. While unspilling them, or * making a copy, these need to be handled specially. The function * enumerates the operators possible after spilling. */ #ifdef DEBUG // only used in asserts static bool impValidSpilledStackEntry(GenTree* tree) { if (tree->gtOper == GT_LCL_VAR) { return true; } if (tree->OperIsConst()) { return true; } return false; } #endif /***************************************************************************** * * The following logic is used to save/restore stack contents. * If 'copy' is true, then we make a copy of the trees on the stack. These * have to all be cloneable/spilled values. 
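* (In practice that means constants or local variable nodes; impValidSpilledStackEntry() above
* asserts exactly that.)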
*/ void Compiler::impSaveStackState(SavedStack* savePtr, bool copy) { savePtr->ssDepth = verCurrentState.esStackDepth; if (verCurrentState.esStackDepth) { savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth]; size_t saveSize = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees); if (copy) { StackEntry* table = savePtr->ssTrees; /* Make a fresh copy of all the stack entries */ for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++) { table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo; GenTree* tree = verCurrentState.esStack[level].val; assert(impValidSpilledStackEntry(tree)); switch (tree->gtOper) { case GT_CNS_INT: case GT_CNS_LNG: case GT_CNS_DBL: case GT_CNS_STR: case GT_LCL_VAR: table->val = gtCloneExpr(tree); break; default: assert(!"Bad oper - Not covered by impValidSpilledStackEntry()"); break; } } } else { memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize); } } } void Compiler::impRestoreStackState(SavedStack* savePtr) { verCurrentState.esStackDepth = savePtr->ssDepth; if (verCurrentState.esStackDepth) { memcpy(verCurrentState.esStack, savePtr->ssTrees, verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack)); } } //------------------------------------------------------------------------ // impBeginTreeList: Get the tree list started for a new basic block. // inline void Compiler::impBeginTreeList() { assert(impStmtList == nullptr && impLastStmt == nullptr); } /***************************************************************************** * * Store the given start and end stmt in the given basic block. This is * mostly called by impEndTreeList(BasicBlock *block). It is called * directly only for handling CEE_LEAVEs out of finally-protected try's. */ inline void Compiler::impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt) { /* Make the list circular, so that we can easily walk it backwards */ firstStmt->SetPrevStmt(lastStmt); /* Store the tree list in the basic block */ block->bbStmtList = firstStmt; /* The block should not already be marked as imported */ assert((block->bbFlags & BBF_IMPORTED) == 0); block->bbFlags |= BBF_IMPORTED; } inline void Compiler::impEndTreeList(BasicBlock* block) { if (impStmtList == nullptr) { // The block should not already be marked as imported. assert((block->bbFlags & BBF_IMPORTED) == 0); // Empty block. Just mark it as imported. block->bbFlags |= BBF_IMPORTED; } else { impEndTreeList(block, impStmtList, impLastStmt); } #ifdef DEBUG if (impLastILoffsStmt != nullptr) { impLastILoffsStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs); impLastILoffsStmt = nullptr; } #endif impStmtList = impLastStmt = nullptr; } /***************************************************************************** * * Check that storing the given tree doesnt mess up the semantic order. Note * that this has only limited value as we can only check [0..chkLevel). 
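* (Debug-only: in release builds the body below compiles to an immediate return.)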
*/ inline void Compiler::impAppendStmtCheck(Statement* stmt, unsigned chkLevel) { #ifndef DEBUG return; #else if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE) { return; } GenTree* tree = stmt->GetRootNode(); // Calls can only be appended if there are no GTF_GLOB_EFFECT on the stack if (tree->gtFlags & GTF_CALL) { for (unsigned level = 0; level < chkLevel; level++) { assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0); } } if (tree->gtOper == GT_ASG) { // For an assignment to a local variable, all references of that // variable have to be spilled. If it is aliased, all calls and // indirect accesses have to be spilled if (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) { unsigned lclNum = tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); for (unsigned level = 0; level < chkLevel; level++) { assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum)); assert(!lvaTable[lclNum].IsAddressExposed() || (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0); } } // If the access may be to global memory, all side effects have to be spilled. else if (tree->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF) { for (unsigned level = 0; level < chkLevel; level++) { assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0); } } } #endif } //------------------------------------------------------------------------ // impAppendStmt: Append the given statement to the current block's tree list. // // // Arguments: // stmt - The statement to add. // chkLevel - [0..chkLevel) is the portion of the stack which we will check // for interference with stmt and spill if needed. // checkConsumedDebugInfo - Whether to check for consumption of impCurStmtDI. impCurStmtDI // marks the debug info of the current boundary and is set when we // start importing IL at that boundary. If this parameter is true, // then the function checks if 'stmt' has been associated with the // current boundary, and if so, clears it so that we do not attach // it to more upcoming statements. // void Compiler::impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo) { if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } if ((chkLevel != 0) && (chkLevel != (unsigned)CHECK_SPILL_NONE)) { assert(chkLevel <= verCurrentState.esStackDepth); /* If the statement being appended has any side-effects, check the stack to see if anything needs to be spilled to preserve correct ordering. */ GenTree* expr = stmt->GetRootNode(); GenTreeFlags flags = expr->gtFlags & GTF_GLOB_EFFECT; // Assignment to (unaliased) locals don't count as a side-effect as // we handle them specially using impSpillLclRefs(). Temp locals should // be fine too. 
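// (Concretely, for "lcl = <value>" where the target local is not address exposed, only the value
// operand's side-effect flags are considered when deciding what to spill below.)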
if ((expr->gtOper == GT_ASG) && (expr->AsOp()->gtOp1->gtOper == GT_LCL_VAR) && ((expr->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF) == 0) && !gtHasLocalsWithAddrOp(expr->AsOp()->gtOp2)) { GenTreeFlags op2Flags = expr->AsOp()->gtOp2->gtFlags & GTF_GLOB_EFFECT; assert(flags == (op2Flags | GTF_ASG)); flags = op2Flags; } if (flags != 0) { bool spillGlobEffects = false; if ((flags & GTF_CALL) != 0) { // If there is a call, we have to spill global refs spillGlobEffects = true; } else if (!expr->OperIs(GT_ASG)) { if ((flags & GTF_ASG) != 0) { // The expression is not an assignment node but it has an assignment side effect, it // must be an atomic op, HW intrinsic or some other kind of node that stores to memory. // Since we don't know what it assigns to, we need to spill global refs. spillGlobEffects = true; } } else { GenTree* lhs = expr->gtGetOp1(); GenTree* rhs = expr->gtGetOp2(); if (((rhs->gtFlags | lhs->gtFlags) & GTF_ASG) != 0) { // Either side of the assignment node has an assignment side effect. // Since we don't know what it assigns to, we need to spill global refs. spillGlobEffects = true; } else if ((lhs->gtFlags & GTF_GLOB_REF) != 0) { spillGlobEffects = true; } } impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt")); } else { impSpillSpecialSideEff(); } } impAppendStmtCheck(stmt, chkLevel); impAppendStmt(stmt); #ifdef FEATURE_SIMD impMarkContiguousSIMDFieldAssignments(stmt); #endif // Once we set the current offset as debug info in an appended tree, we are // ready to report the following offsets. Note that we need to compare // offsets here instead of debug info, since we do not set the "is call" // bit in impCurStmtDI. if (checkConsumedDebugInfo && (impLastStmt->GetDebugInfo().GetLocation().GetOffset() == impCurStmtDI.GetLocation().GetOffset())) { impCurStmtOffsSet(BAD_IL_OFFSET); } #ifdef DEBUG if (impLastILoffsStmt == nullptr) { impLastILoffsStmt = stmt; } if (verbose) { printf("\n\n"); gtDispStmt(stmt); } #endif } //------------------------------------------------------------------------ // impAppendStmt: Add the statement to the current stmts list. // // Arguments: // stmt - the statement to add. // inline void Compiler::impAppendStmt(Statement* stmt) { if (impStmtList == nullptr) { // The stmt is the first in the list. impStmtList = stmt; } else { // Append the expression statement to the existing list. impLastStmt->SetNextStmt(stmt); stmt->SetPrevStmt(impLastStmt); } impLastStmt = stmt; } //------------------------------------------------------------------------ // impExtractLastStmt: Extract the last statement from the current stmts list. // // Return Value: // The extracted statement. // // Notes: // It assumes that the stmt will be reinserted later. // Statement* Compiler::impExtractLastStmt() { assert(impLastStmt != nullptr); Statement* stmt = impLastStmt; impLastStmt = impLastStmt->GetPrevStmt(); if (impLastStmt == nullptr) { impStmtList = nullptr; } return stmt; } //------------------------------------------------------------------------- // impInsertStmtBefore: Insert the given "stmt" before "stmtBefore". // // Arguments: // stmt - a statement to insert; // stmtBefore - an insertion point to insert "stmt" before. 
// inline void Compiler::impInsertStmtBefore(Statement* stmt, Statement* stmtBefore) { assert(stmt != nullptr); assert(stmtBefore != nullptr); if (stmtBefore == impStmtList) { impStmtList = stmt; } else { Statement* stmtPrev = stmtBefore->GetPrevStmt(); stmt->SetPrevStmt(stmtPrev); stmtPrev->SetNextStmt(stmt); } stmt->SetNextStmt(stmtBefore); stmtBefore->SetPrevStmt(stmt); } //------------------------------------------------------------------------ // impAppendTree: Append the given expression tree to the current block's tree list. // // // Arguments: // tree - The tree that will be the root of the newly created statement. // chkLevel - [0..chkLevel) is the portion of the stack which we will check // for interference with stmt and spill if needed. // di - Debug information to associate with the statement. // checkConsumedDebugInfo - Whether to check for consumption of impCurStmtDI. impCurStmtDI // marks the debug info of the current boundary and is set when we // start importing IL at that boundary. If this parameter is true, // then the function checks if 'stmt' has been associated with the // current boundary, and if so, clears it so that we do not attach // it to more upcoming statements. // // Return value: // The newly created statement. // Statement* Compiler::impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo) { assert(tree); /* Allocate an 'expression statement' node */ Statement* stmt = gtNewStmt(tree, di); /* Append the statement to the current block's stmt list */ impAppendStmt(stmt, chkLevel, checkConsumedDebugInfo); return stmt; } /***************************************************************************** * * Insert the given expression tree before "stmtBefore" */ void Compiler::impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore) { /* Allocate an 'expression statement' node */ Statement* stmt = gtNewStmt(tree, di); /* Append the statement to the current block's stmt list */ impInsertStmtBefore(stmt, stmtBefore); } /***************************************************************************** * * Append an assignment of the given value to a temp to the current tree list. * curLevel is the stack level for which the spill to the temp is being done. */ void Compiler::impAssignTempGen(unsigned tmp, GenTree* val, unsigned curLevel, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ ) { GenTree* asg = gtNewTempAssign(tmp, val); if (!asg->IsNothingNode()) { if (pAfterStmt) { Statement* asgStmt = gtNewStmt(asg, di); fgInsertStmtAfter(block, *pAfterStmt, asgStmt); *pAfterStmt = asgStmt; } else { impAppendTree(asg, curLevel, impCurStmtDI); } } } /***************************************************************************** * same as above, but handle the valueclass case too */ void Compiler::impAssignTempGen(unsigned tmpNum, GenTree* val, CORINFO_CLASS_HANDLE structType, unsigned curLevel, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ ) { GenTree* asg; assert(val->TypeGet() != TYP_STRUCT || structType != NO_CLASS_HANDLE); if (varTypeIsStruct(val) && (structType != NO_CLASS_HANDLE)) { assert(tmpNum < lvaCount); assert(structType != NO_CLASS_HANDLE); // if the method is non-verifiable the assert is not true // so at least ignore it in the case when verification is turned on // since any block that tries to use the temp would have failed verification. 
var_types varType = lvaTable[tmpNum].lvType; assert(varType == TYP_UNDEF || varTypeIsStruct(varType)); lvaSetStruct(tmpNum, structType, false); varType = lvaTable[tmpNum].lvType; // Now, set the type of the struct value. Note that lvaSetStruct may modify the type // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType) // that has been passed in for the value being assigned to the temp, in which case we // need to set 'val' to that same type. // Note also that if we always normalized the types of any node that might be a struct // type, this would not be necessary - but that requires additional JIT/EE interface // calls that may not actually be required - e.g. if we only access a field of a struct. GenTree* dst = gtNewLclvNode(tmpNum, varType); asg = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, di, block); } else { asg = gtNewTempAssign(tmpNum, val); } if (!asg->IsNothingNode()) { if (pAfterStmt) { Statement* asgStmt = gtNewStmt(asg, di); fgInsertStmtAfter(block, *pAfterStmt, asgStmt); *pAfterStmt = asgStmt; } else { impAppendTree(asg, curLevel, impCurStmtDI); } } } /***************************************************************************** * * Pop the given number of values from the stack and return a list node with * their values. * The 'prefixTree' argument may optionally contain an argument * list that is prepended to the list returned from this function. * * The notion of prepended is a bit misleading in that the list is backwards * from the way I would expect: The first element popped is at the end of * the returned list, and prefixTree is 'before' that, meaning closer to * the end of the list. To get to prefixTree, you have to walk to the * end of the list. * * For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as * such we reverse its meaning such that returnValue has a reversed * prefixTree at the head of the list. */ GenTreeCall::Use* Compiler::impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig, GenTreeCall::Use* prefixArgs) { assert(sig == nullptr || count == sig->numArgs); CORINFO_CLASS_HANDLE structType; GenTreeCall::Use* argList; if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) { argList = nullptr; } else { // ARG_ORDER_L2R argList = prefixArgs; } while (count--) { StackEntry se = impPopStack(); typeInfo ti = se.seTypeInfo; GenTree* temp = se.val; if (varTypeIsStruct(temp)) { // Morph trees that aren't already OBJs or MKREFANY to be OBJs assert(ti.IsType(TI_STRUCT)); structType = ti.GetClassHandleForValueClass(); bool forceNormalization = false; if (varTypeIsSIMD(temp)) { // We need to ensure that fgMorphArgs will use the correct struct handle to ensure proper // ABI handling of this argument. // Note that this can happen, for example, if we have a SIMD intrinsic that returns a SIMD type // with a different baseType than we've seen. // We also need to ensure an OBJ node if we have a FIELD node that might be transformed to LCL_FLD // or a plain GT_IND. // TODO-Cleanup: Consider whether we can eliminate all of these cases. 
if ((gtGetStructHandleIfPresent(temp) != structType) || temp->OperIs(GT_FIELD)) { forceNormalization = true; } } #ifdef DEBUG if (verbose) { printf("Calling impNormStructVal on:\n"); gtDispTree(temp); } #endif temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL, forceNormalization); #ifdef DEBUG if (verbose) { printf("resulting tree:\n"); gtDispTree(temp); } #endif } /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */ argList = gtPrependNewCallArg(temp, argList); } if (sig != nullptr) { if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS && sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR) { // Make sure that all valuetypes (including enums) that we push are loaded. // This is to guarantee that if a GC is triggerred from the prestub of this methods, // all valuetypes in the method signature are already loaded. // We need to be able to find the size of the valuetypes, but we cannot // do a class-load from within GC. info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass); } CORINFO_ARG_LIST_HANDLE sigArgs = sig->args; GenTreeCall::Use* arg; for (arg = argList, count = sig->numArgs; count > 0; arg = arg->GetNext(), count--) { PREFIX_ASSUME(arg != nullptr); CORINFO_CLASS_HANDLE classHnd; CorInfoType corType = strip(info.compCompHnd->getArgType(sig, sigArgs, &classHnd)); var_types jitSigType = JITtype2varType(corType); if (!impCheckImplicitArgumentCoercion(jitSigType, arg->GetNode()->TypeGet())) { BADCODE("the call argument has a type that can't be implicitly converted to the signature type"); } // insert implied casts (from float to double or double to float) if ((jitSigType == TYP_DOUBLE) && (arg->GetNode()->TypeGet() == TYP_FLOAT)) { arg->SetNode(gtNewCastNode(TYP_DOUBLE, arg->GetNode(), false, TYP_DOUBLE)); } else if ((jitSigType == TYP_FLOAT) && (arg->GetNode()->TypeGet() == TYP_DOUBLE)) { arg->SetNode(gtNewCastNode(TYP_FLOAT, arg->GetNode(), false, TYP_FLOAT)); } // insert any widening or narrowing casts for backwards compatibility arg->SetNode(impImplicitIorI4Cast(arg->GetNode(), jitSigType)); if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR && corType != CORINFO_TYPE_VAR) { CORINFO_CLASS_HANDLE argRealClass = info.compCompHnd->getArgClass(sig, sigArgs); if (argRealClass != nullptr) { // Make sure that all valuetypes (including enums) that we push are loaded. // This is to guarantee that if a GC is triggered from the prestub of this methods, // all valuetypes in the method signature are already loaded. // We need to be able to find the size of the valuetypes, but we cannot // do a class-load from within GC. info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass); } } const var_types nodeArgType = arg->GetNode()->TypeGet(); if (!varTypeIsStruct(jitSigType) && genTypeSize(nodeArgType) != genTypeSize(jitSigType)) { assert(!varTypeIsStruct(nodeArgType)); // Some ABI require precise size information for call arguments less than target pointer size, // for example arm64 OSX. Create a special node to keep this information until morph // consumes it into `fgArgInfo`. 
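// (E.g. a 'short' argument: the IL stack node is a 4-byte int, but the Apple arm64 ABI packs it
// into a 2-byte stack slot, so the signature type must be carried along to morph.)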
GenTree* putArgType = gtNewOperNode(GT_PUTARG_TYPE, jitSigType, arg->GetNode()); arg->SetNode(putArgType); } sigArgs = info.compCompHnd->getArgNext(sigArgs); } } if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) { // Prepend the prefixTree // Simple in-place reversal to place treeList // at the end of a reversed prefixTree while (prefixArgs != nullptr) { GenTreeCall::Use* next = prefixArgs->GetNext(); prefixArgs->SetNext(argList); argList = prefixArgs; prefixArgs = next; } } return argList; } static bool TypeIs(var_types type1, var_types type2) { return type1 == type2; } // Check if type1 matches any type from the list. template <typename... T> static bool TypeIs(var_types type1, var_types type2, T... rest) { return TypeIs(type1, type2) || TypeIs(type1, rest...); } //------------------------------------------------------------------------ // impCheckImplicitArgumentCoercion: check that the node's type is compatible with // the signature's type using ECMA implicit argument coercion table. // // Arguments: // sigType - the type in the call signature; // nodeType - the node type. // // Return Value: // true if they are compatible, false otherwise. // // Notes: // - it is currently allowing byref->long passing, should be fixed in VM; // - it can't check long -> native int case on 64-bit platforms, // so the behavior is different depending on the target bitness. // bool Compiler::impCheckImplicitArgumentCoercion(var_types sigType, var_types nodeType) const { if (sigType == nodeType) { return true; } if (TypeIs(sigType, TYP_BOOL, TYP_UBYTE, TYP_BYTE, TYP_USHORT, TYP_SHORT, TYP_UINT, TYP_INT)) { if (TypeIs(nodeType, TYP_BOOL, TYP_UBYTE, TYP_BYTE, TYP_USHORT, TYP_SHORT, TYP_UINT, TYP_INT, TYP_I_IMPL)) { return true; } } else if (TypeIs(sigType, TYP_ULONG, TYP_LONG)) { if (TypeIs(nodeType, TYP_LONG)) { return true; } } else if (TypeIs(sigType, TYP_FLOAT, TYP_DOUBLE)) { if (TypeIs(nodeType, TYP_FLOAT, TYP_DOUBLE)) { return true; } } else if (TypeIs(sigType, TYP_BYREF)) { if (TypeIs(nodeType, TYP_I_IMPL)) { return true; } // This condition tolerates such IL: // ; V00 this ref this class-hnd // ldarg.0 // call(byref) if (TypeIs(nodeType, TYP_REF)) { return true; } } else if (varTypeIsStruct(sigType)) { if (varTypeIsStruct(nodeType)) { return true; } } // This condition should not be under `else` because `TYP_I_IMPL` // intersects with `TYP_LONG` or `TYP_INT`. if (TypeIs(sigType, TYP_I_IMPL, TYP_U_IMPL)) { // Note that it allows `ldc.i8 1; call(nint)` on 64-bit platforms, // but we can't distinguish `nint` from `long` there. if (TypeIs(nodeType, TYP_I_IMPL, TYP_U_IMPL, TYP_INT, TYP_UINT)) { return true; } // It tolerates IL that ECMA does not allow but that is commonly used. // Example: // V02 loc1 struct <RTL_OSVERSIONINFOEX, 32> // ldloca.s 0x2 // call(native int) if (TypeIs(nodeType, TYP_BYREF)) { return true; } } return false; } /***************************************************************************** * * Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.) * The first "skipReverseCount" items are not reversed. 
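* (Example, assuming the R2L target path: with A, B, C, D pushed in IL order and
* skipReverseCount == 1, the returned use list is A, D, C, B.)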
*/ GenTreeCall::Use* Compiler::impPopReverseCallArgs(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount) { assert(skipReverseCount <= count); GenTreeCall::Use* list = impPopCallArgs(count, sig); // reverse the list if (list == nullptr || skipReverseCount == count) { return list; } GenTreeCall::Use* ptr = nullptr; // Initialized to the first node that needs to be reversed GenTreeCall::Use* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed if (skipReverseCount == 0) { ptr = list; } else { lastSkipNode = list; // Get to the first node that needs to be reversed for (unsigned i = 0; i < skipReverseCount - 1; i++) { lastSkipNode = lastSkipNode->GetNext(); } PREFIX_ASSUME(lastSkipNode != nullptr); ptr = lastSkipNode->GetNext(); } GenTreeCall::Use* reversedList = nullptr; do { GenTreeCall::Use* tmp = ptr->GetNext(); ptr->SetNext(reversedList); reversedList = ptr; ptr = tmp; } while (ptr != nullptr); if (skipReverseCount) { lastSkipNode->SetNext(reversedList); return list; } else { return reversedList; } } //------------------------------------------------------------------------ // impAssignStruct: Create a struct assignment // // Arguments: // dest - the destination of the assignment // src - the value to be assigned // structHnd - handle representing the struct type // curLevel - stack level for which a spill may be being done // pAfterStmt - statement to insert any additional statements after // ilOffset - il offset for new statements // block - block to insert any additional statements in // // Return Value: // The tree that should be appended to the statement list that represents the assignment. // // Notes: // Temp assignments may be appended to impStmtList if spilling is necessary. GenTree* Compiler::impAssignStruct(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt, /* = nullptr */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = nullptr */ ) { assert(varTypeIsStruct(dest)); DebugInfo usedDI = di; if (!usedDI.IsValid()) { usedDI = impCurStmtDI; } while (dest->gtOper == GT_COMMA) { // Second thing is the struct. assert(varTypeIsStruct(dest->AsOp()->gtOp2)); // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree. if (pAfterStmt) { Statement* newStmt = gtNewStmt(dest->AsOp()->gtOp1, usedDI); fgInsertStmtAfter(block, *pAfterStmt, newStmt); *pAfterStmt = newStmt; } else { impAppendTree(dest->AsOp()->gtOp1, curLevel, usedDI); // do the side effect } // set dest to the second thing dest = dest->AsOp()->gtOp2; } assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD || dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX); // Return a NOP if this is a self-assignment. if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR && src->AsLclVarCommon()->GetLclNum() == dest->AsLclVarCommon()->GetLclNum()) { return gtNewNothingNode(); } // TODO-1stClassStructs: Avoid creating an address if it is not needed, // or re-creating a Blk node if it is. GenTree* destAddr; if (dest->gtOper == GT_IND || dest->OperIsBlk()) { destAddr = dest->AsOp()->gtOp1; } else { destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest); } return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, usedDI, block)); } //------------------------------------------------------------------------ // impAssignStructPtr: Assign (copy) the structure from 'src' to 'destAddr'. 
// // Arguments: // destAddr - address of the destination of the assignment // src - source of the assignment // structHnd - handle representing the struct type // curLevel - stack level for which a spill may be being done // pAfterStmt - statement to insert any additional statements after // di - debug info for new statements // block - block to insert any additional statements in // // Return Value: // The tree that should be appended to the statement list that represents the assignment. // // Notes: // Temp assignments may be appended to impStmtList if spilling is necessary. GenTree* Compiler::impAssignStructPtr(GenTree* destAddr, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ ) { GenTree* dest = nullptr; GenTreeFlags destFlags = GTF_EMPTY; DebugInfo usedDI = di; if (!usedDI.IsValid()) { usedDI = impCurStmtDI; } #ifdef DEBUG #ifdef FEATURE_HW_INTRINSICS if (src->OperIs(GT_HWINTRINSIC)) { const GenTreeHWIntrinsic* intrinsic = src->AsHWIntrinsic(); if (HWIntrinsicInfo::IsMultiReg(intrinsic->GetHWIntrinsicId())) { assert(src->TypeGet() == TYP_STRUCT); } else { assert(varTypeIsSIMD(src)); } } else #endif // FEATURE_HW_INTRINSICS { assert(src->OperIs(GT_LCL_VAR, GT_LCL_FLD, GT_FIELD, GT_IND, GT_OBJ, GT_CALL, GT_MKREFANY, GT_RET_EXPR, GT_COMMA) || ((src->TypeGet() != TYP_STRUCT) && src->OperIsSIMD())); } #endif // DEBUG var_types asgType = src->TypeGet(); if (src->gtOper == GT_CALL) { GenTreeCall* srcCall = src->AsCall(); if (srcCall->TreatAsHasRetBufArg(this)) { // Case of call returning a struct via hidden retbuf arg CLANG_FORMAT_COMMENT_ANCHOR; #if !defined(TARGET_ARM) // Unmanaged instance methods on Windows or Unix X86 need the retbuf arg after the first (this) parameter if ((TargetOS::IsWindows || compUnixX86Abi()) && srcCall->IsUnmanaged()) { if (callConvIsInstanceMethodCallConv(srcCall->GetUnmanagedCallConv())) { #ifdef TARGET_X86 // The argument list has already been reversed. // Insert the return buffer as the second-to-last node // so it will be pushed on to the stack after the user args but before the native this arg // as required by the native ABI. GenTreeCall::Use* lastArg = srcCall->gtCallArgs; if (lastArg == nullptr) { srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); } else if (srcCall->GetUnmanagedCallConv() == CorInfoCallConvExtension::Thiscall) { // For thiscall, the "this" parameter is not included in the argument list reversal, // so we need to put the return buffer as the last parameter. for (; lastArg->GetNext() != nullptr; lastArg = lastArg->GetNext()) ; gtInsertNewCallArgAfter(destAddr, lastArg); } else if (lastArg->GetNext() == nullptr) { srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, lastArg); } else { assert(lastArg != nullptr && lastArg->GetNext() != nullptr); GenTreeCall::Use* secondLastArg = lastArg; lastArg = lastArg->GetNext(); for (; lastArg->GetNext() != nullptr; secondLastArg = lastArg, lastArg = lastArg->GetNext()) ; assert(secondLastArg->GetNext() != nullptr); gtInsertNewCallArgAfter(destAddr, secondLastArg); } #else GenTreeCall::Use* thisArg = gtInsertNewCallArgAfter(destAddr, srcCall->gtCallArgs); #endif } else { #ifdef TARGET_X86 // The argument list has already been reversed. // Insert the return buffer as the last node so it will be pushed on to the stack last // as required by the native ABI. 
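// For illustration (hypothetical uses): a reversed list a3->a2->a1 becomes
// a3->a2->a1->retBufAddr below, i.e. the return buffer address is the last value
// pushed, which is where the native ABI expects it.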
GenTreeCall::Use* lastArg = srcCall->gtCallArgs; if (lastArg == nullptr) { srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); } else { for (; lastArg->GetNext() != nullptr; lastArg = lastArg->GetNext()) ; gtInsertNewCallArgAfter(destAddr, lastArg); } #else // insert the return value buffer into the argument list as first byref parameter srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); #endif } } else #endif // !defined(TARGET_ARM) { // insert the return value buffer into the argument list as first byref parameter srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); } // now returns void, not a struct src->gtType = TYP_VOID; // return the morphed call node return src; } else { // Case of call returning a struct in one or more registers. var_types returnType = (var_types)srcCall->gtReturnType; // First we try to change this to "LclVar/LclFld = call" // if ((destAddr->gtOper == GT_ADDR) && (destAddr->AsOp()->gtOp1->gtOper == GT_LCL_VAR)) { // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD. // That is, the IR will be of the form lclVar = call for multi-reg return // GenTreeLclVar* lcl = destAddr->AsOp()->gtOp1->AsLclVar(); unsigned lclNum = lcl->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (src->AsCall()->HasMultiRegRetVal()) { // Mark the struct LclVar as used in a MultiReg return context // which currently makes it non promotable. // TODO-1stClassStructs: Eliminate this pessimization when we can more generally // handle multireg returns. lcl->gtFlags |= GTF_DONT_CSE; varDsc->lvIsMultiRegRet = true; } dest = lcl; #if defined(TARGET_ARM) // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case, // but that method has not been updated to include ARM. impMarkLclDstNotPromotable(lclNum, src, structHnd); lcl->gtFlags |= GTF_DONT_CSE; #elif defined(UNIX_AMD64_ABI) // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs. assert(!src->AsCall()->IsVarargs() && "varargs not allowed for System V OSs."); // Make the struct non promotable. The eightbytes could contain multiple fields. // TODO-1stClassStructs: Eliminate this pessimization when we can more generally // handle multireg returns. // TODO-Cleanup: Why is this needed here? It seems that this will set this even for // non-multireg returns. lcl->gtFlags |= GTF_DONT_CSE; varDsc->lvIsMultiRegRet = true; #endif } else // we don't have a GT_ADDR of a GT_LCL_VAR { // !!! The destination could be on stack. !!! // This flag will let us choose the correct write barrier. asgType = returnType; destFlags = GTF_IND_TGTANYWHERE; } } } else if (src->gtOper == GT_RET_EXPR) { GenTreeCall* call = src->AsRetExpr()->gtInlineCandidate->AsCall(); noway_assert(call->gtOper == GT_CALL); if (call->HasRetBufArg()) { // insert the return value buffer into the argument list as first byref parameter call->gtCallArgs = gtPrependNewCallArg(destAddr, call->gtCallArgs); // now returns void, not a struct src->gtType = TYP_VOID; call->gtType = TYP_VOID; // We already have appended the write to 'dest' GT_CALL's args // So now we just return an empty node (pruning the GT_RET_EXPR) return src; } else { // Case of inline method returning a struct in one or more registers. // We won't need a return buffer asgType = src->gtType; if ((destAddr->gtOper != GT_ADDR) || (destAddr->AsOp()->gtOp1->gtOper != GT_LCL_VAR)) { // !!! The destination could be on stack. !!! // This flag will let us choose the correct write barrier.
destFlags = GTF_IND_TGTANYWHERE; } } } else if (src->OperIsBlk()) { asgType = impNormStructType(structHnd); if (src->gtOper == GT_OBJ) { assert(src->AsObj()->GetLayout()->GetClassHandle() == structHnd); } } else if (src->gtOper == GT_INDEX) { asgType = impNormStructType(structHnd); assert(src->AsIndex()->gtStructElemClass == structHnd); } else if (src->gtOper == GT_MKREFANY) { // Since we are assigning the result of a GT_MKREFANY, // "destAddr" must point to a refany. GenTree* destAddrClone; destAddr = impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment")); assert(OFFSETOF__CORINFO_TypedReference__dataPtr == 0); assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF); fgAddFieldSeqForZeroOffset(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField())); GenTree* ptrSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr); GenTreeIntCon* typeFieldOffset = gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL); typeFieldOffset->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField()); GenTree* typeSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset)); // append the assign of the pointer value GenTree* asg = gtNewAssignNode(ptrSlot, src->AsOp()->gtOp1); if (pAfterStmt) { Statement* newStmt = gtNewStmt(asg, usedDI); fgInsertStmtAfter(block, *pAfterStmt, newStmt); *pAfterStmt = newStmt; } else { impAppendTree(asg, curLevel, usedDI); } // return the assign of the type value, to be appended return gtNewAssignNode(typeSlot, src->AsOp()->gtOp2); } else if (src->gtOper == GT_COMMA) { // The second thing is the struct or its address. assert(varTypeIsStruct(src->AsOp()->gtOp2) || src->AsOp()->gtOp2->gtType == TYP_BYREF); if (pAfterStmt) { // Insert op1 after '*pAfterStmt' Statement* newStmt = gtNewStmt(src->AsOp()->gtOp1, usedDI); fgInsertStmtAfter(block, *pAfterStmt, newStmt); *pAfterStmt = newStmt; } else if (impLastStmt != nullptr) { // Do the side-effect as a separate statement. impAppendTree(src->AsOp()->gtOp1, curLevel, usedDI); } else { // In this case we have neither been given a statement to insert after, nor are we // in the importer where we can append the side effect. // Instead, we're going to sink the assignment below the COMMA. src->AsOp()->gtOp2 = impAssignStructPtr(destAddr, src->AsOp()->gtOp2, structHnd, curLevel, pAfterStmt, usedDI, block); return src; } // Evaluate the second thing using recursion. return impAssignStructPtr(destAddr, src->AsOp()->gtOp2, structHnd, curLevel, pAfterStmt, usedDI, block); } else if (src->IsLocal()) { asgType = src->TypeGet(); } else if (asgType == TYP_STRUCT) { // It should already have the appropriate type. assert(asgType == impNormStructType(structHnd)); } if ((dest == nullptr) && (destAddr->OperGet() == GT_ADDR)) { GenTree* destNode = destAddr->gtGetOp1(); // If the actual destination is a local, a GT_INDEX or a block node, or is a node that // will be morphed, don't insert an OBJ(ADDR) if it already has the right type. if (destNode->OperIs(GT_LCL_VAR, GT_INDEX) || destNode->OperIsBlk()) { var_types destType = destNode->TypeGet(); // If one or both types are TYP_STRUCT (one may not yet be normalized), they are compatible // iff their handles are the same. // Otherwise, they are compatible if their types are the same. bool typesAreCompatible = ((destType == TYP_STRUCT) || (asgType == TYP_STRUCT)) ? 
((gtGetStructHandleIfPresent(destNode) == structHnd) && varTypeIsStruct(asgType)) : (destType == asgType); if (typesAreCompatible) { dest = destNode; if (destType != TYP_STRUCT) { // Use a normalized type if available. We know from above that they're equivalent. asgType = destType; } } } } if (dest == nullptr) { if (asgType == TYP_STRUCT) { dest = gtNewObjNode(structHnd, destAddr); gtSetObjGcInfo(dest->AsObj()); // Although an obj as a call argument was always assumed to be a globRef // (which is itself overly conservative), that is not true of the operands // of a block assignment. dest->gtFlags &= ~GTF_GLOB_REF; dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF); } else { dest = gtNewOperNode(GT_IND, asgType, destAddr); } } if (dest->OperIs(GT_LCL_VAR) && (src->IsMultiRegNode() || (src->OperIs(GT_RET_EXPR) && src->AsRetExpr()->gtInlineCandidate->AsCall()->HasMultiRegRetVal()))) { if (lvaEnregMultiRegVars && varTypeIsStruct(dest)) { dest->AsLclVar()->SetMultiReg(); } if (src->OperIs(GT_CALL)) { lvaGetDesc(dest->AsLclVar())->lvIsMultiRegRet = true; } } dest->gtFlags |= destFlags; destFlags = dest->gtFlags; // return an assignment node, to be appended GenTree* asgNode = gtNewAssignNode(dest, src); gtBlockOpInit(asgNode, dest, src, false); // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs // of assignments. if ((destFlags & GTF_DONT_CSE) == 0) { dest->gtFlags &= ~(GTF_DONT_CSE); } return asgNode; } /***************************************************************************** Given a struct value, and the class handle for that structure, return the expression for the address for that structure value. willDeref - does the caller guarantee to dereference the pointer. */ GenTree* Compiler::impGetStructAddr(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool willDeref) { assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd)); var_types type = structVal->TypeGet(); genTreeOps oper = structVal->gtOper; if (oper == GT_OBJ && willDeref) { assert(structVal->AsObj()->GetLayout()->GetClassHandle() == structHnd); return (structVal->AsObj()->Addr()); } else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY || structVal->OperIsSimdOrHWintrinsic()) { unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj")); impAssignTempGen(tmpNum, structVal, structHnd, curLevel); // The 'return value' is now the temp itself type = genActualType(lvaTable[tmpNum].TypeGet()); GenTree* temp = gtNewLclvNode(tmpNum, type); temp = gtNewOperNode(GT_ADDR, TYP_BYREF, temp); return temp; } else if (oper == GT_COMMA) { assert(structVal->AsOp()->gtOp2->gtType == type); // Second thing is the struct Statement* oldLastStmt = impLastStmt; structVal->AsOp()->gtOp2 = impGetStructAddr(structVal->AsOp()->gtOp2, structHnd, curLevel, willDeref); structVal->gtType = TYP_BYREF; if (oldLastStmt != impLastStmt) { // Some temp assignment statement was placed on the statement list // for Op2, but that would be out of order with op1, so we need to // spill op1 onto the statement list after whatever was last // before we recursed on Op2 (i.e. before whatever Op2 appended). Statement* beforeStmt; if (oldLastStmt == nullptr) { // The op1 stmt should be the first in the list. beforeStmt = impStmtList; } else { // Insert after the oldLastStmt before the first inserted for op2. 
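// For illustration (statement names are made up): if the statement list was S1->S2
// before recursing and op2's spill appended S3, then op1 is spilled to a new
// statement inserted between S2 and S3, so its side effect still happens before
// op2's temp assignment.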
beforeStmt = oldLastStmt->GetNextStmt(); } impInsertTreeBefore(structVal->AsOp()->gtOp1, impCurStmtDI, beforeStmt); structVal->AsOp()->gtOp1 = gtNewNothingNode(); } return (structVal); } return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal)); } //------------------------------------------------------------------------ // impNormStructType: Normalize the type of a (known to be) struct class handle. // // Arguments: // structHnd - The class handle for the struct type of interest. // pSimdBaseJitType - (optional, default nullptr) - if non-null, and the struct is a SIMD // type, set to the SIMD base JIT type // // Return Value: // The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*). // It may also modify the compFloatingPointUsed flag if the type is a SIMD type. // // Notes: // Normalizing the type involves examining the struct type to determine if it should // be modified to one that is handled specially by the JIT, possibly being a candidate // for full enregistration, e.g. TYP_SIMD16. If the size of the struct is already known // call structSizeMightRepresentSIMDType to determine if this api needs to be called. var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* pSimdBaseJitType) { assert(structHnd != NO_CLASS_HANDLE); var_types structType = TYP_STRUCT; #ifdef FEATURE_SIMD if (supportSIMDTypes()) { const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd); // Don't bother if the struct contains GC references of byrefs, it can't be a SIMD type. if ((structFlags & (CORINFO_FLG_CONTAINS_GC_PTR | CORINFO_FLG_BYREF_LIKE)) == 0) { unsigned originalSize = info.compCompHnd->getClassSize(structHnd); if (structSizeMightRepresentSIMDType(originalSize)) { unsigned int sizeBytes; CorInfoType simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(structHnd, &sizeBytes); if (simdBaseJitType != CORINFO_TYPE_UNDEF) { assert(sizeBytes == originalSize); structType = getSIMDTypeForSize(sizeBytes); if (pSimdBaseJitType != nullptr) { *pSimdBaseJitType = simdBaseJitType; } // Also indicate that we use floating point registers. compFloatingPointUsed = true; } } } } #endif // FEATURE_SIMD return structType; } //------------------------------------------------------------------------ // Compiler::impNormStructVal: Normalize a struct value // // Arguments: // structVal - the node we are going to normalize // structHnd - the class handle for the node // curLevel - the current stack level // forceNormalization - Force the creation of an OBJ node (default is false). // // Notes: // Given struct value 'structVal', make sure it is 'canonical', that is // it is either: // - a known struct type (non-TYP_STRUCT, e.g. TYP_SIMD8) // - an OBJ or a MKREFANY node, or // - a node (e.g. GT_INDEX) that will be morphed. // If the node is a CALL or RET_EXPR, a copy will be made to a new temp. // GenTree* Compiler::impNormStructVal(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool forceNormalization /*=false*/) { assert(forceNormalization || varTypeIsStruct(structVal)); assert(structHnd != NO_CLASS_HANDLE); var_types structType = structVal->TypeGet(); bool makeTemp = false; if (structType == TYP_STRUCT) { structType = impNormStructType(structHnd); } bool alreadyNormalized = false; GenTreeLclVarCommon* structLcl = nullptr; genTreeOps oper = structVal->OperGet(); switch (oper) { // GT_RETURN and GT_MKREFANY don't capture the handle. 
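// (For example, had structHnd been a 16-byte SIMD type such as System.Numerics.Vector4,
// impNormStructType above would already have rewritten structType to TYP_SIMD16 rather
// than TYP_STRUCT, assuming FEATURE_SIMD is enabled for the target.)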
case GT_RETURN: break; case GT_MKREFANY: alreadyNormalized = true; break; case GT_CALL: structVal->AsCall()->gtRetClsHnd = structHnd; makeTemp = true; break; case GT_RET_EXPR: structVal->AsRetExpr()->gtRetClsHnd = structHnd; makeTemp = true; break; case GT_ARGPLACE: structVal->AsArgPlace()->gtArgPlaceClsHnd = structHnd; break; case GT_INDEX: // This will be transformed to an OBJ later. alreadyNormalized = true; structVal->AsIndex()->gtStructElemClass = structHnd; structVal->AsIndex()->gtIndElemSize = info.compCompHnd->getClassSize(structHnd); break; case GT_FIELD: // Wrap it in a GT_OBJ, if needed. structVal->gtType = structType; if ((structType == TYP_STRUCT) || forceNormalization) { structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal)); } break; case GT_LCL_VAR: case GT_LCL_FLD: structLcl = structVal->AsLclVarCommon(); // Wrap it in a GT_OBJ. structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal)); FALLTHROUGH; case GT_OBJ: case GT_BLK: case GT_ASG: // These should already have the appropriate type. assert(structVal->gtType == structType); alreadyNormalized = true; break; case GT_IND: assert(structVal->gtType == structType); structVal = gtNewObjNode(structHnd, structVal->gtGetOp1()); alreadyNormalized = true; break; #ifdef FEATURE_SIMD case GT_SIMD: assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType)); break; #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS case GT_HWINTRINSIC: assert(structVal->gtType == structType); assert(varTypeIsSIMD(structVal) || HWIntrinsicInfo::IsMultiReg(structVal->AsHWIntrinsic()->GetHWIntrinsicId())); break; #endif case GT_COMMA: { // The second thing could either be a block node or a GT_FIELD or a GT_SIMD or a GT_COMMA node. GenTree* blockNode = structVal->AsOp()->gtOp2; assert(blockNode->gtType == structType); // Is this GT_COMMA(op1, GT_COMMA())? GenTree* parent = structVal; if (blockNode->OperGet() == GT_COMMA) { // Find the last node in the comma chain. do { assert(blockNode->gtType == structType); parent = blockNode; blockNode = blockNode->AsOp()->gtOp2; } while (blockNode->OperGet() == GT_COMMA); } if (blockNode->OperGet() == GT_FIELD) { // If we have a GT_FIELD then wrap it in a GT_OBJ. blockNode = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, blockNode)); } #ifdef FEATURE_SIMD if (blockNode->OperIsSimdOrHWintrinsic()) { parent->AsOp()->gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization); alreadyNormalized = true; } else #endif { noway_assert(blockNode->OperIsBlk()); // Sink the GT_COMMA below the blockNode addr. // That is, GT_COMMA(op1, op2=blockNode) is transformed into // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)). // // In the case of a chained GT_COMMA, we sink the last // GT_COMMA below the blockNode addr.
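// For illustration:
//   before: COMMA(op1, OBJ(addr))
//   after:  OBJ(COMMA(op1, addr)), with the COMMA re-typed to TYP_BYREF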
GenTree* blockNodeAddr = blockNode->AsOp()->gtOp1; assert(blockNodeAddr->gtType == TYP_BYREF); GenTree* commaNode = parent; commaNode->gtType = TYP_BYREF; commaNode->AsOp()->gtOp2 = blockNodeAddr; blockNode->AsOp()->gtOp1 = commaNode; if (parent == structVal) { structVal = blockNode; } alreadyNormalized = true; } } break; default: noway_assert(!"Unexpected node in impNormStructVal()"); break; } structVal->gtType = structType; if (!alreadyNormalized || forceNormalization) { if (makeTemp) { unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj")); impAssignTempGen(tmpNum, structVal, structHnd, curLevel); // The structVal is now the temp itself structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon(); structVal = structLcl; } if ((forceNormalization || (structType == TYP_STRUCT)) && !structVal->OperIsBlk()) { // Wrap it in a GT_OBJ structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal)); } } if (structLcl != nullptr) { // A OBJ on a ADDR(LCL_VAR) can never raise an exception // so we don't set GTF_EXCEPT here. if (!lvaIsImplicitByRefLocal(structLcl->GetLclNum())) { structVal->gtFlags &= ~GTF_GLOB_REF; } } else if (structVal->OperIsBlk()) { // In general a OBJ is an indirection and could raise an exception. structVal->gtFlags |= GTF_EXCEPT; } return structVal; } /******************************************************************************/ // Given a type token, generate code that will evaluate to the correct // handle representation of that token (type handle, field handle, or method handle) // // For most cases, the handle is determined at compile-time, and the code // generated is simply an embedded handle. // // Run-time lookup is required if the enclosing method is shared between instantiations // and the token refers to formal type parameters whose instantiation is not known // at compile-time. // GenTree* Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool* pRuntimeLookup /* = NULL */, bool mustRestoreHandle /* = false */, bool importParent /* = false */) { assert(!fgGlobalMorph); CORINFO_GENERICHANDLE_RESULT embedInfo; info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo); if (pRuntimeLookup) { *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup; } if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup) { switch (embedInfo.handleType) { case CORINFO_HANDLETYPE_CLASS: info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle); break; case CORINFO_HANDLETYPE_METHOD: info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle); break; case CORINFO_HANDLETYPE_FIELD: info.compCompHnd->classMustBeLoadedBeforeCodeIsRun( info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle)); break; default: break; } } // Generate the full lookup tree. May be null if we're abandoning an inline attempt. GenTree* result = impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token), embedInfo.compileTimeHandle); // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node. 
if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup) { result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result); } return result; } GenTree* Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle) { if (!pLookup->lookupKind.needsRuntimeLookup) { // No runtime lookup is required. // Access is direct or memory-indirect (of a fixed address) reference CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE); if (pLookup->constLookup.accessType == IAT_VALUE) { handle = pLookup->constLookup.handle; } else if (pLookup->constLookup.accessType == IAT_PVALUE) { pIndirection = pLookup->constLookup.addr; } GenTree* addr = gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle); #ifdef DEBUG size_t handleToTrack; if (handleFlags == GTF_ICON_TOKEN_HDL) { handleToTrack = 0; } else { handleToTrack = (size_t)compileTimeHandle; } if (handle != nullptr) { addr->AsIntCon()->gtTargetHandle = handleToTrack; } else { addr->gtGetOp1()->AsIntCon()->gtTargetHandle = handleToTrack; } #endif return addr; } if (pLookup->lookupKind.runtimeLookupKind == CORINFO_LOOKUP_NOT_SUPPORTED) { // Runtime does not support inlining of all shapes of runtime lookups // Inlining has to be aborted in such a case assert(compIsForInlining()); compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP); return nullptr; } // Need to use dictionary-based access which depends on the typeContext // which is only available at runtime, not at compile-time. return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle); } #ifdef FEATURE_READYTORUN GenTree* Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle) { CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(pLookup->accessType != IAT_PPVALUE && pLookup->accessType != IAT_RELPVALUE); if (pLookup->accessType == IAT_VALUE) { handle = pLookup->handle; } else if (pLookup->accessType == IAT_PVALUE) { pIndirection = pLookup->addr; } GenTree* addr = gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle); #ifdef DEBUG assert((handleFlags == GTF_ICON_CLASS_HDL) || (handleFlags == GTF_ICON_METHOD_HDL)); if (handle != nullptr) { addr->AsIntCon()->gtTargetHandle = (size_t)compileTimeHandle; } else { addr->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)compileTimeHandle; } #endif // DEBUG return addr; } //------------------------------------------------------------------------ // impIsCastHelperEligibleForClassProbe: Checks whether a tree is a cast helper eligible to // to be profiled and then optimized with PGO data // // Arguments: // tree - the tree object to check // // Returns: // true if the tree is a cast helper eligible to be profiled // bool Compiler::impIsCastHelperEligibleForClassProbe(GenTree* tree) { if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) || (JitConfig.JitCastProfiling() != 1)) { return false; } if (tree->IsCall() && tree->AsCall()->gtCallType == CT_HELPER) { const CorInfoHelpFunc helper = eeGetHelperNum(tree->AsCall()->gtCallMethHnd); if ((helper == CORINFO_HELP_ISINSTANCEOFINTERFACE) || (helper == CORINFO_HELP_ISINSTANCEOFCLASS) || (helper == CORINFO_HELP_CHKCASTCLASS) || (helper == CORINFO_HELP_CHKCASTINTERFACE)) { return true; } } return false; } 
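// For illustration (C# shape, hypothetical): "obj is IDisposable" typically reaches the
// JIT as a CORINFO_HELP_ISINSTANCEOFINTERFACE call and "(IDisposable)obj" as
// CORINFO_HELP_CHKCASTINTERFACE; both are treated as probe-eligible above when
// JitCastProfiling is enabled and the method is being instrumented (BBINSTR).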
//------------------------------------------------------------------------ // impIsCastHelperMayHaveProfileData: Checks whether a tree is a cast helper that might // have profile data // // Arguments: // tree - the tree object to check // // Returns: // true if the tree is a cast helper with potential profile data // bool Compiler::impIsCastHelperMayHaveProfileData(GenTree* tree) { if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT) || (JitConfig.JitCastProfiling() != 1)) { return false; } if (tree->IsCall() && tree->AsCall()->gtCallType == CT_HELPER) { const CorInfoHelpFunc helper = eeGetHelperNum(tree->AsCall()->gtCallMethHnd); if ((helper == CORINFO_HELP_ISINSTANCEOFINTERFACE) || (helper == CORINFO_HELP_ISINSTANCEOFCLASS) || (helper == CORINFO_HELP_CHKCASTCLASS) || (helper == CORINFO_HELP_CHKCASTINTERFACE)) { return true; } } return false; } GenTreeCall* Compiler::impReadyToRunHelperToTree( CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoHelpFunc helper, var_types type, GenTreeCall::Use* args /* = nullptr */, CORINFO_LOOKUP_KIND* pGenericLookupKind /* =NULL. Only used with generics */) { CORINFO_CONST_LOOKUP lookup; if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup)) { return nullptr; } GenTreeCall* op1 = gtNewHelperCallNode(helper, type, args); op1->setEntryPoint(lookup); return op1; } #endif GenTree* Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { GenTree* op1 = nullptr; switch (pCallInfo->kind) { case CORINFO_CALL: op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod); #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { op1->AsFptrVal()->gtEntryPoint = pCallInfo->codePointerLookup.constLookup; } #endif break; case CORINFO_CALL_CODE_POINTER: op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod); break; default: noway_assert(!"unknown call kind"); break; } return op1; } //------------------------------------------------------------------------ // getRuntimeContextTree: find pointer to context for runtime lookup. // // Arguments: // kind - lookup kind. // // Return Value: // Return GenTree pointer to generic shared context. // // Notes: // Reports about generic context using. GenTree* Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind) { GenTree* ctxTree = nullptr; // Collectible types requires that for shared generic code, if we use the generic context parameter // that we report it. (This is a conservative approach, we could detect some cases particularly when the // context parameter is this that we don't need the eager reporting logic.) lvaGenericsContextInUse = true; Compiler* pRoot = impInlineRoot(); if (kind == CORINFO_LOOKUP_THISOBJ) { // this Object ctxTree = gtNewLclvNode(pRoot->info.compThisArg, TYP_REF); ctxTree->gtFlags |= GTF_VAR_CONTEXT; // context is the method table pointer of the this object ctxTree = gtNewMethodTableLookup(ctxTree); } else { assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM); // Exact method descriptor as passed in ctxTree = gtNewLclvNode(pRoot->info.compTypeCtxtArg, TYP_I_IMPL); ctxTree->gtFlags |= GTF_VAR_CONTEXT; } return ctxTree; } /*****************************************************************************/ /* Import a dictionary lookup to access a handle in code shared between generic instantiations. The lookup depends on the typeContext which is only available at runtime, and not at compile-time. 
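(A typical source of such lookups, for illustration: evaluating typeof(T) or creating
a T[] inside a generic method whose code is shared across reference-type
instantiations.)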
pLookup->token1 and pLookup->token2 specify the handle that is needed. The cases are: 1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the instantiation-specific handle, and the tokens to lookup the handle. 2. pLookup->indirections != CORINFO_USEHELPER : 2a. pLookup->testForNull == false : Dereference the instantiation-specific handle to get the handle. 2b. pLookup->testForNull == true : Dereference the instantiation-specific handle. If it is non-NULL, it is the handle required. Else, call a helper to lookup the handle. */ GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle) { GenTree* ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind); CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup; // It's available only via the run-time helper function if (pRuntimeLookup->indirections == CORINFO_USEHELPER) { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL, gtNewCallArgs(ctxTree), &pLookup->lookupKind); } #endif return gtNewRuntimeLookupHelperCallNode(pRuntimeLookup, ctxTree, compileTimeHandle); } // Slot pointer GenTree* slotPtrTree = ctxTree; if (pRuntimeLookup->testForNull) { slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup slot")); } GenTree* indOffTree = nullptr; GenTree* lastIndOfTree = nullptr; // Applied repeated indirections for (WORD i = 0; i < pRuntimeLookup->indirections; i++) { if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset)) { indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup indirectOffset")); } // The last indirection could be subject to a size check (dynamic dictionary expansion) bool isLastIndirectionWithSizeCheck = ((i == pRuntimeLookup->indirections - 1) && (pRuntimeLookup->sizeOffset != CORINFO_NO_SIZE_CHECK)); if (i != 0) { slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree); slotPtrTree->gtFlags |= GTF_IND_NONFAULTING; if (!isLastIndirectionWithSizeCheck) { slotPtrTree->gtFlags |= GTF_IND_INVARIANT; } } if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset)) { slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree); } if (pRuntimeLookup->offsets[i] != 0) { if (isLastIndirectionWithSizeCheck) { lastIndOfTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup indirectOffset")); } slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL)); } } // No null test required if (!pRuntimeLookup->testForNull) { if (pRuntimeLookup->indirections == 0) { return slotPtrTree; } slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree); slotPtrTree->gtFlags |= GTF_IND_NONFAULTING; if (!pRuntimeLookup->testForFixup) { return slotPtrTree; } impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0")); unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test")); impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtDI); GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL); // downcast the pointer to a TYP_INT on 64-bit targets slot = impImplicitIorI4Cast(slot, TYP_INT); // 
Use a GT_AND to check for the lowest bit and indirect if it is set GenTree* test = gtNewOperNode(GT_AND, TYP_INT, slot, gtNewIconNode(1)); GenTree* relop = gtNewOperNode(GT_EQ, TYP_INT, test, gtNewIconNode(0)); // slot = GT_IND(slot - 1) slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL); GenTree* add = gtNewOperNode(GT_ADD, TYP_I_IMPL, slot, gtNewIconNode(-1, TYP_I_IMPL)); GenTree* indir = gtNewOperNode(GT_IND, TYP_I_IMPL, add); indir->gtFlags |= GTF_IND_NONFAULTING; indir->gtFlags |= GTF_IND_INVARIANT; slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL); GenTree* asg = gtNewAssignNode(slot, indir); GenTreeColon* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg); GenTreeQmark* qmark = gtNewQmarkNode(TYP_VOID, relop, colon); impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); return gtNewLclvNode(slotLclNum, TYP_I_IMPL); } assert(pRuntimeLookup->indirections != 0); impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1")); // Extract the handle GenTree* handleForNullCheck = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree); handleForNullCheck->gtFlags |= GTF_IND_NONFAULTING; // Call the helper // - Setup argNode with the pointer to the signature returned by the lookup GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_GLOBAL_PTR, compileTimeHandle); GenTreeCall::Use* helperArgs = gtNewCallArgs(ctxTree, argNode); GenTreeCall* helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs); // Check for null and possibly call helper GenTree* nullCheck = gtNewOperNode(GT_NE, TYP_INT, handleForNullCheck, gtNewIconNode(0, TYP_I_IMPL)); GenTree* handleForResult = gtCloneExpr(handleForNullCheck); GenTree* result = nullptr; if (pRuntimeLookup->sizeOffset != CORINFO_NO_SIZE_CHECK) { // Dynamic dictionary expansion support assert((lastIndOfTree != nullptr) && (pRuntimeLookup->indirections > 0)); // sizeValue = dictionary[pRuntimeLookup->sizeOffset] GenTreeIntCon* sizeOffset = gtNewIconNode(pRuntimeLookup->sizeOffset, TYP_I_IMPL); GenTree* sizeValueOffset = gtNewOperNode(GT_ADD, TYP_I_IMPL, lastIndOfTree, sizeOffset); GenTree* sizeValue = gtNewOperNode(GT_IND, TYP_I_IMPL, sizeValueOffset); sizeValue->gtFlags |= GTF_IND_NONFAULTING; // sizeCheck fails if sizeValue < pRuntimeLookup->offsets[i] GenTree* offsetValue = gtNewIconNode(pRuntimeLookup->offsets[pRuntimeLookup->indirections - 1], TYP_I_IMPL); GenTree* sizeCheck = gtNewOperNode(GT_LE, TYP_INT, sizeValue, offsetValue); // revert null check condition. nullCheck->ChangeOperUnchecked(GT_EQ); // ((sizeCheck fails || nullCheck fails))) ? (helperCall : handle). // Add checks and the handle as call arguments, indirect call transformer will handle this. helperCall->gtCallArgs = gtPrependNewCallArg(handleForResult, helperCall->gtCallArgs); helperCall->gtCallArgs = gtPrependNewCallArg(sizeCheck, helperCall->gtCallArgs); helperCall->gtCallArgs = gtPrependNewCallArg(nullCheck, helperCall->gtCallArgs); result = helperCall; addExpRuntimeLookupCandidate(helperCall); } else { GenTreeColon* colonNullCheck = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, handleForResult, helperCall); result = gtNewQmarkNode(TYP_I_IMPL, nullCheck, colonNullCheck); } unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling Runtime Lookup tree")); impAssignTempGen(tmp, result, (unsigned)CHECK_SPILL_NONE); return gtNewLclvNode(tmp, TYP_I_IMPL); } /****************************************************************************** * Spills the stack at verCurrentState.esStack[level] and replaces it with a temp. 
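 * (e.g. a GT_CALL sitting at that stack slot is assigned to a fresh temp and the
 * slot is replaced with a GT_LCL_VAR of that temp)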
* If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum, * else, grab a new temp. * For structs (which can be pushed on the stack using obj, etc), * special handling is needed */ struct RecursiveGuard { public: RecursiveGuard() { m_pAddress = nullptr; } ~RecursiveGuard() { if (m_pAddress) { *m_pAddress = false; } } void Init(bool* pAddress, bool bInitialize) { assert(pAddress && *pAddress == false && "Recursive guard violation"); m_pAddress = pAddress; if (bInitialize) { *m_pAddress = true; } } protected: bool* m_pAddress; }; bool Compiler::impSpillStackEntry(unsigned level, unsigned tnum #ifdef DEBUG , bool bAssertOnRecursion, const char* reason #endif ) { #ifdef DEBUG RecursiveGuard guard; guard.Init(&impNestedStackSpill, bAssertOnRecursion); #endif GenTree* tree = verCurrentState.esStack[level].val; /* Allocate a temp if we haven't been asked to use a particular one */ if (tnum != BAD_VAR_NUM && (tnum >= lvaCount)) { return false; } bool isNewTemp = false; if (tnum == BAD_VAR_NUM) { tnum = lvaGrabTemp(true DEBUGARG(reason)); isNewTemp = true; } /* Assign the spilled entry to the temp */ impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level); // If temp is newly introduced and a ref type, grab what type info we can. if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF)) { assert(lvaTable[tnum].lvSingleDef == 0); lvaTable[tnum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tnum); CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle(); lvaSetClass(tnum, tree, stkHnd); // If we're assigning a GT_RET_EXPR, note the temp over on the call, // so the inliner can use it in case it needs a return spill temp. if (tree->OperGet() == GT_RET_EXPR) { JITDUMP("\n*** see V%02u = GT_RET_EXPR, noting temp\n", tnum); GenTree* call = tree->AsRetExpr()->gtInlineCandidate; InlineCandidateInfo* ici = call->AsCall()->gtInlineCandidateInfo; ici->preexistingSpillTemp = tnum; } } // The tree type may be modified by impAssignTempGen, so use the type of the lclVar. var_types type = genActualType(lvaTable[tnum].TypeGet()); GenTree* temp = gtNewLclvNode(tnum, type); verCurrentState.esStack[level].val = temp; return true; } /***************************************************************************** * * Ensure that the stack has only spilled values */ void Compiler::impSpillStackEnsure(bool spillLeaves) { assert(!spillLeaves || opts.compDbgCode); for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; if (!spillLeaves && tree->OperIsLeaf()) { continue; } // Temps introduced by the importer itself don't need to be spilled bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->AsLclVarCommon()->GetLclNum() >= info.compLocalsCount); if (isTempLcl) { continue; } impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure")); } } void Compiler::impSpillEvalStack() { for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack")); } } /***************************************************************************** * * If the stack contains any trees with side effects in them, assign those * trees to temps and append the assignments to the statement list. * On return the stack is guaranteed to be empty. 
*/ inline void Compiler::impEvalSideEffects() { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects")); verCurrentState.esStackDepth = 0; } /***************************************************************************** * * If the stack contains any trees with side effects in them, assign those * trees to temps and replace them on the stack with refs to their temps. * [0..chkLevel) is the portion of the stack which will be checked and spilled. */ inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason)) { assert(chkLevel != (unsigned)CHECK_SPILL_NONE); /* Before we make any appends to the tree list we must spill the * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */ impSpillSpecialSideEff(); if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } assert(chkLevel <= verCurrentState.esStackDepth); GenTreeFlags spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT; for (unsigned i = 0; i < chkLevel; i++) { GenTree* tree = verCurrentState.esStack[i].val; if ((tree->gtFlags & spillFlags) != 0 || (spillGlobEffects && // Only consider the following when spillGlobEffects == true !impIsAddressInLocal(tree) && // No need to spill the GT_ADDR node on a local. gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or // lvAddrTaken flag. { impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason)); } } } /***************************************************************************** * * If the stack contains any trees with special side effects in them, assign * those trees to temps and replace them on the stack with refs to their temps. */ inline void Compiler::impSpillSpecialSideEff() { // Only exception objects need to be carefully handled if (!compCurBB->bbCatchTyp) { return; } for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; // Make sure if we have an exception object in the sub tree we spill ourselves. if (gtHasCatchArg(tree)) { impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff")); } } } /***************************************************************************** * * Spill all stack references to value classes (TYP_STRUCT nodes) */ void Compiler::impSpillValueClasses() { for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT) { // Tree walk was aborted, which means that we found a // value class on the stack. Need to spill that // stack entry. impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses")); } } } /***************************************************************************** * * Callback that checks if a tree node is TYP_STRUCT */ Compiler::fgWalkResult Compiler::impFindValueClasses(GenTree** pTree, fgWalkData* data) { fgWalkResult walkResult = WALK_CONTINUE; if ((*pTree)->gtType == TYP_STRUCT) { // Abort the walk and indicate that we found a value class walkResult = WALK_ABORT; } return walkResult; } /***************************************************************************** * * If the stack contains any trees with references to local #lclNum, assign * those trees to temps and replace their place on the stack with refs to * their temps. 
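 * (e.g. when a store to an illustrative local V02 is about to be imported, any stack
 * entry whose tree reads V02 is spilled first so the store cannot change the value
 * that entry was meant to produce)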
*/ void Compiler::impSpillLclRefs(ssize_t lclNum) { /* Before we make any appends to the tree list we must spill the * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */ impSpillSpecialSideEff(); for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; /* If the tree may throw an exception, and the block has a handler, then we need to spill assignments to the local if the local is live on entry to the handler. Just spill 'em all without considering the liveness */ bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT)); /* Skip the tree if it doesn't have an affected reference, unless xcptnCaught */ if (xcptnCaught || gtHasRef(tree, lclNum)) { impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs")); } } } /***************************************************************************** * * Push catch arg onto the stack. * If there are jumps to the beginning of the handler, insert basic block * and spill catch arg to a temp. Update the handler block if necessary. * * Returns the basic block of the actual handler. */ BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter) { // Do not inject the basic block twice on reimport. This should be // hit only under JIT stress. See if the block is the one we injected. // Note that EH canonicalization can inject internal blocks here. We might // be able to re-use such a block (but we don't, right now). if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE)) == (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE)) { Statement* stmt = hndBlk->firstStmt(); if (stmt != nullptr) { GenTree* tree = stmt->GetRootNode(); assert(tree != nullptr); if ((tree->gtOper == GT_ASG) && (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) && (tree->AsOp()->gtOp2->gtOper == GT_CATCH_ARG)) { tree = gtNewLclvNode(tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(), TYP_REF); impPushOnStack(tree, typeInfo(TI_REF, clsHnd)); return hndBlk->bbNext; } } // If we get here, it must have been some other kind of internal block. It's possible that // someone prepended something to our injected block, but that's unlikely. } /* Push the exception address value on the stack */ GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF); /* Mark the node as having a side-effect - i.e. cannot be * moved around since it is tied to a fixed location (EAX) */ arg->gtFlags |= GTF_ORDER_SIDEEFF; #if defined(JIT32_GCENCODER) const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5); #else const bool forceInsertNewBlock = compStressCompile(STRESS_CATCH_ARG, 5); #endif // defined(JIT32_GCENCODER) /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */ if (hndBlk->bbRefs > 1 || forceInsertNewBlock) { if (hndBlk->bbRefs == 1) { hndBlk->bbRefs++; } /* Create extra basic block for the spill */ BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true); newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE; newBlk->inheritWeight(hndBlk); newBlk->bbCodeOffs = hndBlk->bbCodeOffs; /* Account for the new link we are about to create */ hndBlk->bbRefs++; // Spill into a temp. 
unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg")); lvaTable[tempNum].lvType = TYP_REF; GenTree* argAsg = gtNewTempAssign(tempNum, arg); arg = gtNewLclvNode(tempNum, TYP_REF); hndBlk->bbStkTempsIn = tempNum; Statement* argStmt; if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) { // Report the debug info. impImportBlockCode won't treat the actual handler as exception block and thus // won't do it for us. // TODO-DEBUGINFO: Previous code always set stack as non-empty // here. Can we not just use impCurStmtOffsSet? Are we out of sync // here with the stack? impCurStmtDI = DebugInfo(compInlineContext, ILLocation(newBlk->bbCodeOffs, false, false)); argStmt = gtNewStmt(argAsg, impCurStmtDI); } else { argStmt = gtNewStmt(argAsg); } fgInsertStmtAtEnd(newBlk, argStmt); } impPushOnStack(arg, typeInfo(TI_REF, clsHnd)); return hndBlk; } /***************************************************************************** * * Given a tree, clone it. *pClone is set to the cloned tree. * Returns the original tree if the cloning was easy, * else returns the temp to which the tree had to be spilled to. * If the tree has side-effects, it will be spilled to a temp. */ GenTree* Compiler::impCloneExpr(GenTree* tree, GenTree** pClone, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt DEBUGARG(const char* reason)) { if (!(tree->gtFlags & GTF_GLOB_EFFECT)) { GenTree* clone = gtClone(tree, true); if (clone) { *pClone = clone; return tree; } } /* Store the operand in a temp and return the temp */ unsigned temp = lvaGrabTemp(true DEBUGARG(reason)); // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which // return a struct type. It also may modify the struct type to a more // specialized type (e.g. a SIMD type). So we will get the type from // the lclVar AFTER calling impAssignTempGen(). impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtDI); var_types type = genActualType(lvaTable[temp].TypeGet()); *pClone = gtNewLclvNode(temp, type); return gtNewLclvNode(temp, type); } //------------------------------------------------------------------------ // impCreateDIWithCurrentStackInfo: Create a DebugInfo instance with the // specified IL offset and 'is call' bit, using the current stack to determine // whether to set the 'stack empty' bit. // // Arguments: // offs - the IL offset for the DebugInfo // isCall - whether the created DebugInfo should have the IsCall bit set // // Return Value: // The DebugInfo instance. // DebugInfo Compiler::impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall) { assert(offs != BAD_IL_OFFSET); bool isStackEmpty = verCurrentState.esStackDepth <= 0; return DebugInfo(compInlineContext, ILLocation(offs, isStackEmpty, isCall)); } //------------------------------------------------------------------------ // impCurStmtOffsSet: Set the "current debug info" to attach to statements that // we are generating next. // // Arguments: // offs - the IL offset // // Remarks: // This function will be called in the main IL processing loop when it is // determined that we have reached a location in the IL stream for which we // want to report debug information. This is the main way we determine which // statements to report debug info for to the EE: for other statements, they // will have no debug information attached. 
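// For illustration: when the IL reader reaches an offset recorded in
// info.compStmtOffsets (a sequence point), it calls impCurStmtOffsSet(offs), and the
// statements appended from then on carry that offset until the next boundary.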
// inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs) { if (offs == BAD_IL_OFFSET) { impCurStmtDI = DebugInfo(compInlineContext, ILLocation()); } else { impCurStmtDI = impCreateDIWithCurrentStackInfo(offs, false); } } //------------------------------------------------------------------------ // impCanSpillNow: check is it possible to spill all values from eeStack to local variables. // // Arguments: // prevOpcode - last importer opcode // // Return Value: // true if it is legal, false if it could be a sequence that we do not want to divide. bool Compiler::impCanSpillNow(OPCODE prevOpcode) { // Don't spill after ldtoken, newarr and newobj, because it could be a part of the InitializeArray sequence. // Avoid breaking up to guarantee that impInitializeArrayIntrinsic can succeed. return (prevOpcode != CEE_LDTOKEN) && (prevOpcode != CEE_NEWARR) && (prevOpcode != CEE_NEWOBJ); } /***************************************************************************** * * Remember the instr offset for the statements * * When we do impAppendTree(tree), we can't set stmt->SetLastILOffset(impCurOpcOffs), * if the append was done because of a partial stack spill, * as some of the trees corresponding to code up to impCurOpcOffs might * still be sitting on the stack. * So we delay calling of SetLastILOffset() until impNoteLastILoffs(). * This should be called when an opcode finally/explicitly causes * impAppendTree(tree) to be called (as opposed to being called because of * a spill caused by the opcode) */ #ifdef DEBUG void Compiler::impNoteLastILoffs() { if (impLastILoffsStmt == nullptr) { // We should have added a statement for the current basic block // Is this assert correct ? assert(impLastStmt); impLastStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs); } else { impLastILoffsStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs); impLastILoffsStmt = nullptr; } } #endif // DEBUG /***************************************************************************** * We don't create any GenTree (excluding spills) for a branch. * For debugging info, we need a placeholder so that we can note * the IL offset in gtStmt.gtStmtOffs. So append an empty statement. */ void Compiler::impNoteBranchOffs() { if (opts.compDbgCode) { impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } } /***************************************************************************** * Locate the next stmt boundary for which we need to record info. * We will have to spill the stack at such boundaries if it is not * already empty. * Returns the next stmt boundary (after the start of the block) */ unsigned Compiler::impInitBlockLineInfo() { /* Assume the block does not correspond with any IL offset. This prevents us from reporting extra offsets. Extra mappings can cause confusing stepping, especially if the extra mapping is a jump-target, and the debugger does not ignore extra mappings, but instead rewinds to the nearest known offset */ impCurStmtOffsSet(BAD_IL_OFFSET); IL_OFFSET blockOffs = compCurBB->bbCodeOffs; if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES)) { impCurStmtOffsSet(blockOffs); } /* Always report IL offset 0 or some tests get confused. 
Probably a good idea anyways */ if (blockOffs == 0) { impCurStmtOffsSet(blockOffs); } if (!info.compStmtOffsetsCount) { return ~0; } /* Find the lowest explicit stmt boundary within the block */ /* Start looking at an entry that is based on our instr offset */ unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize; if (index >= info.compStmtOffsetsCount) { index = info.compStmtOffsetsCount - 1; } /* If we've guessed too far, back up */ while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs) { index--; } /* If we guessed short, advance ahead */ while (info.compStmtOffsets[index] < blockOffs) { index++; if (index == info.compStmtOffsetsCount) { return info.compStmtOffsetsCount; } } assert(index < info.compStmtOffsetsCount); if (info.compStmtOffsets[index] == blockOffs) { /* There is an explicit boundary for the start of this basic block. So we will start with bbCodeOffs. Else we will wait until we get to the next explicit boundary */ impCurStmtOffsSet(blockOffs); index++; } return index; } /*****************************************************************************/ bool Compiler::impOpcodeIsCallOpcode(OPCODE opcode) { switch (opcode) { case CEE_CALL: case CEE_CALLI: case CEE_CALLVIRT: return true; default: return false; } } /*****************************************************************************/ static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode) { switch (opcode) { case CEE_CALL: case CEE_CALLI: case CEE_CALLVIRT: case CEE_JMP: case CEE_NEWOBJ: case CEE_NEWARR: return true; default: return false; } } /*****************************************************************************/ // One might think it is worth caching these values, but results indicate // that it isn't. // In addition, caching them causes SuperPMI to be unable to completely // encapsulate an individual method context. CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass() { CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr); return refAnyClass; } CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass() { CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE); assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr); return typeHandleClass; } CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle() { CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE); assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr); return argIteratorClass; } CORINFO_CLASS_HANDLE Compiler::impGetStringClass() { CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING); assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr); return stringClass; } CORINFO_CLASS_HANDLE Compiler::impGetObjectClass() { CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT); assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr); return objectClass; } /***************************************************************************** * "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we * set its type to TYP_BYREF when we create it. 
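 * (for instance, the address produced by "ldloca.s V00" starts out as TYP_BYREF even
 * if the surrounding IL immediately treats it as a native int; V00 is just an
 * illustrative local)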
We know if it can be * changed to TYP_I_IMPL only at the point where we use it */ /* static */ void Compiler::impBashVarAddrsToI(GenTree* tree1, GenTree* tree2) { if (tree1->IsLocalAddrExpr() != nullptr) { tree1->gtType = TYP_I_IMPL; } if (tree2 && (tree2->IsLocalAddrExpr() != nullptr)) { tree2->gtType = TYP_I_IMPL; } } /***************************************************************************** * TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want * to make that an explicit cast in our trees, so any implicit casts that * exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are * turned into explicit casts here. * We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0) */ GenTree* Compiler::impImplicitIorI4Cast(GenTree* tree, var_types dstTyp) { var_types currType = genActualType(tree->gtType); var_types wantedType = genActualType(dstTyp); if (wantedType != currType) { // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp)) { if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->AsIntCon()->gtIconVal == 0))) { tree->gtType = TYP_I_IMPL; } } #ifdef TARGET_64BIT else if (varTypeIsI(wantedType) && (currType == TYP_INT)) { // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF tree = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); } else if ((wantedType == TYP_INT) && varTypeIsI(currType)) { // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT tree = gtNewCastNode(TYP_INT, tree, false, TYP_INT); } #endif // TARGET_64BIT } return tree; } /***************************************************************************** * TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases, * but we want to make that an explicit cast in our trees, so any implicit casts * that exist in the IL are turned into explicit casts here. */ GenTree* Compiler::impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp) { if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType)) { tree = gtNewCastNode(dstTyp, tree, false, dstTyp); } return tree; } //------------------------------------------------------------------------ // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray // with a GT_COPYBLK node. // // Arguments: // sig - The InitializeArray signature. // // Return Value: // A pointer to the newly created GT_COPYBLK node if the replacement succeeds or // nullptr otherwise. // // Notes: // The function recognizes the following IL pattern: // ldc <length> or a list of ldc <lower bound>/<length> // newarr or newobj // dup // ldtoken <field handle> // call InitializeArray // The lower bounds need not be constant except when the array rank is 1. // The function recognizes all kinds of arrays thus enabling a small runtime // such as CoreRT to skip providing an implementation for InitializeArray. GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig) { assert(sig->numArgs == 2); GenTree* fieldTokenNode = impStackTop(0).val; GenTree* arrayLocalNode = impStackTop(1).val; // // Verify that the field token is known and valid. Note that It's also // possible for the token to come from reflection, in which case we cannot do // the optimization and must therefore revert to calling the helper. You can // see an example of this in bvt\DynIL\initarray2.exe (in Main). // // Check to see if the ldtoken helper call is what we see here. 
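    // As an illustrative example (the exact lowering can vary by C# compiler version),
    // a field initializer such as
    //
    //     static readonly int[] s_values = new int[] { 1, 2, 3, 4 };
    //
    // is typically compiled to "newarr; dup; ldtoken <RVA data field>; call InitializeArray",
    // and the ldtoken shows up here as the CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD helper
    // call that the check below looks for.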
if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->AsCall()->gtCallType != CT_HELPER) || (fieldTokenNode->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD))) { return nullptr; } // Strip helper call away fieldTokenNode = fieldTokenNode->AsCall()->gtCallArgs->GetNode(); if (fieldTokenNode->gtOper == GT_IND) { fieldTokenNode = fieldTokenNode->AsOp()->gtOp1; } // Check for constant if (fieldTokenNode->gtOper != GT_CNS_INT) { return nullptr; } CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->AsIntCon()->gtCompileTimeHandle; if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr)) { return nullptr; } // // We need to get the number of elements in the array and the size of each element. // We verify that the newarr statement is exactly what we expect it to be. // If it's not then we just return NULL and we don't optimize this call // // It is possible the we don't have any statements in the block yet. if (impLastStmt == nullptr) { return nullptr; } // // We start by looking at the last statement, making sure it's an assignment, and // that the target of the assignment is the array passed to InitializeArray. // GenTree* arrayAssignment = impLastStmt->GetRootNode(); if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->AsOp()->gtOp1->gtOper != GT_LCL_VAR) || (arrayLocalNode->gtOper != GT_LCL_VAR) || (arrayAssignment->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum() != arrayLocalNode->AsLclVarCommon()->GetLclNum())) { return nullptr; } // // Make sure that the object being assigned is a helper call. // GenTree* newArrayCall = arrayAssignment->AsOp()->gtOp2; if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->AsCall()->gtCallType != CT_HELPER)) { return nullptr; } // // Verify that it is one of the new array helpers. // bool isMDArray = false; if (newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8) #ifdef FEATURE_READYTORUN && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1) #endif ) { if (newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR)) { return nullptr; } isMDArray = true; } CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->AsCall()->compileTimeHelperArgumentHandle; // // Make sure we found a compile time handle to the array // if (!arrayClsHnd) { return nullptr; } unsigned rank = 0; S_UINT32 numElements; if (isMDArray) { rank = info.compCompHnd->getArrayRank(arrayClsHnd); if (rank == 0) { return nullptr; } GenTreeCall::Use* tokenArg = newArrayCall->AsCall()->gtCallArgs; assert(tokenArg != nullptr); GenTreeCall::Use* numArgsArg = tokenArg->GetNext(); assert(numArgsArg != nullptr); GenTreeCall::Use* argsArg = numArgsArg->GetNext(); assert(argsArg != nullptr); // // The number of arguments should be a constant between 1 and 64. The rank can't be 0 // so at least one length must be present and the rank can't exceed 32 so there can // be at most 64 arguments - 32 lengths and 32 lower bounds. 
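    // For example (illustrative): a rank-2 "new int[2, 3]" passes numArgs == 2 (lengths only),
    // while an MD array constructor that also takes lower bounds passes numArgs == 4
    // (one lower-bound/length pair per dimension), up to the 32 + 32 maximum noted above.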
// if ((!numArgsArg->GetNode()->IsCnsIntOrI()) || (numArgsArg->GetNode()->AsIntCon()->IconValue() < 1) || (numArgsArg->GetNode()->AsIntCon()->IconValue() > 64)) { return nullptr; } unsigned numArgs = static_cast<unsigned>(numArgsArg->GetNode()->AsIntCon()->IconValue()); bool lowerBoundsSpecified; if (numArgs == rank * 2) { lowerBoundsSpecified = true; } else if (numArgs == rank) { lowerBoundsSpecified = false; // // If the rank is 1 and a lower bound isn't specified then the runtime creates // a SDArray. Note that even if a lower bound is specified it can be 0 and then // we get a SDArray as well, see the for loop below. // if (rank == 1) { isMDArray = false; } } else { return nullptr; } // // The rank is known to be at least 1 so we can start with numElements being 1 // to avoid the need to special case the first dimension. // numElements = S_UINT32(1); struct Match { static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs) { return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) && IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs); } static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs) { return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) && (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) && IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs); } static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs) { return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) && (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs); } static bool IsComma(GenTree* tree) { return (tree != nullptr) && (tree->OperGet() == GT_COMMA); } }; unsigned argIndex = 0; GenTree* comma; for (comma = argsArg->GetNode(); Match::IsComma(comma); comma = comma->gtGetOp2()) { if (lowerBoundsSpecified) { // // In general lower bounds can be ignored because they're not needed to // calculate the total number of elements. But for single dimensional arrays // we need to know if the lower bound is 0 because in this case the runtime // creates a SDArray and this affects the way the array data offset is calculated. // if (rank == 1) { GenTree* lowerBoundAssign = comma->gtGetOp1(); assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs)); GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2(); if (lowerBoundNode->IsIntegralConst(0)) { isMDArray = false; } } comma = comma->gtGetOp2(); argIndex++; } GenTree* lengthNodeAssign = comma->gtGetOp1(); assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs)); GenTree* lengthNode = lengthNodeAssign->gtGetOp2(); if (!lengthNode->IsCnsIntOrI()) { return nullptr; } numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue()); argIndex++; } assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs)); if (argIndex != numArgs) { return nullptr; } } else { // // Make sure there are exactly two arguments: the array class and // the number of elements. 
// GenTree* arrayLengthNode; GenTreeCall::Use* args = newArrayCall->AsCall()->gtCallArgs; #ifdef FEATURE_READYTORUN if (newArrayCall->AsCall()->gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)) { // Array length is 1st argument for readytorun helper arrayLengthNode = args->GetNode(); } else #endif { // Array length is 2nd argument for regular helper arrayLengthNode = args->GetNext()->GetNode(); } // // This optimization is only valid for a constant array size. // if (arrayLengthNode->gtOper != GT_CNS_INT) { return nullptr; } numElements = S_SIZE_T(arrayLengthNode->AsIntCon()->gtIconVal); if (!info.compCompHnd->isSDArray(arrayClsHnd)) { return nullptr; } } CORINFO_CLASS_HANDLE elemClsHnd; var_types elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd)); // // Note that genTypeSize will return zero for non primitive types, which is exactly // what we want (size will then be 0, and we will catch this in the conditional below). // Note that we don't expect this to fail for valid binaries, so we assert in the // non-verification case (the verification case should not assert but rather correctly // handle bad binaries). This assert is not guarding any specific invariant, but rather // saying that we don't expect this to happen, and if it is hit, we need to investigate // why. // S_UINT32 elemSize(genTypeSize(elementType)); S_UINT32 size = elemSize * S_UINT32(numElements); if (size.IsOverflow()) { return nullptr; } if ((size.Value() == 0) || (varTypeIsGC(elementType))) { return nullptr; } void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value()); if (!initData) { return nullptr; } // // At this point we are ready to commit to implementing the InitializeArray // intrinsic using a struct assignment. Pop the arguments from the stack and // return the struct assignment node. // impPopStack(); impPopStack(); const unsigned blkSize = size.Value(); unsigned dataOffset; if (isMDArray) { dataOffset = eeGetMDArrayDataOffset(rank); } else { dataOffset = eeGetArrayDataOffset(); } GenTree* dstAddr = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL)); GenTree* dst = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, dstAddr, typGetBlkLayout(blkSize)); GenTree* src = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_CONST_PTR, true); #ifdef DEBUG src->gtGetOp1()->AsIntCon()->gtTargetHandle = THT_IntializeArrayIntrinsics; #endif return gtNewBlkOpNode(dst, // dst src, // src false, // volatile true); // copyBlock } GenTree* Compiler::impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig) { assert(sig->numArgs == 1); assert(sig->sigInst.methInstCount == 1); GenTree* fieldTokenNode = impStackTop(0).val; // // Verify that the field token is known and valid. Note that it's also // possible for the token to come from reflection, in which case we cannot do // the optimization and must therefore revert to calling the helper. You can // see an example of this in bvt\DynIL\initarray2.exe (in Main). // // Check to see if the ldtoken helper call is what we see here. 
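    // As an illustrative example (requires a compiler recent enough to emit CreateSpan),
    // a property such as
    //
    //     static ReadOnlySpan<int> Ints => new int[] { 1, 2, 3 };
    //
    // is lowered to "ldtoken <RVA data field>; call RuntimeHelpers.CreateSpan<int>",
    // which is the shape this routine tries to recognize.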
if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->AsCall()->gtCallType != CT_HELPER) || (fieldTokenNode->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD))) { return nullptr; } // Strip helper call away fieldTokenNode = fieldTokenNode->AsCall()->gtCallArgs->GetNode(); if (fieldTokenNode->gtOper == GT_IND) { fieldTokenNode = fieldTokenNode->AsOp()->gtOp1; } // Check for constant if (fieldTokenNode->gtOper != GT_CNS_INT) { return nullptr; } CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->AsIntCon()->gtCompileTimeHandle; if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr)) { return nullptr; } CORINFO_CLASS_HANDLE fieldOwnerHnd = info.compCompHnd->getFieldClass(fieldToken); CORINFO_CLASS_HANDLE fieldClsHnd; var_types fieldElementType = JITtype2varType(info.compCompHnd->getFieldType(fieldToken, &fieldClsHnd, fieldOwnerHnd)); unsigned totalFieldSize; // Most static initialization data fields are of some structure, but it is possible for them to be of various // primitive types as well if (fieldElementType == var_types::TYP_STRUCT) { totalFieldSize = info.compCompHnd->getClassSize(fieldClsHnd); } else { totalFieldSize = genTypeSize(fieldElementType); } // Limit to primitive or enum type - see ArrayNative::GetSpanDataFrom() CORINFO_CLASS_HANDLE targetElemHnd = sig->sigInst.methInst[0]; if (info.compCompHnd->getTypeForPrimitiveValueClass(targetElemHnd) == CORINFO_TYPE_UNDEF) { return nullptr; } const unsigned targetElemSize = info.compCompHnd->getClassSize(targetElemHnd); assert(targetElemSize != 0); const unsigned count = totalFieldSize / targetElemSize; if (count == 0) { return nullptr; } void* data = info.compCompHnd->getArrayInitializationData(fieldToken, totalFieldSize); if (!data) { return nullptr; } // // Ready to commit to the work // impPopStack(); // Turn count and pointer value into constants. GenTree* lengthValue = gtNewIconNode(count, TYP_INT); GenTree* pointerValue = gtNewIconHandleNode((size_t)data, GTF_ICON_CONST_PTR); // Construct ReadOnlySpan<T> to return. CORINFO_CLASS_HANDLE spanHnd = sig->retTypeClass; unsigned spanTempNum = lvaGrabTemp(true DEBUGARG("ReadOnlySpan<T> for CreateSpan<T>")); lvaSetStruct(spanTempNum, spanHnd, false); CORINFO_FIELD_HANDLE pointerFieldHnd = info.compCompHnd->getFieldInClass(spanHnd, 0); CORINFO_FIELD_HANDLE lengthFieldHnd = info.compCompHnd->getFieldInClass(spanHnd, 1); GenTreeLclFld* pointerField = gtNewLclFldNode(spanTempNum, TYP_BYREF, 0); pointerField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(pointerFieldHnd)); GenTree* pointerFieldAsg = gtNewAssignNode(pointerField, pointerValue); GenTreeLclFld* lengthField = gtNewLclFldNode(spanTempNum, TYP_INT, TARGET_POINTER_SIZE); lengthField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(lengthFieldHnd)); GenTree* lengthFieldAsg = gtNewAssignNode(lengthField, lengthValue); // Now append a few statements the initialize the span impAppendTree(lengthFieldAsg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); impAppendTree(pointerFieldAsg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // And finally create a tree that points at the span. 
    return impCreateLocalNode(spanTempNum DEBUGARG(0));
}

//------------------------------------------------------------------------
// impIntrinsic: possibly expand intrinsic call into alternate IR sequence
//
// Arguments:
//    newobjThis - for constructor calls, the tree for the newly allocated object
//    clsHnd - handle for the intrinsic method's class
//    method - handle for the intrinsic method
//    sig - signature of the intrinsic method
//    methodFlags - CORINFO_FLG_XXX flags of the intrinsic method
//    memberRef - the token for the intrinsic method
//    readonlyCall - true if call has a readonly prefix
//    tailCall - true if call is in tail position
//    pConstrainedResolvedToken -- resolved token for constrained call, or nullptr
//       if call is not constrained
//    constraintCallThisTransform -- this transform to apply for a constrained call
//    pIntrinsicName [OUT] -- intrinsic name (see enumeration in namedintrinsiclist.h)
//       for "traditional" jit intrinsics
//    isSpecialIntrinsic [OUT] -- set true if intrinsic expansion is a call
//       that is amenable to special downstream optimization opportunities
//
// Returns:
//    IR tree to use in place of the call, or nullptr if the jit should treat
//    the intrinsic call like a normal call.
//
//    pIntrinsicName set to non-illegal value if the call is recognized as a
//    traditional jit intrinsic, even if the intrinsic is not expanded.
//
//    isSpecial set true if the expansion is subject to special
//    optimizations later in the jit processing
//
// Notes:
//    On success the IR tree may be a call to a different method or an inline
//    sequence. If it is a call, then the intrinsic processing here is responsible
//    for handling all the special cases, as upon return to impImportCall
//    expanded intrinsics bypass most of the normal call processing.
//
//    Intrinsics are generally not recognized in minopts and debug codegen.
//
//    However, certain traditional intrinsics are identified as "must expand"
//    if there is no fallback implementation to invoke; these must be handled
//    in all codegen modes.
//
//    New style intrinsics (where the fallback implementation is in IL) are
//    identified as "must expand" if they are invoked from within their
//    own method bodies.
//
GenTree* Compiler::impIntrinsic(GenTree*                newobjThis,
                                CORINFO_CLASS_HANDLE    clsHnd,
                                CORINFO_METHOD_HANDLE   method,
                                CORINFO_SIG_INFO*       sig,
                                unsigned                methodFlags,
                                int                     memberRef,
                                bool                    readonlyCall,
                                bool                    tailCall,
                                CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
                                CORINFO_THIS_TRANSFORM  constraintCallThisTransform,
                                NamedIntrinsic*         pIntrinsicName,
                                bool*                   isSpecialIntrinsic)
{
    assert((methodFlags & CORINFO_FLG_INTRINSIC) != 0);

    bool           mustExpand = false;
    bool           isSpecial  = false;
    NamedIntrinsic ni         = NI_Illegal;

    if ((methodFlags & CORINFO_FLG_INTRINSIC) != 0)
    {
        // The recursive non-virtual calls to Jit intrinsics are must-expand by convention.
        mustExpand = mustExpand || (gtIsRecursiveCall(method) && !(methodFlags & CORINFO_FLG_VIRTUAL));

        ni = lookupNamedIntrinsic(method);

        // We specially support the following on all platforms to allow for dead
        // code optimization and to more generally support recursive intrinsics.
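        // For example (illustrative): user code guarded by
        //
        //     if (Avx2.IsSupported) { /* accelerated path */ } else { /* fallback */ }
        //
        // relies on NI_IsSupported_True/False folding to a constant here so the untaken
        // branch can be removed as dead code, including inside recursive fallback bodies.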
if (ni == NI_IsSupported_True) { assert(sig->numArgs == 0); return gtNewIconNode(true); } if (ni == NI_IsSupported_False) { assert(sig->numArgs == 0); return gtNewIconNode(false); } if (ni == NI_Throw_PlatformNotSupportedException) { return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand); } #ifdef FEATURE_HW_INTRINSICS if ((ni > NI_HW_INTRINSIC_START) && (ni < NI_HW_INTRINSIC_END)) { GenTree* hwintrinsic = impHWIntrinsic(ni, clsHnd, method, sig, mustExpand); if (mustExpand && (hwintrinsic == nullptr)) { return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_NOT_IMPLEMENTED, method, sig, mustExpand); } return hwintrinsic; } if ((ni > NI_SIMD_AS_HWINTRINSIC_START) && (ni < NI_SIMD_AS_HWINTRINSIC_END)) { // These intrinsics aren't defined recursively and so they will never be mustExpand // Instead, they provide software fallbacks that will be executed instead. assert(!mustExpand); return impSimdAsHWIntrinsic(ni, clsHnd, method, sig, newobjThis); } #endif // FEATURE_HW_INTRINSICS } *pIntrinsicName = ni; if (ni == NI_System_StubHelpers_GetStubContext) { // must be done regardless of DbgCode and MinOpts return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL); } if (ni == NI_System_StubHelpers_NextCallReturnAddress) { // For now we just avoid inlining anything into these methods since // this intrinsic is only rarely used. We could do this better if we // wanted to by trying to match which call is the one we need to get // the return address of. info.compHasNextCallRetAddr = true; return new (this, GT_LABEL) GenTree(GT_LABEL, TYP_I_IMPL); } switch (ni) { // CreateSpan must be expanded for NativeAOT case NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan: case NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray: mustExpand |= IsTargetAbi(CORINFO_CORERT_ABI); break; case NI_System_ByReference_ctor: case NI_System_ByReference_get_Value: case NI_System_Activator_AllocatorOf: case NI_System_Activator_DefaultConstructorOf: case NI_System_Object_MethodTableOf: case NI_System_EETypePtr_EETypePtrOf: mustExpand = true; break; default: break; } GenTree* retNode = nullptr; // Under debug and minopts, only expand what is required. // NextCallReturnAddress intrinsic returns the return address of the next call. // If that call is an intrinsic and is expanded, codegen for NextCallReturnAddress will fail. // To avoid that we conservatively expand only required intrinsics in methods that call // the NextCallReturnAddress intrinsic. 
if (!mustExpand && (opts.OptimizationDisabled() || info.compHasNextCallRetAddr)) { *pIntrinsicName = NI_Illegal; return retNode; } CorInfoType callJitType = sig->retType; var_types callType = JITtype2varType(callJitType); /* First do the intrinsics which are always smaller than a call */ if (ni != NI_Illegal) { assert(retNode == nullptr); switch (ni) { case NI_Array_Address: case NI_Array_Get: case NI_Array_Set: retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, ni); break; case NI_System_String_Equals: { retNode = impStringEqualsOrStartsWith(/*startsWith:*/ false, sig, methodFlags); break; } case NI_System_MemoryExtensions_Equals: case NI_System_MemoryExtensions_SequenceEqual: { retNode = impSpanEqualsOrStartsWith(/*startsWith:*/ false, sig, methodFlags); break; } case NI_System_String_StartsWith: { retNode = impStringEqualsOrStartsWith(/*startsWith:*/ true, sig, methodFlags); break; } case NI_System_MemoryExtensions_StartsWith: { retNode = impSpanEqualsOrStartsWith(/*startsWith:*/ true, sig, methodFlags); break; } case NI_System_MemoryExtensions_AsSpan: case NI_System_String_op_Implicit: { assert(sig->numArgs == 1); isSpecial = impStackTop().val->OperIs(GT_CNS_STR); break; } case NI_System_String_get_Chars: { GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; retNode = gtNewIndexRef(TYP_USHORT, op1, op2); retNode->gtFlags |= GTF_INX_STRING_LAYOUT; break; } case NI_System_String_get_Length: { GenTree* op1 = impPopStack().val; if (op1->OperIs(GT_CNS_STR)) { // Optimize `ldstr + String::get_Length()` to CNS_INT // e.g. "Hello".Length => 5 GenTreeIntCon* iconNode = gtNewStringLiteralLength(op1->AsStrCon()); if (iconNode != nullptr) { retNode = iconNode; break; } } GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_String__stringLen, compCurBB); op1 = arrLen; // Getting the length of a null string should throw op1->gtFlags |= GTF_EXCEPT; retNode = op1; break; } // Implement ByReference Ctor. This wraps the assignment of the ref into a byref-like field // in a value type. The canonical example of this is Span<T>. In effect this is just a // substitution. The parameter byref will be assigned into the newly allocated object. case NI_System_ByReference_ctor: { // Remove call to constructor and directly assign the byref passed // to the call to the first slot of the ByReference struct. GenTree* op1 = impPopStack().val; GenTree* thisptr = newobjThis; CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0); GenTree* assign = gtNewAssignNode(field, op1); GenTree* byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1()); assert(byReferenceStruct != nullptr); impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd)); retNode = assign; break; } // Implement ptr value getter for ByReference struct. case NI_System_ByReference_get_Value: { GenTree* op1 = impPopStack().val; CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0); retNode = field; break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan: { retNode = impCreateSpanIntrinsic(sig); break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray: { retNode = impInitializeArrayIntrinsic(sig); break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant: { GenTree* op1 = impPopStack().val; if (op1->OperIsConst()) { // op1 is a known constant, replace with 'true'. 
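                    // For example (illustrative): RuntimeHelpers.IsKnownConstant(42) or
                    // IsKnownConstant("literal") folds to true here; non-constant arguments
                    // are kept as a GT_INTRINSIC and handled later in morph (see below).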
retNode = gtNewIconNode(1); JITDUMP("\nExpanding RuntimeHelpers.IsKnownConstant to true early\n"); // We can also consider FTN_ADDR and typeof(T) here } else { // op1 is not a known constant, we'll do the expansion in morph retNode = new (this, GT_INTRINSIC) GenTreeIntrinsic(TYP_INT, op1, ni, method); JITDUMP("\nConverting RuntimeHelpers.IsKnownConstant to:\n"); DISPTREE(retNode); } break; } case NI_System_Activator_AllocatorOf: case NI_System_Activator_DefaultConstructorOf: case NI_System_Object_MethodTableOf: case NI_System_EETypePtr_EETypePtrOf: { assert(IsTargetAbi(CORINFO_CORERT_ABI)); // Only CoreRT supports it. CORINFO_RESOLVED_TOKEN resolvedToken; resolvedToken.tokenContext = impTokenLookupContextHandle; resolvedToken.tokenScope = info.compScopeHnd; resolvedToken.token = memberRef; resolvedToken.tokenType = CORINFO_TOKENKIND_Method; CORINFO_GENERICHANDLE_RESULT embedInfo; info.compCompHnd->expandRawHandleIntrinsic(&resolvedToken, &embedInfo); GenTree* rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef), embedInfo.compileTimeHandle); if (rawHandle == nullptr) { return nullptr; } noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL)); unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle")); impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE); GenTree* lclVar = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL); GenTree* lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar); var_types resultType = JITtype2varType(sig->retType); retNode = gtNewOperNode(GT_IND, resultType, lclVarAddr); break; } case NI_System_Span_get_Item: case NI_System_ReadOnlySpan_get_Item: { // Have index, stack pointer-to Span<T> s on the stack. Expand to: // // For Span<T> // Comma // BoundsCheck(index, s->_length) // s->_pointer + index * sizeof(T) // // For ReadOnlySpan<T> -- same expansion, as it now returns a readonly ref // // Signature should show one class type parameter, which // we need to examine. assert(sig->sigInst.classInstCount == 1); assert(sig->numArgs == 1); CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0]; const unsigned elemSize = info.compCompHnd->getClassSize(spanElemHnd); assert(elemSize > 0); const bool isReadOnly = (ni == NI_System_ReadOnlySpan_get_Item); JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n", isReadOnly ? "ReadOnly" : "", info.compCompHnd->getClassName(spanElemHnd), elemSize); GenTree* index = impPopStack().val; GenTree* ptrToSpan = impPopStack().val; GenTree* indexClone = nullptr; GenTree* ptrToSpanClone = nullptr; assert(genActualType(index) == TYP_INT); assert(ptrToSpan->TypeGet() == TYP_BYREF); #if defined(DEBUG) if (verbose) { printf("with ptr-to-span\n"); gtDispTree(ptrToSpan); printf("and index\n"); gtDispTree(index); } #endif // defined(DEBUG) // We need to use both index and ptr-to-span twice, so clone or spill. 
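                // (Both the bounds check and the address computation consume these values, so each
                // needs a side-effect-free copy; impCloneExpr either clones a cheap tree or spills
                // it to a temp and returns uses of that temp.)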
index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Span.get_Item index")); ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Span.get_Item ptrToSpan")); // Bounds check CORINFO_FIELD_HANDLE lengthHnd = info.compCompHnd->getFieldInClass(clsHnd, 1); const unsigned lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd); GenTree* length = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset); GenTree* boundsCheck = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(index, length, SCK_RNGCHK_FAIL); // Element access index = indexClone; #ifdef TARGET_64BIT if (index->OperGet() == GT_CNS_INT) { index->gtType = TYP_I_IMPL; } else { index = gtNewCastNode(TYP_I_IMPL, index, true, TYP_I_IMPL); } #endif if (elemSize != 1) { GenTree* sizeofNode = gtNewIconNode(static_cast<ssize_t>(elemSize), TYP_I_IMPL); index = gtNewOperNode(GT_MUL, TYP_I_IMPL, index, sizeofNode); } CORINFO_FIELD_HANDLE ptrHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); const unsigned ptrOffset = info.compCompHnd->getFieldOffset(ptrHnd); GenTree* data = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset); GenTree* result = gtNewOperNode(GT_ADD, TYP_BYREF, data, index); // Prepare result var_types resultType = JITtype2varType(sig->retType); assert(resultType == result->TypeGet()); retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result); break; } case NI_System_RuntimeTypeHandle_GetValueInternal: { GenTree* op1 = impStackTop(0).val; if (op1->gtOper == GT_CALL && (op1->AsCall()->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall())) { // Old tree // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle // // New tree // TreeToGetNativeTypeHandle // Remove call to helper and return the native TypeHandle pointer that was the parameter // to that helper. op1 = impPopStack().val; // Get native TypeHandle argument to old helper GenTreeCall::Use* arg = op1->AsCall()->gtCallArgs; assert(arg->GetNext() == nullptr); op1 = arg->GetNode(); retNode = op1; } // Call the regular function. break; } case NI_System_Type_GetTypeFromHandle: { GenTree* op1 = impStackTop(0).val; CorInfoHelpFunc typeHandleHelper; if (op1->gtOper == GT_CALL && (op1->AsCall()->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall(), &typeHandleHelper)) { op1 = impPopStack().val; // Replace helper with a more specialized helper that returns RuntimeType if (typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE) { typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE; } else { assert(typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL); typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL; } assert(op1->AsCall()->gtCallArgs->GetNext() == nullptr); op1 = gtNewHelperCallNode(typeHandleHelper, TYP_REF, op1->AsCall()->gtCallArgs); op1->gtType = TYP_REF; retNode = op1; } break; } case NI_System_Type_op_Equality: case NI_System_Type_op_Inequality: { JITDUMP("Importing Type.op_*Equality intrinsic\n"); GenTree* op1 = impStackTop(1).val; GenTree* op2 = impStackTop(0).val; GenTree* optTree = gtFoldTypeEqualityCall(ni == NI_System_Type_op_Equality, op1, op2); if (optTree != nullptr) { // Success, clean up the evaluation stack. impPopStack(); impPopStack(); // See if we can optimize even further, to a handle compare. optTree = gtFoldTypeCompare(optTree); // See if we can now fold a handle compare to a constant. 
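                    // For example (illustrative): "typeof(string) == typeof(object)" can fold all
                    // the way down to a constant here, while comparisons involving shared generic
                    // types may only simplify to a handle compare or not fold at all.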
optTree = gtFoldExpr(optTree); retNode = optTree; } else { // Retry optimizing these later isSpecial = true; } break; } case NI_System_Enum_HasFlag: { GenTree* thisOp = impStackTop(1).val; GenTree* flagOp = impStackTop(0).val; GenTree* optTree = gtOptimizeEnumHasFlag(thisOp, flagOp); if (optTree != nullptr) { // Optimization successful. Pop the stack for real. impPopStack(); impPopStack(); retNode = optTree; } else { // Retry optimizing this during morph. isSpecial = true; } break; } case NI_System_Type_IsAssignableFrom: { GenTree* typeTo = impStackTop(1).val; GenTree* typeFrom = impStackTop(0).val; retNode = impTypeIsAssignable(typeTo, typeFrom); break; } case NI_System_Type_IsAssignableTo: { GenTree* typeTo = impStackTop(0).val; GenTree* typeFrom = impStackTop(1).val; retNode = impTypeIsAssignable(typeTo, typeFrom); break; } case NI_System_Type_get_IsValueType: { // Optimize // // call Type.GetTypeFromHandle (which is replaced with CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE) // call Type.IsValueType // // to `true` or `false` // e.g. `typeof(int).IsValueType` => `true` if (impStackTop().val->IsCall()) { GenTreeCall* call = impStackTop().val->AsCall(); if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE)) { CORINFO_CLASS_HANDLE hClass = gtGetHelperArgClassHandle(call->gtCallArgs->GetNode()); if (hClass != NO_CLASS_HANDLE) { retNode = gtNewIconNode((eeIsValueClass(hClass) && // pointers are not value types (e.g. typeof(int*).IsValueType is false) info.compCompHnd->asCorInfoType(hClass) != CORINFO_TYPE_PTR) ? 1 : 0); impPopStack(); // drop CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE call } } } break; } case NI_System_Threading_Thread_get_ManagedThreadId: { if (impStackTop().val->OperIs(GT_RET_EXPR)) { GenTreeCall* call = impStackTop().val->AsRetExpr()->gtInlineCandidate->AsCall(); if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) { if (lookupNamedIntrinsic(call->gtCallMethHnd) == NI_System_Threading_Thread_get_CurrentThread) { // drop get_CurrentThread() call impPopStack(); call->ReplaceWith(gtNewNothingNode(), this); retNode = gtNewHelperCallNode(CORINFO_HELP_GETCURRENTMANAGEDTHREADID, TYP_INT); } } } break; } #ifdef TARGET_ARM64 // Intrinsify Interlocked.Or and Interlocked.And only for arm64-v8.1 (and newer) // TODO-CQ: Implement for XArch (https://github.com/dotnet/runtime/issues/32239). case NI_System_Threading_Interlocked_Or: case NI_System_Threading_Interlocked_And: { if (compOpportunisticallyDependsOn(InstructionSet_Atomics)) { assert(sig->numArgs == 2); GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; genTreeOps op = (ni == NI_System_Threading_Interlocked_Or) ? 
GT_XORR : GT_XAND; retNode = gtNewOperNode(op, genActualType(callType), op1, op2); retNode->gtFlags |= GTF_GLOB_REF | GTF_ASG; } break; } #endif // TARGET_ARM64 #if defined(TARGET_XARCH) || defined(TARGET_ARM64) // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic case NI_System_Threading_Interlocked_CompareExchange: { var_types retType = JITtype2varType(sig->retType); if ((retType == TYP_LONG) && (TARGET_POINTER_SIZE == 4)) { break; } if ((retType != TYP_INT) && (retType != TYP_LONG)) { break; } assert(callType != TYP_STRUCT); assert(sig->numArgs == 3); GenTree* op3 = impPopStack().val; // comparand GenTree* op2 = impPopStack().val; // value GenTree* op1 = impPopStack().val; // location GenTree* node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3); node->AsCmpXchg()->gtOpLocation->gtFlags |= GTF_DONT_CSE; retNode = node; break; } case NI_System_Threading_Interlocked_Exchange: case NI_System_Threading_Interlocked_ExchangeAdd: { assert(callType != TYP_STRUCT); assert(sig->numArgs == 2); var_types retType = JITtype2varType(sig->retType); if ((retType == TYP_LONG) && (TARGET_POINTER_SIZE == 4)) { break; } if ((retType != TYP_INT) && (retType != TYP_LONG)) { break; } GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; // This creates: // val // XAdd // addr // field (for example) // // In the case where the first argument is the address of a local, we might // want to make this *not* make the var address-taken -- but atomic instructions // on a local are probably pretty useless anyway, so we probably don't care. op1 = gtNewOperNode(ni == NI_System_Threading_Interlocked_ExchangeAdd ? GT_XADD : GT_XCHG, genActualType(callType), op1, op2); op1->gtFlags |= GTF_GLOB_REF | GTF_ASG; retNode = op1; break; } #endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) case NI_System_Threading_Interlocked_MemoryBarrier: case NI_System_Threading_Interlocked_ReadMemoryBarrier: { assert(sig->numArgs == 0); GenTree* op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID); op1->gtFlags |= GTF_GLOB_REF | GTF_ASG; // On XARCH `NI_System_Threading_Interlocked_ReadMemoryBarrier` fences need not be emitted. // However, we still need to capture the effect on reordering. 
if (ni == NI_System_Threading_Interlocked_ReadMemoryBarrier) { op1->gtFlags |= GTF_MEMORYBARRIER_LOAD; } retNode = op1; break; } #ifdef FEATURE_HW_INTRINSICS case NI_System_Math_FusedMultiplyAdd: { #ifdef TARGET_XARCH if (compExactlyDependsOn(InstructionSet_FMA) && supportSIMDTypes()) { assert(varTypeIsFloating(callType)); // We are constructing a chain of intrinsics similar to: // return FMA.MultiplyAddScalar( // Vector128.CreateScalarUnsafe(x), // Vector128.CreateScalarUnsafe(y), // Vector128.CreateScalarUnsafe(z) // ).ToScalar(); GenTree* op3 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe, callJitType, 16); GenTree* op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe, callJitType, 16); GenTree* op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe, callJitType, 16); GenTree* res = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, op3, NI_FMA_MultiplyAddScalar, callJitType, 16); retNode = gtNewSimdHWIntrinsicNode(callType, res, NI_Vector128_ToScalar, callJitType, 16); break; } #elif defined(TARGET_ARM64) if (compExactlyDependsOn(InstructionSet_AdvSimd)) { assert(varTypeIsFloating(callType)); // We are constructing a chain of intrinsics similar to: // return AdvSimd.FusedMultiplyAddScalar( // Vector64.Create{ScalarUnsafe}(z), // Vector64.Create{ScalarUnsafe}(y), // Vector64.Create{ScalarUnsafe}(x) // ).ToScalar(); NamedIntrinsic createVector64 = (callType == TYP_DOUBLE) ? NI_Vector64_Create : NI_Vector64_CreateScalarUnsafe; constexpr unsigned int simdSize = 8; GenTree* op3 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize); GenTree* op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize); GenTree* op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize); // Note that AdvSimd.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 + op2 * op3 // while Math{F}.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 * op2 + op3 retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op3, op2, op1, NI_AdvSimd_FusedMultiplyAddScalar, callJitType, simdSize); retNode = gtNewSimdHWIntrinsicNode(callType, retNode, NI_Vector64_ToScalar, callJitType, simdSize); break; } #endif // TODO-CQ-XArch: Ideally we would create a GT_INTRINSIC node for fma, however, that currently // requires more extensive changes to valuenum to support methods with 3 operands // We want to generate a GT_INTRINSIC node in the case the call can't be treated as // a target intrinsic so that we can still benefit from CSE and constant folding. 
break; } #endif // FEATURE_HW_INTRINSICS case NI_System_Math_Abs: case NI_System_Math_Acos: case NI_System_Math_Acosh: case NI_System_Math_Asin: case NI_System_Math_Asinh: case NI_System_Math_Atan: case NI_System_Math_Atanh: case NI_System_Math_Atan2: case NI_System_Math_Cbrt: case NI_System_Math_Ceiling: case NI_System_Math_Cos: case NI_System_Math_Cosh: case NI_System_Math_Exp: case NI_System_Math_Floor: case NI_System_Math_FMod: case NI_System_Math_ILogB: case NI_System_Math_Log: case NI_System_Math_Log2: case NI_System_Math_Log10: #ifdef TARGET_ARM64 // ARM64 has fmax/fmin which are IEEE754:2019 minimum/maximum compatible // TODO-XARCH-CQ: Enable this for XARCH when one of the arguments is a constant // so we can then emit maxss/minss and avoid NaN/-0.0 handling case NI_System_Math_Max: case NI_System_Math_Min: #endif case NI_System_Math_Pow: case NI_System_Math_Round: case NI_System_Math_Sin: case NI_System_Math_Sinh: case NI_System_Math_Sqrt: case NI_System_Math_Tan: case NI_System_Math_Tanh: case NI_System_Math_Truncate: { retNode = impMathIntrinsic(method, sig, callType, ni, tailCall); break; } case NI_System_Array_Clone: case NI_System_Collections_Generic_Comparer_get_Default: case NI_System_Collections_Generic_EqualityComparer_get_Default: case NI_System_Object_MemberwiseClone: case NI_System_Threading_Thread_get_CurrentThread: { // Flag for later handling. isSpecial = true; break; } case NI_System_Object_GetType: { JITDUMP("\n impIntrinsic: call to Object.GetType\n"); GenTree* op1 = impStackTop(0).val; // If we're calling GetType on a boxed value, just get the type directly. if (op1->IsBoxedValue()) { JITDUMP("Attempting to optimize box(...).getType() to direct type construction\n"); // Try and clean up the box. Obtain the handle we // were going to pass to the newobj. GenTree* boxTypeHandle = gtTryRemoveBoxUpstreamEffects(op1, BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE); if (boxTypeHandle != nullptr) { // Note we don't need to play the TYP_STRUCT games here like // do for LDTOKEN since the return value of this operator is Type, // not RuntimeTypeHandle. impPopStack(); GenTreeCall::Use* helperArgs = gtNewCallArgs(boxTypeHandle); GenTree* runtimeType = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs); retNode = runtimeType; } } // If we have a constrained callvirt with a "box this" transform // we know we have a value class and hence an exact type. // // If so, instead of boxing and then extracting the type, just // construct the type directly. if ((retNode == nullptr) && (pConstrainedResolvedToken != nullptr) && (constraintCallThisTransform == CORINFO_BOX_THIS)) { // Ensure this is one of the is simple box cases (in particular, rule out nullables). 
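                    // Illustrative source pattern (one of several that can reach this path):
                    //
                    //     static Type M<T>(T t) where T : struct => t.GetType();
                    //
                    // The constrained callvirt arrives with a "box this" transform, and for simple
                    // (non-Nullable) value types we can build the RuntimeType without boxing.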
const CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pConstrainedResolvedToken->hClass); const bool isSafeToOptimize = (boxHelper == CORINFO_HELP_BOX); if (isSafeToOptimize) { JITDUMP("Optimizing constrained box-this obj.getType() to direct type construction\n"); impPopStack(); GenTree* typeHandleOp = impTokenToHandle(pConstrainedResolvedToken, nullptr, true /* mustRestoreHandle */); if (typeHandleOp == nullptr) { assert(compDonotInline()); return nullptr; } GenTreeCall::Use* helperArgs = gtNewCallArgs(typeHandleOp); GenTree* runtimeType = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs); retNode = runtimeType; } } #ifdef DEBUG if (retNode != nullptr) { JITDUMP("Optimized result for call to GetType is\n"); if (verbose) { gtDispTree(retNode); } } #endif // Else expand as an intrinsic, unless the call is constrained, // in which case we defer expansion to allow impImportCall do the // special constraint processing. if ((retNode == nullptr) && (pConstrainedResolvedToken == nullptr)) { JITDUMP("Expanding as special intrinsic\n"); impPopStack(); op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, ni, method); // Set the CALL flag to indicate that the operator is implemented by a call. // Set also the EXCEPTION flag because the native implementation of // NI_System_Object_GetType intrinsic can throw NullReferenceException. op1->gtFlags |= (GTF_CALL | GTF_EXCEPT); retNode = op1; // Might be further optimizable, so arrange to leave a mark behind isSpecial = true; } if (retNode == nullptr) { JITDUMP("Leaving as normal call\n"); // Might be further optimizable, so arrange to leave a mark behind isSpecial = true; } break; } case NI_System_Array_GetLength: case NI_System_Array_GetLowerBound: case NI_System_Array_GetUpperBound: { // System.Array.GetLength(Int32) method: // public int GetLength(int dimension) // System.Array.GetLowerBound(Int32) method: // public int GetLowerBound(int dimension) // System.Array.GetUpperBound(Int32) method: // public int GetUpperBound(int dimension) // // Only implement these as intrinsics for multi-dimensional arrays. // Only handle constant dimension arguments. GenTree* gtDim = impStackTop().val; GenTree* gtArr = impStackTop(1).val; if (gtDim->IsIntegralConst()) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE arrCls = gtGetClassHandle(gtArr, &isExact, &isNonNull); if (arrCls != NO_CLASS_HANDLE) { unsigned rank = info.compCompHnd->getArrayRank(arrCls); if ((rank > 1) && !info.compCompHnd->isSDArray(arrCls)) { // `rank` is guaranteed to be <=32 (see MAX_RANK in vm\array.h). Any constant argument // is `int` sized. INT64 dimValue = gtDim->AsIntConCommon()->IntegralValue(); assert((unsigned int)dimValue == dimValue); unsigned dim = (unsigned int)dimValue; if (dim < rank) { // This is now known to be a multi-dimension array with a constant dimension // that is in range; we can expand it as an intrinsic. impPopStack().val; // Pop the dim and array object; we already have a pointer to them. impPopStack().val; // Make sure there are no global effects in the array (such as it being a function // call), so we can mark the generated indirection with GTF_IND_INVARIANT. In the // GetUpperBound case we need the cloned object, since we refer to the array // object twice. In the other cases, we don't need to clone. 
GenTree* gtArrClone = nullptr; if (((gtArr->gtFlags & GTF_GLOB_EFFECT) != 0) || (ni == NI_System_Array_GetUpperBound)) { gtArr = impCloneExpr(gtArr, &gtArrClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("MD intrinsics array")); } switch (ni) { case NI_System_Array_GetLength: { // Generate *(array + offset-to-length-array + sizeof(int) * dim) unsigned offs = eeGetMDArrayLengthOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); retNode = gtNewIndir(TYP_INT, gtAddr); retNode->gtFlags |= GTF_IND_INVARIANT; break; } case NI_System_Array_GetLowerBound: { // Generate *(array + offset-to-bounds-array + sizeof(int) * dim) unsigned offs = eeGetMDArrayLowerBoundOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); retNode = gtNewIndir(TYP_INT, gtAddr); retNode->gtFlags |= GTF_IND_INVARIANT; break; } case NI_System_Array_GetUpperBound: { assert(gtArrClone != nullptr); // Generate: // *(array + offset-to-length-array + sizeof(int) * dim) + // *(array + offset-to-bounds-array + sizeof(int) * dim) - 1 unsigned offs = eeGetMDArrayLowerBoundOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); GenTree* gtLowerBound = gtNewIndir(TYP_INT, gtAddr); gtLowerBound->gtFlags |= GTF_IND_INVARIANT; offs = eeGetMDArrayLengthOffset(rank, dim); gtOffs = gtNewIconNode(offs, TYP_I_IMPL); gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArrClone, gtOffs); GenTree* gtLength = gtNewIndir(TYP_INT, gtAddr); gtLength->gtFlags |= GTF_IND_INVARIANT; GenTree* gtSum = gtNewOperNode(GT_ADD, TYP_INT, gtLowerBound, gtLength); GenTree* gtOne = gtNewIconNode(1, TYP_INT); retNode = gtNewOperNode(GT_SUB, TYP_INT, gtSum, gtOne); break; } default: unreached(); } } } } } break; } case NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness: { assert(sig->numArgs == 1); // We expect the return type of the ReverseEndianness routine to match the type of the // one and only argument to the method. We use a special instruction for 16-bit // BSWAPs since on x86 processors this is implemented as ROR <16-bit reg>, 8. Additionally, // we only emit 64-bit BSWAP instructions on 64-bit archs; if we're asked to perform a // 64-bit byte swap on a 32-bit arch, we'll fall to the default case in the switch block below. switch (sig->retType) { case CorInfoType::CORINFO_TYPE_SHORT: case CorInfoType::CORINFO_TYPE_USHORT: retNode = gtNewCastNode(TYP_INT, gtNewOperNode(GT_BSWAP16, TYP_INT, impPopStack().val), false, callType); break; case CorInfoType::CORINFO_TYPE_INT: case CorInfoType::CORINFO_TYPE_UINT: #ifdef TARGET_64BIT case CorInfoType::CORINFO_TYPE_LONG: case CorInfoType::CORINFO_TYPE_ULONG: #endif // TARGET_64BIT retNode = gtNewOperNode(GT_BSWAP, callType, impPopStack().val); break; default: // This default case gets hit on 32-bit archs when a call to a 64-bit overload // of ReverseEndianness is encountered. In that case we'll let JIT treat this as a standard // method call, where the implementation decomposes the operation into two 32-bit // bswap routines. If the input to the 64-bit function is a constant, then we rely // on inlining + constant folding of 32-bit bswaps to effectively constant fold // the 64-bit call site. 
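                        // (For instance, on a 32-bit target ReverseEndianness of a ulong stays a normal
                        // call into the managed implementation, whereas the 16/32-bit overloads above
                        // become GT_BSWAP16/GT_BSWAP nodes.)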
break; } break; } // Fold PopCount for constant input case NI_System_Numerics_BitOperations_PopCount: { assert(sig->numArgs == 1); if (impStackTop().val->IsIntegralConst()) { typeInfo argType = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack(); INT64 cns = impPopStack().val->AsIntConCommon()->IntegralValue(); if (argType.IsType(TI_LONG)) { retNode = gtNewIconNode(genCountBits(cns), callType); } else { assert(argType.IsType(TI_INT)); retNode = gtNewIconNode(genCountBits(static_cast<unsigned>(cns)), callType); } } break; } case NI_System_GC_KeepAlive: { retNode = impKeepAliveIntrinsic(impPopStack().val); break; } default: break; } } if (mustExpand && (retNode == nullptr)) { assert(!"Unhandled must expand intrinsic, throwing PlatformNotSupportedException"); return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand); } // Optionally report if this intrinsic is special // (that is, potentially re-optimizable during morph). if (isSpecialIntrinsic != nullptr) { *isSpecialIntrinsic = isSpecial; } return retNode; } GenTree* Compiler::impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom) { // Optimize patterns like: // // typeof(TTo).IsAssignableFrom(typeof(TTFrom)) // valueTypeVar.GetType().IsAssignableFrom(typeof(TTFrom)) // typeof(TTFrom).IsAssignableTo(typeof(TTo)) // typeof(TTFrom).IsAssignableTo(valueTypeVar.GetType()) // // to true/false if (typeTo->IsCall() && typeFrom->IsCall()) { // make sure both arguments are `typeof()` CORINFO_METHOD_HANDLE hTypeof = eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE); if ((typeTo->AsCall()->gtCallMethHnd == hTypeof) && (typeFrom->AsCall()->gtCallMethHnd == hTypeof)) { CORINFO_CLASS_HANDLE hClassTo = gtGetHelperArgClassHandle(typeTo->AsCall()->gtCallArgs->GetNode()); CORINFO_CLASS_HANDLE hClassFrom = gtGetHelperArgClassHandle(typeFrom->AsCall()->gtCallArgs->GetNode()); if (hClassTo == NO_CLASS_HANDLE || hClassFrom == NO_CLASS_HANDLE) { return nullptr; } TypeCompareState castResult = info.compCompHnd->compareTypesForCast(hClassFrom, hClassTo); if (castResult == TypeCompareState::May) { // requires runtime check // e.g. __Canon, COMObjects, Nullable return nullptr; } GenTreeIntCon* retNode = gtNewIconNode((castResult == TypeCompareState::Must) ? 1 : 0); impPopStack(); // drop both CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE calls impPopStack(); return retNode; } } return nullptr; } GenTree* Compiler::impMathIntrinsic(CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, var_types callType, NamedIntrinsic intrinsicName, bool tailCall) { GenTree* op1; GenTree* op2; assert(callType != TYP_STRUCT); assert(IsMathIntrinsic(intrinsicName)); op1 = nullptr; #if !defined(TARGET_X86) // Intrinsics that are not implemented directly by target instructions will // be re-materialized as users calls in rationalizer. For prefixed tail calls, // don't do this optimization, because // a) For back compatibility reasons on desktop .NET Framework 4.6 / 4.6.1 // b) It will be non-trivial task or too late to re-materialize a surviving // tail prefixed GT_INTRINSIC as tail call in rationalizer. if (!IsIntrinsicImplementedByUserCall(intrinsicName) || !tailCall) #else // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad // code generation for certain EH constructs. 
if (!IsIntrinsicImplementedByUserCall(intrinsicName)) #endif { CORINFO_CLASS_HANDLE tmpClass; CORINFO_ARG_LIST_HANDLE arg; var_types op1Type; var_types op2Type; switch (sig->numArgs) { case 1: op1 = impPopStack().val; arg = sig->args; op1Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass))); if (op1->TypeGet() != genActualType(op1Type)) { assert(varTypeIsFloating(op1)); op1 = gtNewCastNode(callType, op1, false, callType); } op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicName, method); break; case 2: op2 = impPopStack().val; op1 = impPopStack().val; arg = sig->args; op1Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass))); if (op1->TypeGet() != genActualType(op1Type)) { assert(varTypeIsFloating(op1)); op1 = gtNewCastNode(callType, op1, false, callType); } arg = info.compCompHnd->getArgNext(arg); op2Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass))); if (op2->TypeGet() != genActualType(op2Type)) { assert(varTypeIsFloating(op2)); op2 = gtNewCastNode(callType, op2, false, callType); } op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicName, method); break; default: NO_WAY("Unsupported number of args for Math Intrinsic"); } if (IsIntrinsicImplementedByUserCall(intrinsicName)) { op1->gtFlags |= GTF_CALL; } } return op1; } //------------------------------------------------------------------------ // lookupNamedIntrinsic: map method to jit named intrinsic value // // Arguments: // method -- method handle for method // // Return Value: // Id for the named intrinsic, or Illegal if none. // // Notes: // method should have CORINFO_FLG_INTRINSIC set in its attributes, // otherwise it is not a named jit intrinsic. 
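//    Matching below is purely by metadata name strings - for example namespace "System",
//    class "Math", method "Sqrt" maps to NI_System_Math_Sqrt. If the names cannot be
//    retrieved, only the MD-array get/set/address intrinsics are considered.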
// NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method) { const char* className = nullptr; const char* namespaceName = nullptr; const char* enclosingClassName = nullptr; const char* methodName = info.compCompHnd->getMethodNameFromMetadata(method, &className, &namespaceName, &enclosingClassName); JITDUMP("Named Intrinsic "); if (namespaceName != nullptr) { JITDUMP("%s.", namespaceName); } if (enclosingClassName != nullptr) { JITDUMP("%s.", enclosingClassName); } if (className != nullptr) { JITDUMP("%s.", className); } if (methodName != nullptr) { JITDUMP("%s", methodName); } if ((namespaceName == nullptr) || (className == nullptr) || (methodName == nullptr)) { // Check if we are dealing with an MD array's known runtime method CorInfoArrayIntrinsic arrayFuncIndex = info.compCompHnd->getArrayIntrinsicID(method); switch (arrayFuncIndex) { case CorInfoArrayIntrinsic::GET: JITDUMP("ARRAY_FUNC_GET: Recognized\n"); return NI_Array_Get; case CorInfoArrayIntrinsic::SET: JITDUMP("ARRAY_FUNC_SET: Recognized\n"); return NI_Array_Set; case CorInfoArrayIntrinsic::ADDRESS: JITDUMP("ARRAY_FUNC_ADDRESS: Recognized\n"); return NI_Array_Address; default: break; } JITDUMP(": Not recognized, not enough metadata\n"); return NI_Illegal; } JITDUMP(": "); NamedIntrinsic result = NI_Illegal; if (strcmp(namespaceName, "System") == 0) { if ((strcmp(className, "Enum") == 0) && (strcmp(methodName, "HasFlag") == 0)) { result = NI_System_Enum_HasFlag; } else if (strcmp(className, "Activator") == 0) { if (strcmp(methodName, "AllocatorOf") == 0) { result = NI_System_Activator_AllocatorOf; } else if (strcmp(methodName, "DefaultConstructorOf") == 0) { result = NI_System_Activator_DefaultConstructorOf; } } else if (strcmp(className, "ByReference`1") == 0) { if (strcmp(methodName, ".ctor") == 0) { result = NI_System_ByReference_ctor; } else if (strcmp(methodName, "get_Value") == 0) { result = NI_System_ByReference_get_Value; } } else if (strcmp(className, "Math") == 0 || strcmp(className, "MathF") == 0) { if (strcmp(methodName, "Abs") == 0) { result = NI_System_Math_Abs; } else if (strcmp(methodName, "Acos") == 0) { result = NI_System_Math_Acos; } else if (strcmp(methodName, "Acosh") == 0) { result = NI_System_Math_Acosh; } else if (strcmp(methodName, "Asin") == 0) { result = NI_System_Math_Asin; } else if (strcmp(methodName, "Asinh") == 0) { result = NI_System_Math_Asinh; } else if (strcmp(methodName, "Atan") == 0) { result = NI_System_Math_Atan; } else if (strcmp(methodName, "Atanh") == 0) { result = NI_System_Math_Atanh; } else if (strcmp(methodName, "Atan2") == 0) { result = NI_System_Math_Atan2; } else if (strcmp(methodName, "Cbrt") == 0) { result = NI_System_Math_Cbrt; } else if (strcmp(methodName, "Ceiling") == 0) { result = NI_System_Math_Ceiling; } else if (strcmp(methodName, "Cos") == 0) { result = NI_System_Math_Cos; } else if (strcmp(methodName, "Cosh") == 0) { result = NI_System_Math_Cosh; } else if (strcmp(methodName, "Exp") == 0) { result = NI_System_Math_Exp; } else if (strcmp(methodName, "Floor") == 0) { result = NI_System_Math_Floor; } else if (strcmp(methodName, "FMod") == 0) { result = NI_System_Math_FMod; } else if (strcmp(methodName, "FusedMultiplyAdd") == 0) { result = NI_System_Math_FusedMultiplyAdd; } else if (strcmp(methodName, "ILogB") == 0) { result = NI_System_Math_ILogB; } else if (strcmp(methodName, "Log") == 0) { result = NI_System_Math_Log; } else if (strcmp(methodName, "Log2") == 0) { result = NI_System_Math_Log2; } else if (strcmp(methodName, "Log10") == 0) { result = 
NI_System_Math_Log10; } else if (strcmp(methodName, "Max") == 0) { result = NI_System_Math_Max; } else if (strcmp(methodName, "Min") == 0) { result = NI_System_Math_Min; } else if (strcmp(methodName, "Pow") == 0) { result = NI_System_Math_Pow; } else if (strcmp(methodName, "Round") == 0) { result = NI_System_Math_Round; } else if (strcmp(methodName, "Sin") == 0) { result = NI_System_Math_Sin; } else if (strcmp(methodName, "Sinh") == 0) { result = NI_System_Math_Sinh; } else if (strcmp(methodName, "Sqrt") == 0) { result = NI_System_Math_Sqrt; } else if (strcmp(methodName, "Tan") == 0) { result = NI_System_Math_Tan; } else if (strcmp(methodName, "Tanh") == 0) { result = NI_System_Math_Tanh; } else if (strcmp(methodName, "Truncate") == 0) { result = NI_System_Math_Truncate; } } else if (strcmp(className, "GC") == 0) { if (strcmp(methodName, "KeepAlive") == 0) { result = NI_System_GC_KeepAlive; } } else if (strcmp(className, "Array") == 0) { if (strcmp(methodName, "Clone") == 0) { result = NI_System_Array_Clone; } else if (strcmp(methodName, "GetLength") == 0) { result = NI_System_Array_GetLength; } else if (strcmp(methodName, "GetLowerBound") == 0) { result = NI_System_Array_GetLowerBound; } else if (strcmp(methodName, "GetUpperBound") == 0) { result = NI_System_Array_GetUpperBound; } } else if (strcmp(className, "Object") == 0) { if (strcmp(methodName, "MemberwiseClone") == 0) { result = NI_System_Object_MemberwiseClone; } else if (strcmp(methodName, "GetType") == 0) { result = NI_System_Object_GetType; } else if (strcmp(methodName, "MethodTableOf") == 0) { result = NI_System_Object_MethodTableOf; } } else if (strcmp(className, "RuntimeTypeHandle") == 0) { if (strcmp(methodName, "GetValueInternal") == 0) { result = NI_System_RuntimeTypeHandle_GetValueInternal; } } else if (strcmp(className, "Type") == 0) { if (strcmp(methodName, "get_IsValueType") == 0) { result = NI_System_Type_get_IsValueType; } else if (strcmp(methodName, "IsAssignableFrom") == 0) { result = NI_System_Type_IsAssignableFrom; } else if (strcmp(methodName, "IsAssignableTo") == 0) { result = NI_System_Type_IsAssignableTo; } else if (strcmp(methodName, "op_Equality") == 0) { result = NI_System_Type_op_Equality; } else if (strcmp(methodName, "op_Inequality") == 0) { result = NI_System_Type_op_Inequality; } else if (strcmp(methodName, "GetTypeFromHandle") == 0) { result = NI_System_Type_GetTypeFromHandle; } } else if (strcmp(className, "String") == 0) { if (strcmp(methodName, "Equals") == 0) { result = NI_System_String_Equals; } else if (strcmp(methodName, "get_Chars") == 0) { result = NI_System_String_get_Chars; } else if (strcmp(methodName, "get_Length") == 0) { result = NI_System_String_get_Length; } else if (strcmp(methodName, "op_Implicit") == 0) { result = NI_System_String_op_Implicit; } else if (strcmp(methodName, "StartsWith") == 0) { result = NI_System_String_StartsWith; } } else if (strcmp(className, "MemoryExtensions") == 0) { if (strcmp(methodName, "AsSpan") == 0) { result = NI_System_MemoryExtensions_AsSpan; } if (strcmp(methodName, "SequenceEqual") == 0) { result = NI_System_MemoryExtensions_SequenceEqual; } else if (strcmp(methodName, "Equals") == 0) { result = NI_System_MemoryExtensions_Equals; } else if (strcmp(methodName, "StartsWith") == 0) { result = NI_System_MemoryExtensions_StartsWith; } } else if (strcmp(className, "Span`1") == 0) { if (strcmp(methodName, "get_Item") == 0) { result = NI_System_Span_get_Item; } } else if (strcmp(className, "ReadOnlySpan`1") == 0) { if (strcmp(methodName, "get_Item") == 0) 
{ result = NI_System_ReadOnlySpan_get_Item; } } else if (strcmp(className, "EETypePtr") == 0) { if (strcmp(methodName, "EETypePtrOf") == 0) { result = NI_System_EETypePtr_EETypePtrOf; } } } else if (strcmp(namespaceName, "System.Threading") == 0) { if (strcmp(className, "Thread") == 0) { if (strcmp(methodName, "get_CurrentThread") == 0) { result = NI_System_Threading_Thread_get_CurrentThread; } else if (strcmp(methodName, "get_ManagedThreadId") == 0) { result = NI_System_Threading_Thread_get_ManagedThreadId; } } else if (strcmp(className, "Interlocked") == 0) { #ifndef TARGET_ARM64 // TODO-CQ: Implement for XArch (https://github.com/dotnet/runtime/issues/32239). if (strcmp(methodName, "And") == 0) { result = NI_System_Threading_Interlocked_And; } else if (strcmp(methodName, "Or") == 0) { result = NI_System_Threading_Interlocked_Or; } #endif if (strcmp(methodName, "CompareExchange") == 0) { result = NI_System_Threading_Interlocked_CompareExchange; } else if (strcmp(methodName, "Exchange") == 0) { result = NI_System_Threading_Interlocked_Exchange; } else if (strcmp(methodName, "ExchangeAdd") == 0) { result = NI_System_Threading_Interlocked_ExchangeAdd; } else if (strcmp(methodName, "MemoryBarrier") == 0) { result = NI_System_Threading_Interlocked_MemoryBarrier; } else if (strcmp(methodName, "ReadMemoryBarrier") == 0) { result = NI_System_Threading_Interlocked_ReadMemoryBarrier; } } } #if defined(TARGET_XARCH) || defined(TARGET_ARM64) else if (strcmp(namespaceName, "System.Buffers.Binary") == 0) { if ((strcmp(className, "BinaryPrimitives") == 0) && (strcmp(methodName, "ReverseEndianness") == 0)) { result = NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness; } } #endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) else if (strcmp(namespaceName, "System.Collections.Generic") == 0) { if ((strcmp(className, "EqualityComparer`1") == 0) && (strcmp(methodName, "get_Default") == 0)) { result = NI_System_Collections_Generic_EqualityComparer_get_Default; } else if ((strcmp(className, "Comparer`1") == 0) && (strcmp(methodName, "get_Default") == 0)) { result = NI_System_Collections_Generic_Comparer_get_Default; } } else if ((strcmp(namespaceName, "System.Numerics") == 0) && (strcmp(className, "BitOperations") == 0)) { if (strcmp(methodName, "PopCount") == 0) { result = NI_System_Numerics_BitOperations_PopCount; } } #ifdef FEATURE_HW_INTRINSICS else if (strcmp(namespaceName, "System.Numerics") == 0) { CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(method, &sig); int sizeOfVectorT = getSIMDVectorRegisterByteLength(); result = SimdAsHWIntrinsicInfo::lookupId(&sig, className, methodName, enclosingClassName, sizeOfVectorT); } #endif // FEATURE_HW_INTRINSICS else if ((strcmp(namespaceName, "System.Runtime.CompilerServices") == 0) && (strcmp(className, "RuntimeHelpers") == 0)) { if (strcmp(methodName, "CreateSpan") == 0) { result = NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan; } else if (strcmp(methodName, "InitializeArray") == 0) { result = NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray; } else if (strcmp(methodName, "IsKnownConstant") == 0) { result = NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant; } } else if (strncmp(namespaceName, "System.Runtime.Intrinsics", 25) == 0) { // We go down this path even when FEATURE_HW_INTRINSICS isn't enabled // so we can specially handle IsSupported and recursive calls. // This is required to appropriately handle the intrinsics on platforms // which don't support them. 
On such a platform methods like Vector64.Create // will be seen as `Intrinsic` and `mustExpand` due to having a code path // which is recursive. When such a path is hit we expect it to be handled by // the importer and we fire an assert if it wasn't and in previous versions // of the JIT would fail fast. This was changed to throw a PNSE instead but // we still assert as most intrinsics should have been recognized/handled. // In order to avoid the assert, we specially handle the IsSupported checks // (to better allow dead-code optimizations) and we explicitly throw a PNSE // as we know that is the desired behavior for the HWIntrinsics when not // supported. For cases like Vector64.Create, this is fine because it will // be behind a relevant IsSupported check and will never be hit and the // software fallback will be executed instead. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef FEATURE_HW_INTRINSICS namespaceName += 25; const char* platformNamespaceName; #if defined(TARGET_XARCH) platformNamespaceName = ".X86"; #elif defined(TARGET_ARM64) platformNamespaceName = ".Arm"; #else #error Unsupported platform #endif if ((namespaceName[0] == '\0') || (strcmp(namespaceName, platformNamespaceName) == 0)) { CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(method, &sig); result = HWIntrinsicInfo::lookupId(this, &sig, className, methodName, enclosingClassName); } #endif // FEATURE_HW_INTRINSICS if (result == NI_Illegal) { if ((strcmp(methodName, "get_IsSupported") == 0) || (strcmp(methodName, "get_IsHardwareAccelerated") == 0)) { // This allows the relevant code paths to be dropped as dead code even // on platforms where FEATURE_HW_INTRINSICS is not supported. result = NI_IsSupported_False; } else if (gtIsRecursiveCall(method)) { // For the framework itself, any recursive intrinsics will either be // only supported on a single platform or will be guarded by a relevant // IsSupported check so the throw PNSE will be valid or dropped. result = NI_Throw_PlatformNotSupportedException; } } } else if (strcmp(namespaceName, "System.StubHelpers") == 0) { if (strcmp(className, "StubHelpers") == 0) { if (strcmp(methodName, "GetStubContext") == 0) { result = NI_System_StubHelpers_GetStubContext; } else if (strcmp(methodName, "NextCallReturnAddress") == 0) { result = NI_System_StubHelpers_NextCallReturnAddress; } } } if (result == NI_Illegal) { JITDUMP("Not recognized\n"); } else if (result == NI_IsSupported_False) { JITDUMP("Unsupported - return false"); } else if (result == NI_Throw_PlatformNotSupportedException) { JITDUMP("Unsupported - throw PlatformNotSupportedException"); } else { JITDUMP("Recognized\n"); } return result; } //------------------------------------------------------------------------ // impUnsupportedNamedIntrinsic: Throws an exception for an unsupported named intrinsic // // Arguments: // helper - JIT helper ID for the exception to be thrown // method - method handle of the intrinsic function. // sig - signature of the intrinsic call // mustExpand - true if the intrinsic must return a GenTree*; otherwise, false // // Return Value: // a gtNewMustThrowException if mustExpand is true; otherwise, nullptr // GenTree* Compiler::impUnsupportedNamedIntrinsic(unsigned helper, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand) { // We've hit some error case and may need to return a node for the given error. // // When `mustExpand=false`, we are attempting to inline the intrinsic directly into another method. 
In this // scenario, we need to return `nullptr` so that a GT_CALL to the intrinsic is emitted instead. This is to // ensure that everything continues to behave correctly when optimizations are enabled (e.g. things like the // inliner may expect the node we return to have a certain signature, and the `MustThrowException` node won't // match that). // // When `mustExpand=true`, we are in a GT_CALL to the intrinsic and are attempting to JIT it. This will generally // be in response to an indirect call (e.g. done via reflection) or in response to an earlier attempt returning // `nullptr` (under `mustExpand=false`). In that scenario, we are safe to return the `MustThrowException` node. if (mustExpand) { for (unsigned i = 0; i < sig->numArgs; i++) { impPopStack(); } return gtNewMustThrowException(helper, JITtype2varType(sig->retType), sig->retTypeClass); } else { return nullptr; } } /*****************************************************************************/ GenTree* Compiler::impArrayAccessIntrinsic( CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, NamedIntrinsic intrinsicName) { /* If we are generating SMALL_CODE, we don't want to use intrinsics for the following, as it generates fatter code. */ if (compCodeOpt() == SMALL_CODE) { return nullptr; } /* These intrinsics generate fatter (but faster) code and are only done if we don't need SMALL_CODE */ unsigned rank = (intrinsicName == NI_Array_Set) ? (sig->numArgs - 1) : sig->numArgs; // The rank 1 case is special because it has to handle two array formats // we will simply not do that case if (rank > GT_ARR_MAX_RANK || rank <= 1) { return nullptr; } CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr; var_types elemType = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd)); // For the ref case, we will only be able to inline if the types match // (verifier checks for this, we don't care for the nonverified case and the // type is final (so we don't need to do the cast) if ((intrinsicName != NI_Array_Get) && !readonlyCall && varTypeIsGC(elemType)) { // Get the call site signature CORINFO_SIG_INFO LocalSig; eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig); assert(LocalSig.hasThis()); CORINFO_CLASS_HANDLE actualElemClsHnd; if (intrinsicName == NI_Array_Set) { // Fetch the last argument, the one that indicates the type we are setting. CORINFO_ARG_LIST_HANDLE argType = LocalSig.args; for (unsigned r = 0; r < rank; r++) { argType = info.compCompHnd->getArgNext(argType); } typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType); actualElemClsHnd = argInfo.GetClassHandle(); } else { assert(intrinsicName == NI_Array_Address); // Fetch the return type typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass); assert(retInfo.IsByRef()); actualElemClsHnd = retInfo.GetClassHandle(); } // if it's not final, we can't do the optimization if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL)) { return nullptr; } } unsigned arrayElemSize; if (elemType == TYP_STRUCT) { assert(arrElemClsHnd); arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd); } else { arrayElemSize = genTypeSize(elemType); } if ((unsigned char)arrayElemSize != arrayElemSize) { // arrayElemSize would be truncated as an unsigned char. // This means the array element is too large. Don't do the optimization. 
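        // (The element size is passed to GenTreeArrElem as an unsigned char; see the
        // static_cast<unsigned char>(arrayElemSize) where the node is created below.)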
return nullptr; } GenTree* val = nullptr; if (intrinsicName == NI_Array_Set) { // Assignment of a struct is more work, and there are more gets than sets. if (elemType == TYP_STRUCT) { return nullptr; } val = impPopStack().val; assert(genActualType(elemType) == genActualType(val->gtType) || (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) || (elemType == TYP_INT && val->gtType == TYP_BYREF) || (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT)); } noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK); GenTree* inds[GT_ARR_MAX_RANK]; for (unsigned k = rank; k > 0; k--) { inds[k - 1] = impPopStack().val; } GenTree* arr = impPopStack().val; assert(arr->gtType == TYP_REF); GenTree* arrElem = new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank), static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]); if (intrinsicName != NI_Array_Address) { if (varTypeIsStruct(elemType)) { arrElem = gtNewObjNode(sig->retTypeClass, arrElem); } else { arrElem = gtNewOperNode(GT_IND, elemType, arrElem); } } if (intrinsicName == NI_Array_Set) { assert(val != nullptr); return gtNewAssignNode(arrElem, val); } else { return arrElem; } } //------------------------------------------------------------------------ // impKeepAliveIntrinsic: Import the GC.KeepAlive intrinsic call // // Imports the intrinsic as a GT_KEEPALIVE node, and, as an optimization, // if the object to keep alive is a GT_BOX, removes its side effects and // uses the address of a local (copied from the box's source if needed) // as the operand for GT_KEEPALIVE. For the BOX optimization, if the class // of the box has no GC fields, a GT_NOP is returned. // // Arguments: // objToKeepAlive - the intrinisic call's argument // // Return Value: // The imported GT_KEEPALIVE or GT_NOP - see description. 
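//    For example, with optimizations enabled, GC.KeepAlive(box(valueWithNoGCFields))
//    imports as a GT_NOP, while GC.KeepAlive(box(valueWithGCFields)) becomes
//    GT_KEEPALIVE(ADDR(LCL_VAR boxSourceTemp)).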
// GenTree* Compiler::impKeepAliveIntrinsic(GenTree* objToKeepAlive) { assert(objToKeepAlive->TypeIs(TYP_REF)); if (opts.OptimizationEnabled() && objToKeepAlive->IsBoxedValue()) { CORINFO_CLASS_HANDLE boxedClass = lvaGetDesc(objToKeepAlive->AsBox()->BoxOp()->AsLclVar())->lvClassHnd; ClassLayout* layout = typGetObjLayout(boxedClass); if (!layout->HasGCPtr()) { gtTryRemoveBoxUpstreamEffects(objToKeepAlive, BR_REMOVE_AND_NARROW); JITDUMP("\nBOX class has no GC fields, KEEPALIVE is a NOP"); return gtNewNothingNode(); } GenTree* boxSrc = gtTryRemoveBoxUpstreamEffects(objToKeepAlive, BR_REMOVE_BUT_NOT_NARROW); if (boxSrc != nullptr) { unsigned boxTempNum; if (boxSrc->OperIs(GT_LCL_VAR)) { boxTempNum = boxSrc->AsLclVarCommon()->GetLclNum(); } else { boxTempNum = lvaGrabTemp(true DEBUGARG("Temp for the box source")); GenTree* boxTempAsg = gtNewTempAssign(boxTempNum, boxSrc); Statement* boxAsgStmt = objToKeepAlive->AsBox()->gtCopyStmtWhenInlinedBoxValue; boxAsgStmt->SetRootNode(boxTempAsg); } JITDUMP("\nImporting KEEPALIVE(BOX) as KEEPALIVE(ADDR(LCL_VAR V%02u))", boxTempNum); GenTree* boxTemp = gtNewLclvNode(boxTempNum, boxSrc->TypeGet()); GenTree* boxTempAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, boxTemp); return gtNewKeepAliveNode(boxTempAddr); } } return gtNewKeepAliveNode(objToKeepAlive); } bool Compiler::verMergeEntryStates(BasicBlock* block, bool* changed) { unsigned i; // do some basic checks first if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth) { return false; } if (verCurrentState.esStackDepth > 0) { // merge stack types StackEntry* parentStack = block->bbStackOnEntry(); StackEntry* childStack = verCurrentState.esStack; for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++) { if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == false) { return false; } } } // merge initialization status of this ptr if (verTrackObjCtorInitState) { // If we're tracking the CtorInitState, then it must not be unknown in the current state. assert(verCurrentState.thisInitialized != TIS_Bottom); // If the successor block's thisInit state is unknown, copy it from the current state. if (block->bbThisOnEntry() == TIS_Bottom) { *changed = true; verSetThisInit(block, verCurrentState.thisInitialized); } else if (verCurrentState.thisInitialized != block->bbThisOnEntry()) { if (block->bbThisOnEntry() != TIS_Top) { *changed = true; verSetThisInit(block, TIS_Top); if (block->bbFlags & BBF_FAILED_VERIFICATION) { // The block is bad. Control can flow through the block to any handler that catches the // verification exception, but the importer ignores bad blocks and therefore won't model // this flow in the normal way. To complete the merge into the bad block, the new state // needs to be manually pushed to the handlers that may be reached after the verification // exception occurs. // // Usually, the new state was already propagated to the relevant handlers while processing // the predecessors of the bad block. The exception is when the bad block is at the start // of a try region, meaning it is protected by additional handlers that do not protect its // predecessors. // if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0)) { // Push TIS_Top to the handlers that protect the bad block. Note that this can cause // recursive calls back into this code path (if successors of the current bad block are // also bad blocks). 
                        //
                        ThisInitState origTIS           = verCurrentState.thisInitialized;
                        verCurrentState.thisInitialized = TIS_Top;
                        impVerifyEHBlock(block, true);
                        verCurrentState.thisInitialized = origTIS;
                    }
                }
            }
        }
    }
    else
    {
        assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
    }

    return true;
}

/*****************************************************************************
 * 'logMsg' is true if a log message needs to be logged. false if the caller has
 * already logged it (presumably in a more detailed fashion than done here)
 */
void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
{
    block->bbJumpKind = BBJ_THROW;
    block->bbFlags |= BBF_FAILED_VERIFICATION;
    block->bbFlags &= ~BBF_IMPORTED;

    impCurStmtOffsSet(block->bbCodeOffs);

    // Clear the statement list as it exists so far; we're only going to have a verification exception.
    impStmtList = impLastStmt = nullptr;

#ifdef DEBUG
    if (logMsg)
    {
        JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
                block->bbCodeOffs, block->bbCodeOffsEnd));
        if (verbose)
        {
            printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
        }
    }

    if (JitConfig.DebugBreakOnVerificationFailure())
    {
        DebugBreak();
    }
#endif

    impBeginTreeList();

    // if the stack is non-empty evaluate all the side-effects
    if (verCurrentState.esStackDepth > 0)
    {
        impEvalSideEffects();
    }
    assert(verCurrentState.esStackDepth == 0);

    GenTree* op1 =
        gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewCallArgs(gtNewIconNode(block->bbCodeOffs)));
    // verCurrentState.esStackDepth = 0;
    impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);

    // The inliner is not able to handle methods that require a throw block, so
    // make sure this method never gets inlined.
info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE); } /***************************************************************************** * */ void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg)) { verResetCurrentState(block, &verCurrentState); verConvertBBToThrowVerificationException(block DEBUGARG(logMsg)); #ifdef DEBUG impNoteLastILoffs(); // Remember at which BC offset the tree was finished #endif // DEBUG } /******************************************************************************/ typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd) { assert(ciType < CORINFO_TYPE_COUNT); typeInfo tiResult; switch (ciType) { case CORINFO_TYPE_STRING: case CORINFO_TYPE_CLASS: tiResult = verMakeTypeInfo(clsHnd); if (!tiResult.IsType(TI_REF)) { // type must be consistent with element type return typeInfo(); } break; #ifdef TARGET_64BIT case CORINFO_TYPE_NATIVEINT: case CORINFO_TYPE_NATIVEUINT: if (clsHnd) { // If we have more precise information, use it return verMakeTypeInfo(clsHnd); } else { return typeInfo::nativeInt(); } break; #endif // TARGET_64BIT case CORINFO_TYPE_VALUECLASS: case CORINFO_TYPE_REFANY: tiResult = verMakeTypeInfo(clsHnd); // type must be constant with element type; if (!tiResult.IsValueClass()) { return typeInfo(); } break; case CORINFO_TYPE_VAR: return verMakeTypeInfo(clsHnd); case CORINFO_TYPE_PTR: // for now, pointers are treated as an error case CORINFO_TYPE_VOID: return typeInfo(); break; case CORINFO_TYPE_BYREF: { CORINFO_CLASS_HANDLE childClassHandle; CorInfoType childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle); return ByRef(verMakeTypeInfo(childType, childClassHandle)); } break; default: if (clsHnd) { // If we have more precise information, use it return typeInfo(TI_STRUCT, clsHnd); } else { return typeInfo(JITtype2tiType(ciType)); } } return tiResult; } /******************************************************************************/ typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */) { if (clsHnd == nullptr) { return typeInfo(); } // Byrefs should only occur in method and local signatures, which are accessed // using ICorClassInfo and ICorClassInfo.getChildType. // So findClass() and getClassAttribs() should not be called for byrefs if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF) { assert(!"Did findClass() return a Byref?"); return typeInfo(); } unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd); if (attribs & CORINFO_FLG_VALUECLASS) { CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd); // Meta-data validation should ensure that CORINF_TYPE_BYREF should // not occur here, so we may want to change this to an assert instead. if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR) { return typeInfo(); } #ifdef TARGET_64BIT if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT) { return typeInfo::nativeInt(); } #endif // TARGET_64BIT if (t != CORINFO_TYPE_UNDEF) { return (typeInfo(JITtype2tiType(t))); } else if (bashStructToRef) { return (typeInfo(TI_REF, clsHnd)); } else { return (typeInfo(TI_STRUCT, clsHnd)); } } else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE) { // See comment in _typeInfo.h for why we do it this way. 
return (typeInfo(TI_REF, clsHnd, true)); } else { return (typeInfo(TI_REF, clsHnd)); } } /******************************************************************************/ bool Compiler::verIsSDArray(const typeInfo& ti) { if (ti.IsNullObjRef()) { // nulls are SD arrays return true; } if (!ti.IsType(TI_REF)) { return false; } if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef())) { return false; } return true; } /******************************************************************************/ /* Given 'arrayObjectType' which is an array type, fetch the element type. */ /* Returns an error type if anything goes wrong */ typeInfo Compiler::verGetArrayElemType(const typeInfo& arrayObjectType) { assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case if (!verIsSDArray(arrayObjectType)) { return typeInfo(); } CORINFO_CLASS_HANDLE childClassHandle = nullptr; CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle); return verMakeTypeInfo(ciType, childClassHandle); } /***************************************************************************** */ typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args) { CORINFO_CLASS_HANDLE classHandle; CorInfoType ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle)); var_types type = JITtype2varType(ciType); if (varTypeIsGC(type)) { // For efficiency, getArgType only returns something in classHandle for // value types. For other types that have addition type info, you // have to call back explicitly classHandle = info.compCompHnd->getArgClass(sig, args); if (!classHandle) { NO_WAY("Could not figure out Class specified in argument or local signature"); } } return verMakeTypeInfo(ciType, classHandle); } bool Compiler::verIsByRefLike(const typeInfo& ti) { if (ti.IsByRef()) { return true; } if (!ti.IsType(TI_STRUCT)) { return false; } return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_BYREF_LIKE; } bool Compiler::verIsSafeToReturnByRef(const typeInfo& ti) { if (ti.IsPermanentHomeByRef()) { return true; } else { return false; } } bool Compiler::verIsBoxable(const typeInfo& ti) { return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables || ti.IsUnboxedGenericTypeVar() || (ti.IsType(TI_STRUCT) && // exclude byreflike structs !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_BYREF_LIKE))); } // Is it a boxed value type? bool Compiler::verIsBoxedValueType(const typeInfo& ti) { if (ti.GetType() == TI_REF) { CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef(); return !!eeIsValueClass(clsHnd); } else { return false; } } /***************************************************************************** * * Check if a TailCall is legal. */ bool Compiler::verCheckTailCallConstraint( OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter? bool speculative // If true, won't throw if verificatoin fails. Instead it will // return false to the caller. // If false, it will throw. 
) { DWORD mflags; CORINFO_SIG_INFO sig; unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so // this counter is used to keep track of how many items have been // virtually popped CORINFO_METHOD_HANDLE methodHnd = nullptr; CORINFO_CLASS_HANDLE methodClassHnd = nullptr; unsigned methodClassFlgs = 0; assert(impOpcodeIsCallOpcode(opcode)); if (compIsForInlining()) { return false; } // for calli, VerifyOrReturn that this is not a virtual method if (opcode == CEE_CALLI) { /* Get the call sig */ eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig); // We don't know the target method, so we have to infer the flags, or // assume the worst-case. mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC; } else { methodHnd = pResolvedToken->hMethod; mflags = info.compCompHnd->getMethodAttribs(methodHnd); // When verifying generic code we pair the method handle with its // owning class to get the exact method signature. methodClassHnd = pResolvedToken->hClass; assert(methodClassHnd); eeGetMethodSig(methodHnd, &sig, methodClassHnd); // opcode specific check methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd); } // We must have got the methodClassHnd if opcode is not CEE_CALLI assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI); if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig); } // check compatibility of the arguments unsigned int argCount; argCount = sig.numArgs; CORINFO_ARG_LIST_HANDLE args; args = sig.args; while (argCount--) { typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack(); // check that the argument is not a byref for tailcalls VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative); // For unsafe code, we might have parameters containing pointer to the stack location. // Disallow the tailcall for this kind. CORINFO_CLASS_HANDLE classHandle; CorInfoType ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle)); VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative); args = info.compCompHnd->getArgNext(args); } // update popCount popCount += sig.numArgs; // check for 'this' which is on non-static methods, not called via NEWOBJ if (!(mflags & CORINFO_FLG_STATIC)) { // Always update the popCount. // This is crucial for the stack calculation to be correct. typeInfo tiThis = impStackTop(popCount).seTypeInfo; popCount++; if (opcode == CEE_CALLI) { // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object // on the stack. 
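            // A value class 'this' is treated as a byref for the byref-like check below.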
if (tiThis.IsValueClass()) { tiThis.MakeByRef(); } VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative); } else { // Check type compatibility of the this argument typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd); if (tiDeclaredThis.IsValueClass()) { tiDeclaredThis.MakeByRef(); } VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative); } } // Tail calls on constrained calls should be illegal too: // when instantiated at a value type, a constrained call may pass the address of a stack allocated value VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative); // Get the exact view of the signature for an array method if (sig.retType != CORINFO_TYPE_VOID) { if (methodClassFlgs & CORINFO_FLG_ARRAY) { assert(opcode != CEE_CALLI); eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig); } } typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass); typeInfo tiCallerRetType = verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass); // void return type gets morphed into the error type, so we have to treat them specially here if (sig.retType == CORINFO_TYPE_VOID) { VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch", speculative); } else { VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType), NormaliseForStack(tiCallerRetType), true), "tailcall return mismatch", speculative); } // for tailcall, stack must be empty VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative); return true; // Yes, tailcall is legal } /***************************************************************************** * * Checks the IL verification rules for the call */ void Compiler::verVerifyCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, bool tailCall, bool readonlyCall, const BYTE* delegateCreateStart, const BYTE* codeAddr, CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName)) { DWORD mflags; CORINFO_SIG_INFO* sig = nullptr; unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so // this counter is used to keep track of how many items have been // virtually popped // for calli, VerifyOrReturn that this is not a virtual method if (opcode == CEE_CALLI) { Verify(false, "Calli not verifiable"); return; } //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item. 
mflags = callInfo->verMethodFlags; sig = &callInfo->verSig; if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig); } // opcode specific check unsigned methodClassFlgs = callInfo->classFlags; switch (opcode) { case CEE_CALLVIRT: // cannot do callvirt on valuetypes VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class"); VerifyOrReturn(sig->hasThis(), "CallVirt on static method"); break; case CEE_NEWOBJ: { assert(!tailCall); // Importer should not allow this VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC), "newobj must be on instance"); if (methodClassFlgs & CORINFO_FLG_DELEGATE) { VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor"); typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack(); typeInfo tiDeclaredFtn = verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack(); VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type"); assert(popCount == 0); typeInfo tiActualObj = impStackTop(1).seTypeInfo; typeInfo tiActualFtn = impStackTop(0).seTypeInfo; VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg"); VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch"); VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF), "delegate object type mismatch"); CORINFO_CLASS_HANDLE objTypeHandle = tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef(); // the method signature must be compatible with the delegate's invoke method // check that for virtual functions, the type of the object used to get the // ftn ptr is the same as the type of the object passed to the delegate ctor. // since this is a bit of work to determine in general, we pattern match stylized // code sequences // the delegate creation code check, which used to be done later, is now done here // so we can read delegateMethodRef directly from // from the preceding LDFTN or CEE_LDVIRTFN instruction sequence; // we then use it in our call to isCompatibleDelegate(). 
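                // The recognized IL shapes are:
                //     ldftn <target>            ; newobj <delegate .ctor>
                // and
                //     dup ; ldvirtftn <target>  ; newobj <delegate .ctor>
                // where <target> is the member ref extracted below by verCheckDelegateCreation.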
mdMemberRef delegateMethodRef = mdMemberRefNil; VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef), "must create delegates with certain IL"); CORINFO_RESOLVED_TOKEN delegateResolvedToken; delegateResolvedToken.tokenContext = impTokenLookupContextHandle; delegateResolvedToken.tokenScope = info.compScopeHnd; delegateResolvedToken.token = delegateMethodRef; delegateResolvedToken.tokenType = CORINFO_TOKENKIND_Method; info.compCompHnd->resolveToken(&delegateResolvedToken); CORINFO_CALL_INFO delegateCallInfo; eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */, CORINFO_CALLINFO_SECURITYCHECKS, &delegateCallInfo); bool isOpenDelegate = false; VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass, tiActualFtn.GetMethod(), pResolvedToken->hClass, &isOpenDelegate), "function incompatible with delegate"); // check the constraints on the target method VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass), "delegate target has unsatisfied class constraints"); VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass, tiActualFtn.GetMethod()), "delegate target has unsatisfied method constraints"); // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch) // for additional verification rules for delegates CORINFO_METHOD_HANDLE actualMethodHandle = tiActualFtn.GetMethod(); DWORD actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle); if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr)) { if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)) { VerifyOrReturn((tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly()) || verIsBoxedValueType(tiActualObj), "The 'this' parameter to the call must be either the calling method's " "'this' parameter or " "a boxed value type."); } } if (actualMethodAttribs & CORINFO_FLG_PROTECTED) { bool targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC; Verify(targetIsStatic || !isOpenDelegate, "Unverifiable creation of an open instance delegate for a protected member."); CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic) ? info.compClassHnd : tiActualObj.GetClassHandleForObjRef(); // In the case of protected methods, it is a requirement that the 'this' // pointer be a subclass of the current context. Perform this check. 
Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd), "Accessing protected method through wrong type."); } goto DONE_ARGS; } } // fall thru to default checks FALLTHROUGH; default: VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract"); } VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)), "can only newobj a delegate constructor"); // check compatibility of the arguments unsigned int argCount; argCount = sig->numArgs; CORINFO_ARG_LIST_HANDLE args; args = sig->args; while (argCount--) { typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo; typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack(); VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch"); args = info.compCompHnd->getArgNext(args); } DONE_ARGS: // update popCount popCount += sig->numArgs; // check for 'this' which are is non-static methods, not called via NEWOBJ CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd; if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ)) { typeInfo tiThis = impStackTop(popCount).seTypeInfo; popCount++; // If it is null, we assume we can access it (since it will AV shortly) // If it is anything but a reference class, there is no hierarchy, so // again, we don't need the precise instance class to compute 'protected' access if (tiThis.IsType(TI_REF)) { instanceClassHnd = tiThis.GetClassHandleForObjRef(); } // Check type compatibility of the this argument typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass); if (tiDeclaredThis.IsValueClass()) { tiDeclaredThis.MakeByRef(); } // If this is a call to the base class .ctor, set thisPtr Init for // this block. if (mflags & CORINFO_FLG_CONSTRUCTOR) { if (verTrackObjCtorInitState && tiThis.IsThisPtr() && verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass)) { assert(verCurrentState.thisInitialized != TIS_Bottom); // This should never be the case just from the logic of the verifier. VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit, "Call to base class constructor when 'this' is possibly initialized"); // Otherwise, 'this' is now initialized. verCurrentState.thisInitialized = TIS_Init; tiThis.SetInitialisedObjRef(); } else { // We allow direct calls to value type constructors // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a // constrained callvirt to illegally re-enter a .ctor on a value of reference type. 
                VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
                               "Bad call to a constructor");
            }
        }

        if (pConstrainedResolvedToken != nullptr)
        {
            VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");

            typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);

            // We just dereference this and test for equality
            tiThis.DereferenceByRef();
            VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
                           "this type mismatch with constrained type operand");

            // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
            tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
        }

        // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
        if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
        {
            tiDeclaredThis.SetIsReadonlyByRef();
        }

        VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");

        if (tiThis.IsByRef())
        {
            // Find the actual type where the method exists (as opposed to what is declared
            // in the metadata). This is to prevent passing a byref as the "this" argument
            // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
            CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
            VerifyOrReturn(eeIsValueClass(actualClassHnd),
                           "Call to base type of valuetype (which is never a valuetype)");
        }

        // Rules for non-virtual call to a non-final virtual method:
        // Define:
        // The "this" pointer is considered to be "possibly written" if
        //   1. Its address has been taken (LDARGA 0) anywhere in the method.
        //   (or)
        //   2. It has been stored to (STARG.0) anywhere in the method.
        // A non-virtual call to a non-final virtual method is only allowed if
        //   1. The this pointer passed to the callee is an instance of a boxed value type.
        //   (or)
        //   2. The this pointer passed to the callee is the current method's this pointer.
        //      (and) The current method's this pointer is not "possibly written".
        // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
        // virtual methods. (Luckily this does not affect .ctors, since they are not virtual.)
        // This is stronger than is strictly needed, but implementing a laxer rule is significantly
        // harder and more error prone.
        if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0))
        {
            VerifyOrReturn((tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly()) || verIsBoxedValueType(tiThis),
                           "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
                           "a boxed value type.");
        }
    }

    // check any constraints on the callee's class and type parameters
    VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
                   "method has unsatisfied class constraints");
    VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
                   "method has unsatisfied method constraints");

    if (mflags & CORINFO_FLG_PROTECTED)
    {
        VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
                       "Can't access protected method");
    }

    // Get the exact view of the signature for an array method
    if (sig->retType != CORINFO_TYPE_VOID)
    {
        eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
    }

    // "readonly." prefixed calls only allowed for the Address operation on arrays.
    // The methods supported by array types are under the control of the EE
    // so we can trust that only the Address operation returns a byref.
if (readonlyCall) { typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass); VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(), "unexpected use of readonly prefix"); } // Verify the tailcall if (tailCall) { verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false); } } /***************************************************************************** * Checks that a delegate creation is done using the following pattern: * dup * ldvirtftn targetMemberRef * OR * ldftn targetMemberRef * * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if * not in this basic block) * * targetMemberRef is read from the code sequence. * targetMemberRef is validated iff verificationNeeded. */ bool Compiler::verCheckDelegateCreation(const BYTE* delegateCreateStart, const BYTE* codeAddr, mdMemberRef& targetMemberRef) { if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr)) { targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]); return true; } else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr)) { targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]); return true; } return false; } typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType) { Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref"); typeInfo ptrVal = verVerifyLDIND(tiTo, instrType); typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack(); if (!tiCompatibleWith(value, normPtrVal, true)) { Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch"); } return ptrVal; } typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType) { assert(!instrType.IsStruct()); typeInfo ptrVal; if (ptr.IsByRef()) { ptrVal = DereferenceByRef(ptr); if (instrType.IsObjRef() && !ptrVal.IsObjRef()) { Verify(false, "bad pointer"); } else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal)) { Verify(false, "pointer not consistent with instr"); } } else { Verify(false, "pointer not byref"); } return ptrVal; } // Verify that the field is used properly. 'tiThis' is NULL for statics, // 'fieldFlags' is the fields attributes, and mutator is true if it is a // ld*flda or a st*fld. // 'enclosingClass' is given if we are accessing a field in some specific type. void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken, const CORINFO_FIELD_INFO& fieldInfo, const typeInfo* tiThis, bool mutator, bool allowPlainStructAsThis) { CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass; unsigned fieldFlags = fieldInfo.fieldFlags; CORINFO_CLASS_HANDLE instanceClass = info.compClassHnd; // for statics, we imagine the instance is the current class. 
    bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);

    if (mutator)
    {
        Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
        if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
        {
            Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
                       info.compIsStatic == isStaticField,
                   "bad use of initonly field (set or address taken)");
        }
    }

    if (tiThis == nullptr)
    {
        Verify(isStaticField, "used static opcode with non-static field");
    }
    else
    {
        typeInfo tThis = *tiThis;

        if (allowPlainStructAsThis && tThis.IsValueClass())
        {
            tThis.MakeByRef();
        }

        // If it is null, we assume we can access it (since it will AV shortly)
        // If it is anything but a reference class, there is no hierarchy, so
        // again, we don't need the precise instance class to compute 'protected' access
        if (tiThis->IsType(TI_REF))
        {
            instanceClass = tiThis->GetClassHandleForObjRef();
        }

        // Note that even if the field is static, we require that the this pointer
        // satisfy the same constraints as a non-static field. This happens to
        // be simpler and seems reasonable
        typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
        if (tiDeclaredThis.IsValueClass())
        {
            tiDeclaredThis.MakeByRef();

            // we allow read-only tThis, on any field access (even stores!), because if the
            // class implementor wants to prohibit stores he should make the field private.
            // we do this by setting the read-only bit on the type we compare tThis to.
            tiDeclaredThis.SetIsReadonlyByRef();
        }
        else if (verTrackObjCtorInitState && tThis.IsThisPtr())
        {
            // Any field access is legal on "uninitialized" this pointers.
            // The easiest way to implement this is to simply set the
            // initialized bit for the duration of the type check on the
            // field access only. It does not change the state of the "this"
            // for the function as a whole. Note that the "tThis" is a copy
            // of the original "this" type (*tiThis) passed in.
            tThis.SetInitialisedObjRef();
        }

        Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
    }

    // Presently the JIT does not check that we don't store or take the address of init-only fields
    // since we cannot guarantee their immutability and it is not a security issue.

    // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
    VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
                   "field has unsatisfied class constraints");
    if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
    {
        Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
               "Accessing protected method through wrong type.");
    }
}

void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
{
    if (tiOp1.IsNumberType())
    {
#ifdef TARGET_64BIT
        Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
#else  // TARGET_64BIT
        // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
        // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
        // but compatible, since we can coalesce native int with int32 (see section III.1.5).
Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch"); #endif // !TARGET_64BIT } else if (tiOp1.IsObjRef()) { switch (opcode) { case CEE_BEQ_S: case CEE_BEQ: case CEE_BNE_UN_S: case CEE_BNE_UN: case CEE_CEQ: case CEE_CGT_UN: break; default: Verify(false, "Cond not allowed on object types"); } Verify(tiOp2.IsObjRef(), "Cond type mismatch"); } else if (tiOp1.IsByRef()) { Verify(tiOp2.IsByRef(), "Cond type mismatch"); } else { Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch"); } } void Compiler::verVerifyThisPtrInitialised() { if (verTrackObjCtorInitState) { Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized"); } } bool Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target) { // Either target == context, in this case calling an alternate .ctor // Or target is the immediate parent of context return ((target == context) || (target == info.compCompHnd->getParentType(context))); } GenTree* Compiler::impImportLdvirtftn(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE)) { NO_WAY("Virtual call to a function added via EnC is not supported"); } // CoreRT generic virtual method if ((pCallInfo->sig.sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI)) { GenTree* runtimeMethodHandle = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_METHOD_HDL, pCallInfo->hMethod); return gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL, gtNewCallArgs(thisPtr, runtimeMethodHandle)); } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { if (!pCallInfo->exactContextNeedsRuntimeLookup) { GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, gtNewCallArgs(thisPtr)); call->setEntryPoint(pCallInfo->codePointerLookup.constLookup); return call; } // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too. if (IsTargetAbi(CORINFO_CORERT_ABI)) { GenTree* ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind); return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL, gtNewCallArgs(ctxTree), &pCallInfo->codePointerLookup.lookupKind); } } #endif // Get the exact descriptor for the static callsite GenTree* exactTypeDesc = impParentClassTokenToHandle(pResolvedToken); if (exactTypeDesc == nullptr) { // compDonotInline() return nullptr; } GenTree* exactMethodDesc = impTokenToHandle(pResolvedToken); if (exactMethodDesc == nullptr) { // compDonotInline() return nullptr; } GenTreeCall::Use* helpArgs = gtNewCallArgs(exactMethodDesc); helpArgs = gtPrependNewCallArg(exactTypeDesc, helpArgs); helpArgs = gtPrependNewCallArg(thisPtr, helpArgs); // Call helper function. This gets the target address of the final destination callsite. return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs); } //------------------------------------------------------------------------ // impBoxPatternMatch: match and import common box idioms // // Arguments: // pResolvedToken - resolved token from the box operation // codeAddr - position in IL stream after the box instruction // codeEndp - end of IL stream // // Return Value: // Number of IL bytes matched and imported, -1 otherwise // // Notes: // pResolvedToken is known to be a value type; ref type boxing // is handled in the CEE_BOX clause. 
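//    The recognized shapes are:
//      box ; unbox.any                   -> no-op when both tokens resolve to the same type
//      box ; br_true/br_false            -> constant (plus a null check when the boxed tree may throw)
//      box ; isinst ; br_true/br_false   -> constant, or a Nullable<T>.hasValue load
//      box ; isinst ; unbox.any          -> no-op when all three tokens resolve to the same type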
int Compiler::impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, const BYTE* codeAddr, const BYTE* codeEndp, bool makeInlineObservation) { if (codeAddr >= codeEndp) { return -1; } switch (codeAddr[0]) { case CEE_UNBOX_ANY: // box + unbox.any if (codeAddr + 1 + sizeof(mdToken) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 1 + sizeof(mdToken); } CORINFO_RESOLVED_TOKEN unboxResolvedToken; impResolveToken(codeAddr + 1, &unboxResolvedToken, CORINFO_TOKENKIND_Class); // See if the resolved tokens describe types that are equal. const TypeCompareState compare = info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, pResolvedToken->hClass); // If so, box/unbox.any is a nop. if (compare == TypeCompareState::Must) { JITDUMP("\n Importing BOX; UNBOX.ANY as NOP\n"); // Skip the next unbox.any instruction return 1 + sizeof(mdToken); } } break; case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRFALSE_S: // box + br_true/false if ((codeAddr + ((codeAddr[0] >= CEE_BRFALSE) ? 5 : 2)) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 0; } GenTree* const treeToBox = impStackTop().val; bool canOptimize = true; GenTree* treeToNullcheck = nullptr; // Can the thing being boxed cause a side effect? if ((treeToBox->gtFlags & GTF_SIDE_EFFECT) != 0) { // Is this a side effect we can replicate cheaply? if (((treeToBox->gtFlags & GTF_SIDE_EFFECT) == GTF_EXCEPT) && treeToBox->OperIs(GT_OBJ, GT_BLK, GT_IND)) { // Yes, we just need to perform a null check if needed. GenTree* const addr = treeToBox->AsOp()->gtGetOp1(); if (fgAddrCouldBeNull(addr)) { treeToNullcheck = addr; } } else { canOptimize = false; } } if (canOptimize) { CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass); if (boxHelper == CORINFO_HELP_BOX) { JITDUMP("\n Importing BOX; BR_TRUE/FALSE as %sconstant\n", treeToNullcheck == nullptr ? "" : "nullcheck+"); impPopStack(); GenTree* result = gtNewIconNode(1); if (treeToNullcheck != nullptr) { GenTree* nullcheck = gtNewNullCheck(treeToNullcheck, compCurBB); result = gtNewOperNode(GT_COMMA, TYP_INT, nullcheck, result); } impPushOnStack(result, typeInfo(TI_INT)); return 0; } } } break; case CEE_ISINST: if (codeAddr + 1 + sizeof(mdToken) + 1 <= codeEndp) { const BYTE* nextCodeAddr = codeAddr + 1 + sizeof(mdToken); switch (nextCodeAddr[0]) { // box + isinst + br_true/false case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRFALSE_S: if ((nextCodeAddr + ((nextCodeAddr[0] >= CEE_BRFALSE) ? 5 : 2)) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 1 + sizeof(mdToken); } if (!(impStackTop().val->gtFlags & GTF_SIDE_EFFECT)) { CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass); if (boxHelper == CORINFO_HELP_BOX) { CORINFO_RESOLVED_TOKEN isInstResolvedToken; impResolveToken(codeAddr + 1, &isInstResolvedToken, CORINFO_TOKENKIND_Casting); TypeCompareState castResult = info.compCompHnd->compareTypesForCast(pResolvedToken->hClass, isInstResolvedToken.hClass); if (castResult != TypeCompareState::May) { JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as constant\n"); impPopStack(); impPushOnStack(gtNewIconNode((castResult == TypeCompareState::Must) ? 
1 : 0), typeInfo(TI_INT)); // Skip the next isinst instruction return 1 + sizeof(mdToken); } } else if (boxHelper == CORINFO_HELP_BOX_NULLABLE) { // For nullable we're going to fold it to "ldfld hasValue + brtrue/brfalse" or // "ldc.i4.0 + brtrue/brfalse" in case if the underlying type is not castable to // the target type. CORINFO_RESOLVED_TOKEN isInstResolvedToken; impResolveToken(codeAddr + 1, &isInstResolvedToken, CORINFO_TOKENKIND_Casting); CORINFO_CLASS_HANDLE nullableCls = pResolvedToken->hClass; CORINFO_CLASS_HANDLE underlyingCls = info.compCompHnd->getTypeForBox(nullableCls); TypeCompareState castResult = info.compCompHnd->compareTypesForCast(underlyingCls, isInstResolvedToken.hClass); if (castResult == TypeCompareState::Must) { const CORINFO_FIELD_HANDLE hasValueFldHnd = info.compCompHnd->getFieldInClass(nullableCls, 0); assert(info.compCompHnd->getFieldOffset(hasValueFldHnd) == 0); assert(!strcmp(info.compCompHnd->getFieldName(hasValueFldHnd, nullptr), "hasValue")); GenTree* objToBox = impPopStack().val; // Spill struct to get its address (to access hasValue field) objToBox = impGetStructAddr(objToBox, nullableCls, (unsigned)CHECK_SPILL_ALL, true); impPushOnStack(gtNewFieldRef(TYP_BOOL, hasValueFldHnd, objToBox, 0), typeInfo(TI_INT)); JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as nullableVT.hasValue\n"); return 1 + sizeof(mdToken); } else if (castResult == TypeCompareState::MustNot) { impPopStack(); impPushOnStack(gtNewIconNode(0), typeInfo(TI_INT)); JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as constant (false)\n"); return 1 + sizeof(mdToken); } } } } break; // box + isinst + unbox.any case CEE_UNBOX_ANY: if ((nextCodeAddr + 1 + sizeof(mdToken)) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 2 + sizeof(mdToken) * 2; } // See if the resolved tokens in box, isinst and unbox.any describe types that are equal. CORINFO_RESOLVED_TOKEN isinstResolvedToken = {}; impResolveToken(codeAddr + 1, &isinstResolvedToken, CORINFO_TOKENKIND_Class); if (info.compCompHnd->compareTypesForEquality(isinstResolvedToken.hClass, pResolvedToken->hClass) == TypeCompareState::Must) { CORINFO_RESOLVED_TOKEN unboxResolvedToken = {}; impResolveToken(nextCodeAddr + 1, &unboxResolvedToken, CORINFO_TOKENKIND_Class); // If so, box + isinst + unbox.any is a nop. if (info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, pResolvedToken->hClass) == TypeCompareState::Must) { JITDUMP("\n Importing BOX; ISINST, UNBOX.ANY as NOP\n"); return 2 + sizeof(mdToken) * 2; } } } break; } } break; default: break; } return -1; } //------------------------------------------------------------------------ // impImportAndPushBox: build and import a value-type box // // Arguments: // pResolvedToken - resolved token from the box operation // // Return Value: // None. // // Side Effects: // The value to be boxed is popped from the stack, and a tree for // the boxed value is pushed. This method may create upstream // statements, spill side effecting trees, and create new temps. // // If importing an inlinee, we may also discover the inline must // fail. If so there is no new value pushed on the stack. Callers // should use CompDoNotInline after calling this method to see if // ongoing importation should be aborted. // // Notes: // Boxing of ref classes results in the same value as the value on // the top of the stack, so is handled inline in impImportBlockCode // for the CEE_BOX case. Only value or primitive type boxes make it // here. 
// // Boxing for nullable types is done via a helper call; boxing // of other value types is expanded inline or handled via helper // call, depending on the jit's codegen mode. // // When the jit is operating in size and time constrained modes, // using a helper call here can save jit time and code size. But it // also may inhibit cleanup optimizations that could have also had a // even greater benefit effect on code size and jit time. An optimal // strategy may need to peek ahead and see if it is easy to tell how // the box is being used. For now, we defer. void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken) { // Spill any special side effects impSpillSpecialSideEff(); // Get get the expression to box from the stack. GenTree* op1 = nullptr; GenTree* op2 = nullptr; StackEntry se = impPopStack(); CORINFO_CLASS_HANDLE operCls = se.seTypeInfo.GetClassHandle(); GenTree* exprToBox = se.val; // Look at what helper we should use. CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass); // Determine what expansion to prefer. // // In size/time/debuggable constrained modes, the helper call // expansion for box is generally smaller and is preferred, unless // the value to box is a struct that comes from a call. In that // case the call can construct its return value directly into the // box payload, saving possibly some up-front zeroing. // // Currently primitive type boxes always get inline expanded. We may // want to do the same for small structs if they don't come from // calls and don't have GC pointers, since explicitly copying such // structs is cheap. JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via"); bool canExpandInline = (boxHelper == CORINFO_HELP_BOX); bool optForSize = !exprToBox->IsCall() && (operCls != nullptr) && opts.OptimizationDisabled(); bool expandInline = canExpandInline && !optForSize; if (expandInline) { JITDUMP(" inline allocate/copy sequence\n"); // we are doing 'normal' boxing. This means that we can inline the box operation // Box(expr) gets morphed into // temp = new(clsHnd) // cpobj(temp+4, expr, clsHnd) // push temp // The code paths differ slightly below for structs and primitives because // "cpobj" differs in these cases. In one case you get // impAssignStructPtr(temp+4, expr, clsHnd) // and the other you get // *(temp+4) = expr if (opts.OptimizationDisabled()) { // For minopts/debug code, try and minimize the total number // of box temps by reusing an existing temp when possible. if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM) { impBoxTemp = lvaGrabTemp(true DEBUGARG("Reusable Box Helper")); } } else { // When optimizing, use a new temp for each box operation // since we then know the exact class of the box temp. impBoxTemp = lvaGrabTemp(true DEBUGARG("Single-def Box Helper")); lvaTable[impBoxTemp].lvType = TYP_REF; lvaTable[impBoxTemp].lvSingleDef = 1; JITDUMP("Marking V%02u as a single def local\n", impBoxTemp); const bool isExact = true; lvaSetClass(impBoxTemp, pResolvedToken->hClass, isExact); } // needs to stay in use until this box expression is appended // some other node. We approximate this by keeping it alive until // the opcode stack becomes empty impBoxTempInUse = true; // Remember the current last statement in case we need to move // a range of statements to ensure the box temp is initialized // before it's used. 
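        // For example, when the value being boxed is a call that returns through a
        // return buffer, the allocation/assignment statements created below must be
        // moved to just before that call; 'cursor' marks where those new statements begin.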
// Statement* const cursor = impLastStmt; const bool useParent = false; op1 = gtNewAllocObjNode(pResolvedToken, useParent); if (op1 == nullptr) { // If we fail to create the newobj node, we must be inlining // and have run across a type we can't describe. // assert(compDonotInline()); return; } // Remember that this basic block contains 'new' of an object, // and so does this method // compCurBB->bbFlags |= BBF_HAS_NEWOBJ; optMethodFlags |= OMF_HAS_NEWOBJ; // Assign the boxed object to the box temp. // GenTree* asg = gtNewTempAssign(impBoxTemp, op1); Statement* asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // If the exprToBox is a call that returns its value via a ret buf arg, // move the assignment statement(s) before the call (which must be a top level tree). // // We do this because impAssignStructPtr (invoked below) will // back-substitute into a call when it sees a GT_RET_EXPR and the call // has a hidden buffer pointer, So we need to reorder things to avoid // creating out-of-sequence IR. // if (varTypeIsStruct(exprToBox) && exprToBox->OperIs(GT_RET_EXPR)) { GenTreeCall* const call = exprToBox->AsRetExpr()->gtInlineCandidate->AsCall(); if (call->HasRetBufArg()) { JITDUMP("Must insert newobj stmts for box before call [%06u]\n", dspTreeID(call)); // Walk back through the statements in this block, looking for the one // that has this call as the root node. // // Because gtNewTempAssign (above) may have added statements that // feed into the actual assignment we need to move this set of added // statements as a group. // // Note boxed allocations are side-effect free (no com or finalizer) so // our only worries here are (correctness) not overlapping the box temp // lifetime and (perf) stretching the temp lifetime across the inlinee // body. // // Since this is an inline candidate, we must be optimizing, and so we have // a unique box temp per call. So no worries about overlap. // assert(!opts.OptimizationDisabled()); // Lifetime stretching could addressed with some extra cleverness--sinking // the allocation back down to just before the copy, once we figure out // where the copy is. We defer for now. // Statement* insertBeforeStmt = cursor; noway_assert(insertBeforeStmt != nullptr); while (true) { if (insertBeforeStmt->GetRootNode() == call) { break; } // If we've searched all the statements in the block and failed to // find the call, then something's wrong. // noway_assert(insertBeforeStmt != impStmtList); insertBeforeStmt = insertBeforeStmt->GetPrevStmt(); } // Found the call. Move the statements comprising the assignment. // JITDUMP("Moving " FMT_STMT "..." FMT_STMT " before " FMT_STMT "\n", cursor->GetNextStmt()->GetID(), asgStmt->GetID(), insertBeforeStmt->GetID()); assert(asgStmt == impLastStmt); do { Statement* movingStmt = impExtractLastStmt(); impInsertStmtBefore(movingStmt, insertBeforeStmt); insertBeforeStmt = movingStmt; } while (impLastStmt != cursor); } } // Create a pointer to the box payload in op1. // op1 = gtNewLclvNode(impBoxTemp, TYP_REF); op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2); // Copy from the exprToBox to the box payload. 
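        // (Illustrative) for a struct the copy is done via impAssignStructPtr; for a
        // primitive such as 'int' it is a plain store through the payload address,
        // i.e. *(boxTemp + TARGET_POINTER_SIZE) = value.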
// if (varTypeIsStruct(exprToBox)) { assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls)); op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL); } else { var_types lclTyp = exprToBox->TypeGet(); if (lclTyp == TYP_BYREF) { lclTyp = TYP_I_IMPL; } CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass); if (impIsPrimitive(jitType)) { lclTyp = JITtype2varType(jitType); } var_types srcTyp = exprToBox->TypeGet(); var_types dstTyp = lclTyp; // We allow float <-> double mismatches and implicit truncation for small types. assert((genActualType(srcTyp) == genActualType(dstTyp)) || (varTypeIsFloating(srcTyp) == varTypeIsFloating(dstTyp))); // Note regarding small types. // We are going to store to the box here via an indirection, so the cast added below is // redundant, since the store has an implicit truncation semantic. The reason we still // add this cast is so that the code which deals with GT_BOX optimizations does not have // to account for this implicit truncation (e. g. understand that BOX<byte>(0xFF + 1) is // actually BOX<byte>(0) or deal with signedness mismatch and other GT_CAST complexities). if (srcTyp != dstTyp) { exprToBox = gtNewCastNode(genActualType(dstTyp), exprToBox, false, dstTyp); } op1 = gtNewAssignNode(gtNewOperNode(GT_IND, dstTyp, op1), exprToBox); } // Spill eval stack to flush out any pending side effects. impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox")); // Set up this copy as a second assignment. Statement* copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); op1 = gtNewLclvNode(impBoxTemp, TYP_REF); // Record that this is a "box" node and keep track of the matching parts. op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt, copyStmt); // If it is a value class, mark the "box" node. We can use this information // to optimise several cases: // "box(x) == null" --> false // "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod" // "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod" op1->gtFlags |= GTF_BOX_VALUE; assert(op1->IsBoxedValue()); assert(asg->gtOper == GT_ASG); } else { // Don't optimize, just call the helper and be done with it. JITDUMP(" helper call because: %s\n", canExpandInline ? "optimizing for size" : "nullable"); assert(operCls != nullptr); // Ensure that the value class is restored op2 = impTokenToHandle(pResolvedToken, nullptr, true /* mustRestoreHandle */); if (op2 == nullptr) { // We must be backing out of an inline. assert(compDonotInline()); return; } GenTreeCall::Use* args = gtNewCallArgs(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true)); op1 = gtNewHelperCallNode(boxHelper, TYP_REF, args); } /* Push the result back on the stack, */ /* even if clsHnd is a value class we want the TI_REF */ typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass)); impPushOnStack(op1, tiRetVal); } //------------------------------------------------------------------------ // impImportNewObjArray: Build and import `new` of multi-dimmensional array // // Arguments: // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized // by a call to CEEInfo::resolveToken(). // pCallInfo - The CORINFO_CALL_INFO that has been initialized // by a call to CEEInfo::getCallInfo(). // // Assumptions: // The multi-dimensional array constructor arguments (array dimensions) are // pushed on the IL stack on entry to this method. 
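//    For example, for "newobj instance void int32[,]::.ctor(int32, int32)" the two
//    int32 dimension arguments are the top two entries on the stack.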
// // Notes: // Multi-dimensional array constructors are imported as calls to a JIT // helper, not as regular calls. void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { GenTree* classHandle = impParentClassTokenToHandle(pResolvedToken); if (classHandle == nullptr) { // compDonotInline() return; } assert(pCallInfo->sig.numArgs); GenTree* node; // Reuse the temp used to pass the array dimensions to avoid bloating // the stack frame in case there are multiple calls to multi-dim array // constructors within a single method. if (lvaNewObjArrayArgs == BAD_VAR_NUM) { lvaNewObjArrayArgs = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs")); lvaTable[lvaNewObjArrayArgs].lvType = TYP_BLK; lvaTable[lvaNewObjArrayArgs].lvExactSize = 0; } // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers // for our call to CORINFO_HELP_NEW_MDARR. lvaTable[lvaNewObjArrayArgs].lvExactSize = max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32)); // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments // to one allocation at a time. impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray")); // // The arguments of the CORINFO_HELP_NEW_MDARR helper are: // - Array class handle // - Number of dimension arguments // - Pointer to block of int32 dimensions - address of lvaNewObjArrayArgs temp. // node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK); node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node); // Pop dimension arguments from the stack one at a time and store it // into lvaNewObjArrayArgs temp. for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--) { GenTree* arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT); GenTree* dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK); dest = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest); dest = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest, new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i)); dest = gtNewOperNode(GT_IND, TYP_INT, dest); node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node); } GenTreeCall::Use* args = gtNewCallArgs(node); // pass number of arguments to the helper args = gtPrependNewCallArg(gtNewIconNode(pCallInfo->sig.numArgs), args); args = gtPrependNewCallArg(classHandle, args); node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, args); for (GenTreeCall::Use& use : node->AsCall()->Args()) { node->gtFlags |= use.GetNode()->gtFlags & GTF_GLOB_EFFECT; } node->AsCall()->compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass; // Remember that this basic block contains 'new' of a md array compCurBB->bbFlags |= BBF_HAS_NEWARRAY; impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass)); } GenTree* Compiler::impTransformThis(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, CORINFO_THIS_TRANSFORM transform) { switch (transform) { case CORINFO_DEREF_THIS: { GenTree* obj = thisPtr; // This does a LDIND on the obj, which should be a byref. 
pointing to a ref impBashVarAddrsToI(obj); assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF); CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass); obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj); // ldind could point anywhere, example a boxed class static int obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE); return obj; } case CORINFO_BOX_THIS: { // Constraint calls where there might be no // unboxed entry point require us to implement the call via helper. // These only occur when a possible target of the call // may have inherited an implementation of an interface // method from System.Object or System.ValueType. The EE does not provide us with // "unboxed" versions of these methods. GenTree* obj = thisPtr; assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL); obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj); obj->gtFlags |= GTF_EXCEPT; CorInfoType jitTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass); if (impIsPrimitive(jitTyp)) { if (obj->OperIsBlk()) { obj->ChangeOperUnchecked(GT_IND); // Obj could point anywhere, example a boxed class static int obj->gtFlags |= GTF_IND_TGTANYWHERE; obj->AsOp()->gtOp2 = nullptr; // must be zero for tree walkers } obj->gtType = JITtype2varType(jitTyp); assert(varTypeIsArithmetic(obj->gtType)); } // This pushes on the dereferenced byref // This is then used immediately to box. impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack()); // This pops off the byref-to-a-value-type remaining on the stack and // replaces it with a boxed object. // This is then used as the object to the virtual call immediately below. impImportAndPushBox(pConstrainedResolvedToken); if (compDonotInline()) { return nullptr; } obj = impPopStack().val; return obj; } case CORINFO_NO_THIS_TRANSFORM: default: return thisPtr; } } //------------------------------------------------------------------------ // impCanPInvokeInline: check whether PInvoke inlining should enabled in current method. // // Return Value: // true if PInvoke inlining should be enabled in current method, false otherwise // // Notes: // Checks a number of ambient conditions where we could pinvoke but choose not to bool Compiler::impCanPInvokeInline() { return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) && (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke ; } //------------------------------------------------------------------------ // impCanPInvokeInlineCallSite: basic legality checks using information // from a call to see if the call qualifies as an inline pinvoke. // // Arguments: // block - block contaning the call, or for inlinees, block // containing the call being inlined // // Return Value: // true if this call can legally qualify as an inline pinvoke, false otherwise // // Notes: // For runtimes that support exception handling interop there are // restrictions on using inline pinvoke in handler regions. // // * We have to disable pinvoke inlining inside of filters because // in case the main execution (i.e. 
in the try block) is inside // unmanaged code, we cannot reuse the inlined stub (we still need // the original state until we are in the catch handler) // // * We disable pinvoke inlining inside handlers since the GSCookie // is in the inlined Frame (see // CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but // this would not protect framelets/return-address of handlers. // // These restrictions are currently also in place for CoreCLR but // can be relaxed when coreclr/#8459 is addressed. bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block) { if (block->hasHndIndex()) { return false; } // The remaining limitations do not apply to CoreRT if (IsTargetAbi(CORINFO_CORERT_ABI)) { return true; } #ifdef TARGET_64BIT // On 64-bit platforms, we disable pinvoke inlining inside of try regions. // Note that this could be needed on other architectures too, but we // haven't done enough investigation to know for sure at this point. // // Here is the comment from JIT64 explaining why: // [VSWhidbey: 611015] - because the jitted code links in the // Frame (instead of the stub) we rely on the Frame not being // 'active' until inside the stub. This normally happens by the // stub setting the return address pointer in the Frame object // inside the stub. On a normal return, the return address // pointer is zeroed out so the Frame can be safely re-used, but // if an exception occurs, nobody zeros out the return address // pointer. Thus if we re-used the Frame object, it would go // 'active' as soon as we link it into the Frame chain. // // Technically we only need to disable PInvoke inlining if we're // in a handler or if we're in a try body with a catch or // filter/except where other non-handler code in this method // might run and try to re-use the dirty Frame object. // // A desktop test case where this seems to matter is // jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe if (block->hasTryIndex()) { // This does not apply to the raw pinvoke call that is inside the pinvoke // ILStub. In this case, we have to inline the raw pinvoke call into the stub, // otherwise we would end up with a stub that recursively calls itself, and end // up with a stack overflow. if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && opts.ShouldUsePInvokeHelpers()) { return true; } return false; } #endif // TARGET_64BIT return true; } //------------------------------------------------------------------------ // impCheckForPInvokeCall examine call to see if it is a pinvoke and if so // if it can be expressed as an inline pinvoke. // // Arguments: // call - tree for the call // methHnd - handle for the method being called (may be null) // sig - signature of the method being called // mflags - method flags for the method being called // block - block contaning the call, or for inlinees, block // containing the call being inlined // // Notes: // Sets GTF_CALL_M_PINVOKE on the call for pinvokes. // // Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the // call passes a combination of legality and profitabilty checks. 
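//    (For example, inline pinvoke is rejected for call sites inside handler regions,
//    for rarely-executed blocks, and when the call requires marshaling; see below.)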
// // If GTF_CALL_UNMANAGED is set, increments info.compUnmanagedCallCountWithGCTransition void Compiler::impCheckForPInvokeCall( GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block) { CorInfoCallConvExtension unmanagedCallConv; // If VM flagged it as Pinvoke, flag the call node accordingly if ((mflags & CORINFO_FLG_PINVOKE) != 0) { call->gtCallMoreFlags |= GTF_CALL_M_PINVOKE; } bool suppressGCTransition = false; if (methHnd) { if ((mflags & CORINFO_FLG_PINVOKE) == 0) { return; } unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd, nullptr, &suppressGCTransition); } else { if (sig->getCallConv() == CORINFO_CALLCONV_DEFAULT || sig->getCallConv() == CORINFO_CALLCONV_VARARG) { return; } unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(nullptr, sig, &suppressGCTransition); assert(!call->gtCallCookie); } if (suppressGCTransition) { call->gtCallMoreFlags |= GTF_CALL_M_SUPPRESS_GC_TRANSITION; } // If we can't get the unmanaged calling convention or the calling convention is unsupported in the JIT, // return here without inlining the native call. if (unmanagedCallConv == CorInfoCallConvExtension::Managed || unmanagedCallConv == CorInfoCallConvExtension::Fastcall || unmanagedCallConv == CorInfoCallConvExtension::FastcallMemberFunction) { return; } optNativeCallCount++; if (methHnd == nullptr && (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) || IsTargetAbi(CORINFO_CORERT_ABI))) { // PInvoke in CoreRT ABI must be always inlined. Non-inlineable CALLI cases have been // converted to regular method calls earlier using convertPInvokeCalliToCall. // PInvoke CALLI in IL stubs must be inlined } else { // Check legality if (!impCanPInvokeInlineCallSite(block)) { return; } // Legal PInvoke CALL in PInvoke IL stubs must be inlined to avoid infinite recursive // inlining in CoreRT. Skip the ambient conditions checks and profitability checks. if (!IsTargetAbi(CORINFO_CORERT_ABI) || (info.compFlags & CORINFO_FLG_PINVOKE) == 0) { if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && opts.ShouldUsePInvokeHelpers()) { // Raw PInvoke call in PInvoke IL stub generated must be inlined to avoid infinite // recursive calls to the stub. } else { if (!impCanPInvokeInline()) { return; } // Size-speed tradeoff: don't use inline pinvoke at rarely // executed call sites. The non-inline version is more // compact. if (block->isRunRarely()) { return; } } } // The expensive check should be last if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig)) { return; } } JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s\n", info.compFullName)); call->gtFlags |= GTF_CALL_UNMANAGED; call->unmgdCallConv = unmanagedCallConv; if (!call->IsSuppressGCTransition()) { info.compUnmanagedCallCountWithGCTransition++; } // AMD64 convention is same for native and managed if (unmanagedCallConv == CorInfoCallConvExtension::C || unmanagedCallConv == CorInfoCallConvExtension::CMemberFunction) { call->gtFlags |= GTF_CALL_POP_ARGS; } if (unmanagedCallConv == CorInfoCallConvExtension::Thiscall) { call->gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL; } } GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di) { var_types callRetTyp = JITtype2varType(sig->retType); /* The function pointer is on top of the stack - It may be a * complex expression. As it is evaluated after the args, * it may cause registered args to be spilled. Simply spill it. */ // Ignore this trivial case. 
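    // (i.e. when the function pointer is already a local variable there is nothing to spill.)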
if (impStackTop().val->gtOper != GT_LCL_VAR) { impSpillStackEntry(verCurrentState.esStackDepth - 1, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall")); } /* Get the function pointer */ GenTree* fptr = impPopStack().val; // The function pointer is typically a sized to match the target pointer size // However, stubgen IL optimization can change LDC.I8 to LDC.I4 // See ILCodeStream::LowerOpcode assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT); #ifdef DEBUG // This temporary must never be converted to a double in stress mode, // because that can introduce a call to the cast helper after the // arguments have already been evaluated. if (fptr->OperGet() == GT_LCL_VAR) { lvaTable[fptr->AsLclVarCommon()->GetLclNum()].lvKeepType = 1; } #endif /* Create the call node */ GenTreeCall* call = gtNewIndCallNode(fptr, callRetTyp, nullptr, di); call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT); #ifdef UNIX_X86_ABI call->gtFlags &= ~GTF_CALL_POP_ARGS; #endif return call; } /*****************************************************************************/ void Compiler::impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig) { assert(call->gtFlags & GTF_CALL_UNMANAGED); /* Since we push the arguments in reverse order (i.e. right -> left) * spill any side effects from the stack * * OBS: If there is only one side effect we do not need to spill it * thus we have to spill all side-effects except last one */ unsigned lastLevelWithSideEffects = UINT_MAX; unsigned argsToReverse = sig->numArgs; // For "thiscall", the first argument goes in a register. Since its // order does not need to be changed, we do not need to spill it if (call->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) { assert(argsToReverse); argsToReverse--; } #ifndef TARGET_X86 // Don't reverse args on ARM or x64 - first four args always placed in regs in order argsToReverse = 0; #endif for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++) { if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF) { assert(lastLevelWithSideEffects == UINT_MAX); impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect")); } else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) { if (lastLevelWithSideEffects != UINT_MAX) { /* We had a previous side effect - must spill it */ impSpillStackEntry(lastLevelWithSideEffects, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect")); /* Record the level for the current side effect in case we will spill it */ lastLevelWithSideEffects = level; } else { /* This is the first side effect encountered - record its level */ lastLevelWithSideEffects = level; } } } /* The argument list is now "clean" - no out-of-order side effects * Pop the argument list in reverse order */ GenTreeCall::Use* args = impPopReverseCallArgs(sig->numArgs, sig, sig->numArgs - argsToReverse); call->AsCall()->gtCallArgs = args; if (call->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) { GenTree* thisPtr = args->GetNode(); impBashVarAddrsToI(thisPtr); assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF); } for (GenTreeCall::Use& argUse : GenTreeCall::UseList(args)) { GenTree* arg = argUse.GetNode(); call->gtFlags |= arg->gtFlags & GTF_GLOB_EFFECT; // We should not be passing gc typed args to an unmanaged call. 
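        // (Illustrative) a byref argument is retyped to TYP_I_IMPL below so that the GC
        // info and the unmanaged signature agree; an object ref here is simply invalid IL.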
if (varTypeIsGC(arg->TypeGet())) { // Tolerate byrefs by retyping to native int. // // This is needed or we'll generate inconsistent GC info // for this arg at the call site (gc info says byref, // pinvoke sig says native int). // if (arg->TypeGet() == TYP_BYREF) { arg->ChangeType(TYP_I_IMPL); } else { assert(!"*** invalid IL: gc ref passed to unmanaged call"); } } } } //------------------------------------------------------------------------ // impInitClass: Build a node to initialize the class before accessing the // field if necessary // // Arguments: // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized // by a call to CEEInfo::resolveToken(). // // Return Value: If needed, a pointer to the node that will perform the class // initializtion. Otherwise, nullptr. // GenTree* Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken) { CorInfoInitClassResult initClassResult = info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle); if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0) { return nullptr; } bool runtimeLookup; GenTree* node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup); if (node == nullptr) { assert(compDonotInline()); return nullptr; } if (runtimeLookup) { node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewCallArgs(node)); } else { // Call the shared non gc static helper, as its the fastest node = fgGetSharedCCtor(pResolvedToken->hClass); } return node; } GenTree* Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp) { GenTree* op1 = nullptr; #if defined(DEBUG) // If we're replaying under SuperPMI, we're going to read the data stored by SuperPMI and use it // for optimization. Unfortunately, SuperPMI doesn't implement a guarantee on the alignment of // this data, so for some platforms which don't allow unaligned access (e.g., Linux arm32), // this can fault. We should fix SuperPMI to guarantee alignment, but that is a big change. // Instead, simply fix up the data here for future use. // This variable should be the largest size element, with the largest alignment requirement, // and the native C++ compiler should guarantee sufficient alignment. double aligned_data = 0.0; void* p_aligned_data = &aligned_data; if (info.compMethodSuperPMIIndex != -1) { switch (lclTyp) { case TYP_BOOL: case TYP_BYTE: case TYP_UBYTE: static_assert_no_msg(sizeof(unsigned __int8) == sizeof(bool)); static_assert_no_msg(sizeof(unsigned __int8) == sizeof(signed char)); static_assert_no_msg(sizeof(unsigned __int8) == sizeof(unsigned char)); // No alignment necessary for byte. 
break; case TYP_SHORT: case TYP_USHORT: static_assert_no_msg(sizeof(unsigned __int16) == sizeof(short)); static_assert_no_msg(sizeof(unsigned __int16) == sizeof(unsigned short)); if ((size_t)fldAddr % sizeof(unsigned __int16) != 0) { *(unsigned __int16*)p_aligned_data = GET_UNALIGNED_16(fldAddr); fldAddr = p_aligned_data; } break; case TYP_INT: case TYP_UINT: case TYP_FLOAT: static_assert_no_msg(sizeof(unsigned __int32) == sizeof(int)); static_assert_no_msg(sizeof(unsigned __int32) == sizeof(unsigned int)); static_assert_no_msg(sizeof(unsigned __int32) == sizeof(float)); if ((size_t)fldAddr % sizeof(unsigned __int32) != 0) { *(unsigned __int32*)p_aligned_data = GET_UNALIGNED_32(fldAddr); fldAddr = p_aligned_data; } break; case TYP_LONG: case TYP_ULONG: case TYP_DOUBLE: static_assert_no_msg(sizeof(unsigned __int64) == sizeof(__int64)); static_assert_no_msg(sizeof(unsigned __int64) == sizeof(double)); if ((size_t)fldAddr % sizeof(unsigned __int64) != 0) { *(unsigned __int64*)p_aligned_data = GET_UNALIGNED_64(fldAddr); fldAddr = p_aligned_data; } break; default: assert(!"Unexpected lclTyp"); break; } } #endif // DEBUG switch (lclTyp) { int ival; __int64 lval; double dval; case TYP_BOOL: ival = *((bool*)fldAddr); goto IVAL_COMMON; case TYP_BYTE: ival = *((signed char*)fldAddr); goto IVAL_COMMON; case TYP_UBYTE: ival = *((unsigned char*)fldAddr); goto IVAL_COMMON; case TYP_SHORT: ival = *((short*)fldAddr); goto IVAL_COMMON; case TYP_USHORT: ival = *((unsigned short*)fldAddr); goto IVAL_COMMON; case TYP_UINT: case TYP_INT: ival = *((int*)fldAddr); IVAL_COMMON: op1 = gtNewIconNode(ival); break; case TYP_LONG: case TYP_ULONG: lval = *((__int64*)fldAddr); op1 = gtNewLconNode(lval); break; case TYP_FLOAT: dval = *((float*)fldAddr); op1 = gtNewDconNode(dval); op1->gtType = TYP_FLOAT; break; case TYP_DOUBLE: dval = *((double*)fldAddr); op1 = gtNewDconNode(dval); break; default: assert(!"Unexpected lclTyp"); break; } return op1; } GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp) { // Ordinary static fields never overlap. RVA statics, however, can overlap (if they're // mapped to the same ".data" declaration). That said, such mappings only appear to be // possible with ILASM, and in ILASM-produced (ILONLY) images, RVA statics are always // read-only (using "stsfld" on them is UB). In mixed-mode assemblies, RVA statics can // be mutable, but the only current producer of such images, the C++/CLI compiler, does // not appear to support mapping different fields to the same address. So we will say // that "mutable overlapping RVA statics" are UB as well. If this ever changes, code in // morph and value numbering will need to be updated to respect "gtFldMayOverlap" and // "NotAField FldSeq". // For statics that are not "boxed", the initial address tree will contain the field sequence. // For those that are, we will attach it later, when adding the indirection for the box, since // that tree will represent the true address. bool isBoxedStatic = (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) != 0; FieldSeqNode* innerFldSeq = !isBoxedStatic ? 
GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField) : FieldSeqStore::NotAField(); GenTree* op1; switch (pFieldInfo->fieldAccessor) { case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: { assert(!compIsForInlining()); // We first call a special helper to get the statics base pointer op1 = impParentClassTokenToHandle(pResolvedToken); // compIsForInlining() is false so we should not get NULL here assert(op1 != nullptr); var_types type = TYP_BYREF; switch (pFieldInfo->helper) { case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE: type = TYP_I_IMPL; break; case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE: case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE: case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE: break; default: assert(!"unknown generic statics helper"); break; } op1 = gtNewHelperCallNode(pFieldInfo->helper, type, gtNewCallArgs(op1)); op1 = gtNewOperNode(GT_ADD, type, op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq)); } break; case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER: { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { GenTreeFlags callFlags = GTF_EMPTY; if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT) { callFlags |= GTF_CALL_HOISTABLE; } op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF); op1->gtFlags |= callFlags; op1->AsCall()->setEntryPoint(pFieldInfo->fieldLookup); } else #endif { op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper); } op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq)); break; } case CORINFO_FIELD_STATIC_READYTORUN_HELPER: { #ifdef FEATURE_READYTORUN assert(opts.IsReadyToRun()); assert(!compIsForInlining()); CORINFO_LOOKUP_KIND kind; info.compCompHnd->getLocationOfThisType(info.compMethodHnd, &kind); assert(kind.needsRuntimeLookup); GenTree* ctxTree = getRuntimeContextTree(kind.runtimeLookupKind); GenTreeCall::Use* args = gtNewCallArgs(ctxTree); GenTreeFlags callFlags = GTF_EMPTY; if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT) { callFlags |= GTF_CALL_HOISTABLE; } var_types type = TYP_BYREF; op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, args); op1->gtFlags |= callFlags; op1->AsCall()->setEntryPoint(pFieldInfo->fieldLookup); op1 = gtNewOperNode(GT_ADD, type, op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq)); #else unreached(); #endif // FEATURE_READYTORUN } break; default: { // Do we need the address of a static field? // if (access & CORINFO_ACCESS_ADDRESS) { void** pFldAddr = nullptr; void* fldAddr = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr); // We should always be able to access this static's address directly. assert(pFldAddr == nullptr); // Create the address node. GenTreeFlags handleKind = isBoxedStatic ? GTF_ICON_STATIC_BOX_PTR : GTF_ICON_STATIC_HDL; op1 = gtNewIconHandleNode((size_t)fldAddr, handleKind, innerFldSeq); #ifdef DEBUG op1->AsIntCon()->gtTargetHandle = op1->AsIntCon()->gtIconVal; #endif if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { op1->gtFlags |= GTF_ICON_INITCLASS; } } else // We need the value of a static field { // In future, it may be better to just create the right tree here instead of folding it later. 
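                // (Illustrative) for a boxed static the tree built below has the shape
                //   IND/OBJ(ADD(staticField(TYP_REF), TARGET_POINTER_SIZE))
                // i.e. read the boxed object reference, then skip past its method table
                // pointer to reach the payload.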
op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField); if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { op1->gtFlags |= GTF_FLD_INITCLASS; } if (isBoxedStatic) { FieldSeqNode* outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField); op1->ChangeType(TYP_REF); // points at boxed object op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(TARGET_POINTER_SIZE, outerFldSeq)); if (varTypeIsStruct(lclTyp)) { // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT. op1 = gtNewObjNode(pFieldInfo->structType, op1); } else { op1 = gtNewOperNode(GT_IND, lclTyp, op1); op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING; } } return op1; } break; } } if (isBoxedStatic) { FieldSeqNode* outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField); op1 = gtNewOperNode(GT_IND, TYP_REF, op1); op1->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL); op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(TARGET_POINTER_SIZE, outerFldSeq)); } if (!(access & CORINFO_ACCESS_ADDRESS)) { if (varTypeIsStruct(lclTyp)) { // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT. op1 = gtNewObjNode(pFieldInfo->structType, op1); } else { op1 = gtNewOperNode(GT_IND, lclTyp, op1); op1->gtFlags |= GTF_GLOB_REF; } } return op1; } // In general try to call this before most of the verification work. Most people expect the access // exceptions before the verification exceptions. If you do this after, that usually doesn't happen. Turns // out if you can't access something we also think that you're unverifiable for other reasons. void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall) { if (result != CORINFO_ACCESS_ALLOWED) { impHandleAccessAllowedInternal(result, helperCall); } } void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall) { switch (result) { case CORINFO_ACCESS_ALLOWED: break; case CORINFO_ACCESS_ILLEGAL: // if we're verifying, then we need to reject the illegal access to ensure that we don't think the // method is verifiable. Otherwise, delay the exception to runtime. 
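            // (i.e. under import-only verification we throw the exception now; otherwise we
            // insert a helper call so the exception is raised when the code actually runs.)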
if (compIsForImportOnly()) { info.compCompHnd->ThrowExceptionForHelper(helperCall); } else { impInsertHelperCall(helperCall); } break; } } void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo) { // Construct the argument list GenTreeCall::Use* args = nullptr; assert(helperInfo->helperNum != CORINFO_HELP_UNDEF); for (unsigned i = helperInfo->numArgs; i > 0; --i) { const CORINFO_HELPER_ARG& helperArg = helperInfo->args[i - 1]; GenTree* currentArg = nullptr; switch (helperArg.argType) { case CORINFO_HELPER_ARG_TYPE_Field: info.compCompHnd->classMustBeLoadedBeforeCodeIsRun( info.compCompHnd->getFieldClass(helperArg.fieldHandle)); currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle); break; case CORINFO_HELPER_ARG_TYPE_Method: info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle); currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle); break; case CORINFO_HELPER_ARG_TYPE_Class: info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle); currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle); break; case CORINFO_HELPER_ARG_TYPE_Module: currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle); break; case CORINFO_HELPER_ARG_TYPE_Const: currentArg = gtNewIconNode(helperArg.constant); break; default: NO_WAY("Illegal helper arg type"); } args = gtPrependNewCallArg(currentArg, args); } /* TODO-Review: * Mark as CSE'able, and hoistable. Consider marking hoistable unless you're in the inlinee. * Also, consider sticking this in the first basic block. */ GenTree* callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args); impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } //------------------------------------------------------------------------ // impTailCallRetTypeCompatible: Checks whether the return types of caller // and callee are compatible so that calle can be tail called. // sizes are not supported integral type sizes return values to temps. // // Arguments: // allowWidening -- whether to allow implicit widening by the callee. // For instance, allowing int32 -> int16 tailcalls. // The managed calling convention allows this, but // we don't want explicit tailcalls to depend on this // detail of the managed calling convention. // callerRetType -- the caller's return type // callerRetTypeClass - the caller's return struct type // callerCallConv -- calling convention of the caller // calleeRetType -- the callee's return type // calleeRetTypeClass - the callee return struct type // calleeCallConv -- calling convention of the callee // // Returns: // True if the tailcall types are compatible. // // Remarks: // Note that here we don't check compatibility in IL Verifier sense, but on the // lines of return types getting returned in the same return register. bool Compiler::impTailCallRetTypeCompatible(bool allowWidening, var_types callerRetType, CORINFO_CLASS_HANDLE callerRetTypeClass, CorInfoCallConvExtension callerCallConv, var_types calleeRetType, CORINFO_CLASS_HANDLE calleeRetTypeClass, CorInfoCallConvExtension calleeCallConv) { // Early out if the types are the same. if (callerRetType == calleeRetType) { return true; } // For integral types the managed calling convention dictates that callee // will widen the return value to 4 bytes, so we can allow implicit widening // in managed to managed tailcalls when dealing with <= 4 bytes. 
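    // (Illustrative) e.g. a caller declared to return int32 may tail call a callee that
    // returns int16: the managed callee widens its result to 32 bits in the return
    // register, so the caller still sees a correctly sized value.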
bool isManaged = (callerCallConv == CorInfoCallConvExtension::Managed) && (calleeCallConv == CorInfoCallConvExtension::Managed); if (allowWidening && isManaged && varTypeIsIntegral(callerRetType) && varTypeIsIntegral(calleeRetType) && (genTypeSize(callerRetType) <= 4) && (genTypeSize(calleeRetType) <= genTypeSize(callerRetType))) { return true; } // If the class handles are the same and not null, the return types are compatible. if ((callerRetTypeClass != nullptr) && (callerRetTypeClass == calleeRetTypeClass)) { return true; } #if defined(TARGET_AMD64) || defined(TARGET_ARM64) // Jit64 compat: if (callerRetType == TYP_VOID) { // This needs to be allowed to support the following IL pattern that Jit64 allows: // tail.call // pop // ret // // Note that the above IL pattern is not valid as per IL verification rules. // Therefore, only full trust code can take advantage of this pattern. return true; } // These checks return true if the return value type sizes are the same and // get returned in the same return register i.e. caller doesn't need to normalize // return value. Some of the tail calls permitted by below checks would have // been rejected by IL Verifier before we reached here. Therefore, only full // trust code can make those tail calls. unsigned callerRetTypeSize = 0; unsigned calleeRetTypeSize = 0; bool isCallerRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true, info.compIsVarArgs, callerCallConv); bool isCalleeRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true, info.compIsVarArgs, calleeCallConv); if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg) { return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize); } #endif // TARGET_AMD64 || TARGET_ARM64 return false; } /******************************************************************************** * * Returns true if the current opcode and and the opcodes following it correspond * to a supported tail call IL pattern. * */ bool Compiler::impIsTailCallILPattern( bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive) { // Bail out if the current opcode is not a call. if (!impOpcodeIsCallOpcode(curOpcode)) { return false; } #if !FEATURE_TAILCALL_OPT_SHARED_RETURN // If shared ret tail opt is not enabled, we will enable // it for recursive methods. if (isRecursive) #endif { // we can actually handle if the ret is in a fallthrough block, as long as that is the only part of the // sequence. Make sure we don't go past the end of the IL however. 
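        // (Illustrative) i.e. allow peeking one opcode past the current range so that a
        // "ret" beginning the fall-through block can still complete the call+ret pattern.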
codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize); } // Bail out if there is no next opcode after call if (codeAddrOfNextOpcode >= codeEnd) { return false; } OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode); return (nextOpcode == CEE_RET); } /***************************************************************************** * * Determine whether the call could be converted to an implicit tail call * */ bool Compiler::impIsImplicitTailCallCandidate( OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive) { #if FEATURE_TAILCALL_OPT if (!opts.compTailCallOpt) { return false; } if (opts.OptimizationDisabled()) { return false; } // must not be tail prefixed if (prefixFlags & PREFIX_TAILCALL_EXPLICIT) { return false; } #if !FEATURE_TAILCALL_OPT_SHARED_RETURN // the block containing call is marked as BBJ_RETURN // We allow shared ret tail call optimization on recursive calls even under // !FEATURE_TAILCALL_OPT_SHARED_RETURN. if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN)) return false; #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN // must be call+ret or call+pop+ret if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive)) { return false; } return true; #else return false; #endif // FEATURE_TAILCALL_OPT } //------------------------------------------------------------------------ // impImportCall: import a call-inspiring opcode // // Arguments: // opcode - opcode that inspires the call // pResolvedToken - resolved token for the call target // pConstrainedResolvedToken - resolved constraint token (or nullptr) // newObjThis - tree for this pointer or uninitalized newobj temp (or nullptr) // prefixFlags - IL prefix flags for the call // callInfo - EE supplied info for the call // rawILOffset - IL offset of the opcode, used for guarded devirtualization. // // Returns: // Type of the call's return value. // If we're importing an inlinee and have realized the inline must fail, the call return type should be TYP_UNDEF. // However we can't assert for this here yet because there are cases we miss. See issue #13272. // // // Notes: // opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ. // // For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated // uninitalized object. #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif var_types Compiler::impImportCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, GenTree* newobjThis, int prefixFlags, CORINFO_CALL_INFO* callInfo, IL_OFFSET rawILOffset) { assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI); // The current statement DI may not refer to the exact call, but for calls // we wish to be able to attach the exact IL instruction to get "return // value" support in the debugger, so create one with the exact IL offset. 
DebugInfo di = impCreateDIWithCurrentStackInfo(rawILOffset, true); var_types callRetTyp = TYP_COUNT; CORINFO_SIG_INFO* sig = nullptr; CORINFO_METHOD_HANDLE methHnd = nullptr; CORINFO_CLASS_HANDLE clsHnd = nullptr; unsigned clsFlags = 0; unsigned mflags = 0; GenTree* call = nullptr; GenTreeCall::Use* args = nullptr; CORINFO_THIS_TRANSFORM constraintCallThisTransform = CORINFO_NO_THIS_TRANSFORM; CORINFO_CONTEXT_HANDLE exactContextHnd = nullptr; bool exactContextNeedsRuntimeLookup = false; bool canTailCall = true; const char* szCanTailCallFailReason = nullptr; const int tailCallFlags = (prefixFlags & PREFIX_TAILCALL); const bool isReadonlyCall = (prefixFlags & PREFIX_READONLY) != 0; CORINFO_RESOLVED_TOKEN* ldftnToken = nullptr; // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could // do that before tailcalls, but that is probably not the intended // semantic. So just disallow tailcalls from synchronized methods. // Also, popping arguments in a varargs function is more work and NYI // If we have a security object, we have to keep our frame around for callers // to see any imperative security. // Reverse P/Invokes need a call to CORINFO_HELP_JIT_REVERSE_PINVOKE_EXIT // at the end, so tailcalls should be disabled. if (info.compFlags & CORINFO_FLG_SYNCH) { canTailCall = false; szCanTailCallFailReason = "Caller is synchronized"; } else if (opts.IsReversePInvoke()) { canTailCall = false; szCanTailCallFailReason = "Caller is Reverse P/Invoke"; } #if !FEATURE_FIXED_OUT_ARGS else if (info.compIsVarArgs) { canTailCall = false; szCanTailCallFailReason = "Caller is varargs"; } #endif // FEATURE_FIXED_OUT_ARGS // We only need to cast the return value of pinvoke inlined calls that return small types // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there. // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for // the time being that the callee might be compiled by the other JIT and thus the return // value will need to be widened by us (or not widened at all...) // ReadyToRun code sticks with default calling convention that does not widen small return types. bool checkForSmallType = opts.IsReadyToRun(); bool bIntrinsicImported = false; CORINFO_SIG_INFO calliSig; GenTreeCall::Use* extraArg = nullptr; /*------------------------------------------------------------------------- * First create the call node */ if (opcode == CEE_CALLI) { if (IsTargetAbi(CORINFO_CORERT_ABI)) { // See comment in impCheckForPInvokeCall BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB; if (info.compCompHnd->convertPInvokeCalliToCall(pResolvedToken, !impCanPInvokeInlineCallSite(block))) { eeGetCallInfo(pResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, callInfo); return impImportCall(CEE_CALL, pResolvedToken, nullptr, nullptr, prefixFlags, callInfo, rawILOffset); } } /* Get the call site sig */ eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &calliSig); callRetTyp = JITtype2varType(calliSig.retType); call = impImportIndirectCall(&calliSig, di); // We don't know the target method, so we have to infer the flags, or // assume the worst-case. mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC; #ifdef DEBUG if (verbose) { unsigned structSize = (callRetTyp == TYP_STRUCT) ? 
info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0; printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n", opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize); } #endif sig = &calliSig; } else // (opcode != CEE_CALLI) { NamedIntrinsic ni = NI_Illegal; // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to // supply the instantiation parameters necessary to make direct calls to underlying // shared generic code, rather than calling through instantiating stubs. If the // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT // must indeed pass an instantiation parameter. methHnd = callInfo->hMethod; sig = &(callInfo->sig); callRetTyp = JITtype2varType(sig->retType); mflags = callInfo->methodFlags; #ifdef DEBUG if (verbose) { unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0; printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n", opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize); } #endif if (compIsForInlining()) { /* Does the inlinee use StackCrawlMark */ if (mflags & CORINFO_FLG_DONT_INLINE_CALLER) { compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK); return TYP_UNDEF; } /* For now ignore varargs */ if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS); return TYP_UNDEF; } if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS); return TYP_UNDEF; } if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT)) { compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL); return TYP_UNDEF; } } clsHnd = pResolvedToken->hClass; clsFlags = callInfo->classFlags; #ifdef DEBUG // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute. // This recognition should really be done by knowing the methHnd of the relevant Mark method(s). // These should be in corelib.h, and available through a JIT/EE interface call. 
const char* modName; const char* className; const char* methodName; if ((className = eeGetClassName(clsHnd)) != nullptr && strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 && (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0) { return impImportJitTestLabelMark(sig->numArgs); } #endif // DEBUG // <NICE> Factor this into getCallInfo </NICE> bool isSpecialIntrinsic = false; if ((mflags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_INTRINSIC)) != 0) { const bool isTailCall = canTailCall && (tailCallFlags != 0); call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token, isReadonlyCall, isTailCall, pConstrainedResolvedToken, callInfo->thisTransform, &ni, &isSpecialIntrinsic); if (compDonotInline()) { return TYP_UNDEF; } if (call != nullptr) { #ifdef FEATURE_READYTORUN if (call->OperGet() == GT_INTRINSIC) { if (opts.IsReadyToRun()) { noway_assert(callInfo->kind == CORINFO_CALL); call->AsIntrinsic()->gtEntryPoint = callInfo->codePointerLookup.constLookup; } else { call->AsIntrinsic()->gtEntryPoint.addr = nullptr; call->AsIntrinsic()->gtEntryPoint.accessType = IAT_VALUE; } } #endif bIntrinsicImported = true; goto DONE_CALL; } } #ifdef FEATURE_SIMD if (supportSIMDTypes()) { call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token); if (call != nullptr) { bIntrinsicImported = true; goto DONE_CALL; } } #endif // FEATURE_SIMD if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT)) { NO_WAY("Virtual call to a function added via EnC is not supported"); } if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG) { BADCODE("Bad calling convention"); } //------------------------------------------------------------------------- // Construct the call node // // Work out what sort of call we're making. // Dispense with virtual calls implemented via LDVIRTFTN immediately. constraintCallThisTransform = callInfo->thisTransform; exactContextHnd = callInfo->contextHandle; exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup; switch (callInfo->kind) { case CORINFO_VIRTUALCALL_STUB: { assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(!(clsFlags & CORINFO_FLG_VALUECLASS)); if (callInfo->stubLookup.lookupKind.needsRuntimeLookup) { if (callInfo->stubLookup.lookupKind.runtimeLookupKind == CORINFO_LOOKUP_NOT_SUPPORTED) { // Runtime does not support inlining of all shapes of runtime lookups // Inlining has to be aborted in such a case compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE); return TYP_UNDEF; } GenTree* stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd); assert(!compDonotInline()); // This is the rough code to set up an indirect stub call assert(stubAddr != nullptr); // The stubAddr may be a // complex expression. As it is evaluated after the args, // it may cause registered args to be spilled. Simply spill it. 
unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup")); impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_NONE); stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL); // Create the actual call node assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG); call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr); call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT); call->gtFlags |= GTF_CALL_VIRT_STUB; #ifdef TARGET_X86 // No tailcalls allowed for these yet... canTailCall = false; szCanTailCallFailReason = "VirtualCall with runtime lookup"; #endif } else { // The stub address is known at compile time call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di); call->AsCall()->gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr; call->gtFlags |= GTF_CALL_VIRT_STUB; assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE && callInfo->stubLookup.constLookup.accessType != IAT_RELPVALUE); if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT; } } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { // Null check is sometimes needed for ready to run to handle // non-virtual <-> virtual changes between versions if (callInfo->nullInstanceCheck) { call->gtFlags |= GTF_CALL_NULLCHECK; } } #endif break; } case CORINFO_VIRTUALCALL_VTABLE: { assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(!(clsFlags & CORINFO_FLG_VALUECLASS)); call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di); call->gtFlags |= GTF_CALL_VIRT_VTABLE; // Should we expand virtual call targets early for this method? // if (opts.compExpandCallsEarly) { // Mark this method to expand the virtual call target early in fgMorpgCall call->AsCall()->SetExpandedEarly(); } break; } case CORINFO_VIRTUALCALL_LDVIRTFTN: { if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN); return TYP_UNDEF; } assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(!(clsFlags & CORINFO_FLG_VALUECLASS)); // OK, We've been told to call via LDVIRTFTN, so just // take the call now.... 
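                // (Illustrative) roughly: "callvirt T::M(args)" becomes
                //   calli [ldvirtftn(this, T::M)](thisCopy, args)
                // where the original "this" tree feeds the ldvirtftn lookup and its clone
                // is passed as the call's this argument.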
GenTreeCall::Use* args = impPopCallArgs(sig->numArgs, sig); GenTree* thisPtr = impPopStack().val; thisPtr = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform); assert(thisPtr != nullptr); // Clone the (possibly transformed) "this" pointer GenTree* thisPtrCopy; thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("LDVIRTFTN this pointer")); GenTree* fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo); assert(fptr != nullptr); thisPtr = nullptr; // can't reuse it // Now make an indirect call through the function pointer unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer")); impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL); fptr = gtNewLclvNode(lclNum, TYP_I_IMPL); // Create the actual call node call = gtNewIndCallNode(fptr, callRetTyp, args, di); call->AsCall()->gtCallThisArg = gtNewCallArgs(thisPtrCopy); call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT); if ((sig->sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI)) { // CoreRT generic virtual method: need to handle potential fat function pointers addFatPointerCandidate(call->AsCall()); } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { // Null check is needed for ready to run to handle // non-virtual <-> virtual changes between versions call->gtFlags |= GTF_CALL_NULLCHECK; } #endif // Sine we are jumping over some code, check that its OK to skip that code assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG); goto DONE; } case CORINFO_CALL: { // This is for a non-virtual, non-interface etc. call call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di); // We remove the nullcheck for the GetType call intrinsic. // TODO-CQ: JIT64 does not introduce the null check for many more helper calls // and intrinsics. if (callInfo->nullInstanceCheck && !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (ni == NI_System_Object_GetType))) { call->gtFlags |= GTF_CALL_NULLCHECK; } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { call->AsCall()->setEntryPoint(callInfo->codePointerLookup.constLookup); } #endif break; } case CORINFO_CALL_CODE_POINTER: { // The EE has asked us to call by computing a code pointer and then doing an // indirect call. This is because a runtime lookup is required to get the code entry point. // These calls always follow a uniform calling convention, i.e. 
no extra hidden params assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0); assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG); assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG); GenTree* fptr = impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod); if (compDonotInline()) { return TYP_UNDEF; } // Now make an indirect call through the function pointer unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer")); impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL); fptr = gtNewLclvNode(lclNum, TYP_I_IMPL); call = gtNewIndCallNode(fptr, callRetTyp, nullptr, di); call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT); if (callInfo->nullInstanceCheck) { call->gtFlags |= GTF_CALL_NULLCHECK; } break; } default: assert(!"unknown call kind"); break; } //------------------------------------------------------------------------- // Set more flags PREFIX_ASSUME(call != nullptr); if (mflags & CORINFO_FLG_NOGCCHECK) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK; } // Mark call if it's one of the ones we will maybe treat as an intrinsic if (isSpecialIntrinsic) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC; } } assert(sig); assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set. /* Some sanity checks */ // CALL_VIRT and NEWOBJ must have a THIS pointer assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS)); // static bit and hasThis are negations of one another assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0)); assert(call != nullptr); /*------------------------------------------------------------------------- * Check special-cases etc */ /* Special case - Check if it is a call to Delegate.Invoke(). */ if (mflags & CORINFO_FLG_DELEGATE_INVOKE) { assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(mflags & CORINFO_FLG_FINAL); /* Set the delegate flag */ call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV; if (callInfo->wrapperDelegateInvoke) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_WRAPPER_DELEGATE_INV; } if (opcode == CEE_CALLVIRT) { assert(mflags & CORINFO_FLG_FINAL); /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */ assert(call->gtFlags & GTF_CALL_NULLCHECK); call->gtFlags &= ~GTF_CALL_NULLCHECK; } } CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass; actualMethodRetTypeSigClass = sig->retTypeSigClass; /* Check for varargs */ if (!compFeatureVarArg() && ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG || (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)) { BADCODE("Varargs not supported."); } if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG || (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG) { assert(!compIsForInlining()); /* Set the right flags */ call->gtFlags |= GTF_CALL_POP_ARGS; call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_VARARGS; /* Can't allow tailcall for varargs as it is caller-pop. The caller will be expecting to pop a certain number of arguments, but if we tailcall to a function with a different number of arguments, we are hosed. There are ways around this (caller remembers esp value, varargs is not caller-pop, etc), but not worth it. 
*/ CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_X86 if (canTailCall) { canTailCall = false; szCanTailCallFailReason = "Callee is varargs"; } #endif /* Get the total number of arguments - this is already correct * for CALLI - for methods we have to get it from the call site */ if (opcode != CEE_CALLI) { #ifdef DEBUG unsigned numArgsDef = sig->numArgs; #endif eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig); // For vararg calls we must be sure to load the return type of the // method actually being called, as well as the return types of the // specified in the vararg signature. With type equivalency, these types // may not be the same. if (sig->retTypeSigClass != actualMethodRetTypeSigClass) { if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS && sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR) { // Make sure that all valuetypes (including enums) that we push are loaded. // This is to guarantee that if a GC is triggerred from the prestub of this methods, // all valuetypes in the method signature are already loaded. // We need to be able to find the size of the valuetypes, but we cannot // do a class-load from within GC. info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass); } } assert(numArgsDef <= sig->numArgs); } /* We will have "cookie" as the last argument but we cannot push * it on the operand stack because we may overflow, so we append it * to the arg list next after we pop them */ } //--------------------------- Inline NDirect ------------------------------ // For inline cases we technically should look at both the current // block and the call site block (or just the latter if we've // fused the EH trees). However the block-related checks pertain to // EH and we currently won't inline a method with EH. So for // inlinees, just checking the call site block is sufficient. { // New lexical block here to avoid compilation errors because of GOTOs. BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB; impCheckForPInvokeCall(call->AsCall(), methHnd, sig, mflags, block); } #ifdef UNIX_X86_ABI // On Unix x86 we use caller-cleaned convention. if ((call->gtFlags & GTF_CALL_UNMANAGED) == 0) call->gtFlags |= GTF_CALL_POP_ARGS; #endif // UNIX_X86_ABI if (call->gtFlags & GTF_CALL_UNMANAGED) { // We set up the unmanaged call by linking the frame, disabling GC, etc // This needs to be cleaned up on return. // In addition, native calls have different normalization rules than managed code // (managed calling convention always widens return values in the callee) if (canTailCall) { canTailCall = false; szCanTailCallFailReason = "Callee is native"; } checkForSmallType = true; impPopArgsForUnmanagedCall(call, sig); goto DONE; } else if ((opcode == CEE_CALLI) && ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT) && ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG)) { if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig)) { // Normally this only happens with inlining. // However, a generic method (or type) being NGENd into another module // can run into this issue as well. There's not an easy fall-back for NGEN // so instead we fallback to JIT. 
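            // Illustrative note, not part of the original source: the enclosing branch handles an IL
            // 'calli' whose signature carries an unmanaged calling convention, e.g. roughly
            //
            //     calli unmanaged stdcall int32(int32)
            //
            // Such a call needs a VM-supplied cookie describing the signature; when the VM cannot hand
            // one out here (the cross-module generics case described above), the code below either
            // aborts the inline or reports an implementation limitation.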
if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE); } else { IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)"); } return TYP_UNDEF; } GenTree* cookie = eeGetPInvokeCookie(sig); // This cookie is required to be either a simple GT_CNS_INT or // an indirection of a GT_CNS_INT // GenTree* cookieConst = cookie; if (cookie->gtOper == GT_IND) { cookieConst = cookie->AsOp()->gtOp1; } assert(cookieConst->gtOper == GT_CNS_INT); // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that // we won't allow this tree to participate in any CSE logic // cookie->gtFlags |= GTF_DONT_CSE; cookieConst->gtFlags |= GTF_DONT_CSE; call->AsCall()->gtCallCookie = cookie; if (canTailCall) { canTailCall = false; szCanTailCallFailReason = "PInvoke calli"; } } /*------------------------------------------------------------------------- * Create the argument list */ //------------------------------------------------------------------------- // Special case - for varargs we have an implicit last argument if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { assert(!compIsForInlining()); void *varCookie, *pVarCookie; if (!info.compCompHnd->canGetVarArgsHandle(sig)) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE); return TYP_UNDEF; } varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie); assert((!varCookie) != (!pVarCookie)); GenTree* cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL, sig); assert(extraArg == nullptr); extraArg = gtNewCallArgs(cookie); } //------------------------------------------------------------------------- // Extra arg for shared generic code and array methods // // Extra argument containing instantiation information is passed in the // following circumstances: // (a) To the "Address" method on array classes; the extra parameter is // the array's type handle (a TypeDesc) // (b) To shared-code instance methods in generic structs; the extra parameter // is the struct's type handle (a vtable ptr) // (c) To shared-code per-instantiation non-generic static methods in generic // classes and structs; the extra parameter is the type handle // (d) To shared-code generic methods; the extra parameter is an // exact-instantiation MethodDesc // // We also set the exact type context associated with the call so we can // inline the call correctly later on. 
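    // Illustrative example, not part of the original source: why the hidden argument is needed.
    // For code shared across reference-type instantiations, e.g. roughly
    //
    //     static void M<T>(T value) { ... }   // one native body shared by all reference types T
    //     M<string>(s);  M<object>(o);        // both call sites run that shared body
    //
    // the callee cannot recover T from 'this' or the arguments alone, so the importer appends
    // the extra instantiation parameter described in cases (a)-(d) above (here, case (d): an
    // exact-instantiation MethodDesc).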
if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE) { assert(call->AsCall()->gtCallType == CT_USER_FUNC); if (clsHnd == nullptr) { NO_WAY("CALLI on parameterized type"); } assert(opcode != CEE_CALLI); GenTree* instParam; bool runtimeLookup; // Instantiated generic method if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD) { assert(exactContextHnd != METHOD_BEING_COMPILED_CONTEXT()); CORINFO_METHOD_HANDLE exactMethodHandle = (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK); if (!exactContextNeedsRuntimeLookup) { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { instParam = impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } else #endif { instParam = gtNewIconEmbMethHndNode(exactMethodHandle); info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle); } } else { instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, true /*mustRestoreHandle*/); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } } // otherwise must be an instance method in a generic struct, // a static method in a generic type, or a runtime-generated array method else { assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS); CORINFO_CLASS_HANDLE exactClassHandle = eeGetClassFromContext(exactContextHnd); if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0) { compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD); return TYP_UNDEF; } if ((clsFlags & CORINFO_FLG_ARRAY) && isReadonlyCall) { // We indicate "readonly" to the Address operation by using a null // instParam. instParam = gtNewIconNode(0, TYP_REF); } else if (!exactContextNeedsRuntimeLookup) { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { instParam = impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } else #endif { instParam = gtNewIconEmbClsHndNode(exactClassHandle); info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle); } } else { instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, true /*mustRestoreHandle*/); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } } assert(extraArg == nullptr); extraArg = gtNewCallArgs(instParam); } if ((opcode == CEE_NEWOBJ) && ((clsFlags & CORINFO_FLG_DELEGATE) != 0)) { // Only verifiable cases are supported. // dup; ldvirtftn; newobj; or ldftn; newobj. // IL test could contain unverifiable sequence, in this case optimization should not be done. 
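        // Illustrative example, not part of the original source: the verifiable delegate-creation
        // patterns referred to above look roughly like
        //
        //     ldftn      void C::Handler()          // or:  dup / ldvirtftn instance void C::Handler()
        //     newobj     instance void D::.ctor(object, native int)
        //
        // The token pushed by ldftn/ldvirtftn is what gets captured as 'ldftnToken' below, so that
        // fgOptimizeDelegateConstructor can later recognize and optimize the constructor call.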
if (impStackHeight() > 0) { typeInfo delegateTypeInfo = impStackTop().seTypeInfo; if (delegateTypeInfo.IsToken()) { ldftnToken = delegateTypeInfo.GetToken(); } } } //------------------------------------------------------------------------- // The main group of arguments args = impPopCallArgs(sig->numArgs, sig, extraArg); call->AsCall()->gtCallArgs = args; for (GenTreeCall::Use& use : call->AsCall()->Args()) { call->gtFlags |= use.GetNode()->gtFlags & GTF_GLOB_EFFECT; } //------------------------------------------------------------------------- // The "this" pointer if (((mflags & CORINFO_FLG_STATIC) == 0) && ((sig->callConv & CORINFO_CALLCONV_EXPLICITTHIS) == 0) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr))) { GenTree* obj; if (opcode == CEE_NEWOBJ) { obj = newobjThis; } else { obj = impPopStack().val; obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform); if (compDonotInline()) { return TYP_UNDEF; } } // Store the "this" value in the call call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT; call->AsCall()->gtCallThisArg = gtNewCallArgs(obj); // Is this a virtual or interface call? if (call->AsCall()->IsVirtual()) { // only true object pointers can be virtual assert(obj->gtType == TYP_REF); // See if we can devirtualize. const bool isExplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_EXPLICIT) != 0; const bool isLateDevirtualization = false; impDevirtualizeCall(call->AsCall(), pResolvedToken, &callInfo->hMethod, &callInfo->methodFlags, &callInfo->contextHandle, &exactContextHnd, isLateDevirtualization, isExplicitTailCall, // Take care to pass raw IL offset here as the 'debug info' might be different for // inlinees. rawILOffset); // Devirtualization may change which method gets invoked. Update our local cache. // methHnd = callInfo->hMethod; } if (impIsThis(obj)) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS; } } //------------------------------------------------------------------------- // The "this" pointer for "newobj" if (opcode == CEE_NEWOBJ) { if (clsFlags & CORINFO_FLG_VAROBJSIZE) { assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately // This is a 'new' of a variable sized object, wher // the constructor is to return the object. In this case // the constructor claims to return VOID but we know it // actually returns the new object assert(callRetTyp == TYP_VOID); callRetTyp = TYP_REF; call->gtType = TYP_REF; impSpillSpecialSideEff(); impPushOnStack(call, typeInfo(TI_REF, clsHnd)); } else { if (clsFlags & CORINFO_FLG_DELEGATE) { // New inliner morph it in impImportCall. // This will allow us to inline the call to the delegate constructor. call = fgOptimizeDelegateConstructor(call->AsCall(), &exactContextHnd, ldftnToken); } if (!bIntrinsicImported) { #if defined(DEBUG) || defined(INLINE_DATA) // Keep track of the raw IL offset of the call call->AsCall()->gtRawILOffset = rawILOffset; #endif // defined(DEBUG) || defined(INLINE_DATA) // Is it an inline candidate? impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo); } // append the call node. impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); // Now push the value of the 'new onto the stack // This is a 'new' of a non-variable sized object. // Append the new node (op1) to the statement list, // and then push the local holding the value of this // new instruction on the stack. 
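            // Illustrative sketch, not part of the original source: for a reference-type 'newobj' the
            // importer has already allocated the object into a temp (newobjThis), so the overall shape
            // is roughly
            //
            //     STMT:  tmpN = ALLOCOBJ(clsHnd)          // appended before impImportCall was reached
            //     STMT:  CALL  .ctor(tmpN, args...)       // appended just above
            //     stack: LCL_VAR tmpN                     // pushed below as the value of the expression
            //
            // For value classes newobjThis is instead the address of a struct local, and that local is
            // what gets pushed.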
if (clsFlags & CORINFO_FLG_VALUECLASS) { assert(newobjThis->gtOper == GT_ADDR && newobjThis->AsOp()->gtOp1->gtOper == GT_LCL_VAR); unsigned tmp = newobjThis->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack()); } else { if (newobjThis->gtOper == GT_COMMA) { // We must have inserted the callout. Get the real newobj. newobjThis = newobjThis->AsOp()->gtOp2; } assert(newobjThis->gtOper == GT_LCL_VAR); impPushOnStack(gtNewLclvNode(newobjThis->AsLclVarCommon()->GetLclNum(), TYP_REF), typeInfo(TI_REF, clsHnd)); } } return callRetTyp; } DONE: #ifdef DEBUG // In debug we want to be able to register callsites with the EE. assert(call->AsCall()->callSig == nullptr); call->AsCall()->callSig = new (this, CMK_Generic) CORINFO_SIG_INFO; *call->AsCall()->callSig = *sig; #endif // Final importer checks for calls flagged as tail calls. // if (tailCallFlags != 0) { const bool isExplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_EXPLICIT) != 0; const bool isImplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_IMPLICIT) != 0; const bool isStressTailCall = (tailCallFlags & PREFIX_TAILCALL_STRESS) != 0; // Exactly one of these should be true. assert(isExplicitTailCall != isImplicitTailCall); // This check cannot be performed for implicit tail calls for the reason // that impIsImplicitTailCallCandidate() is not checking whether return // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT. // As a result it is possible that in the following case, we find that // the type stack is non-empty if Callee() is considered for implicit // tail calling. // int Caller(..) { .... void Callee(); ret val; ... } // // Note that we cannot check return type compatibility before ImpImportCall() // as we don't have required info or need to duplicate some of the logic of // ImpImportCall(). // // For implicit tail calls, we perform this check after return types are // known to be compatible. if (isExplicitTailCall && (verCurrentState.esStackDepth != 0)) { BADCODE("Stack should be empty after tailcall"); } // For opportunistic tailcalls we allow implicit widening, i.e. tailcalls from int32 -> int16, since the // managed calling convention dictates that the callee widens the value. For explicit tailcalls we don't // want to require this detail of the calling convention to bubble up to the tailcall helpers bool allowWidening = isImplicitTailCall; if (canTailCall && !impTailCallRetTypeCompatible(allowWidening, info.compRetType, info.compMethodInfo->args.retTypeClass, info.compCallConv, callRetTyp, sig->retTypeClass, call->AsCall()->GetUnmanagedCallConv())) { canTailCall = false; szCanTailCallFailReason = "Return types are not tail call compatible"; } // Stack empty check for implicit tail calls. if (canTailCall && isImplicitTailCall && (verCurrentState.esStackDepth != 0)) { #ifdef TARGET_AMD64 // JIT64 Compatibility: Opportunistic tail call stack mismatch throws a VerificationException // in JIT64, not an InvalidProgramException. Verify(false, "Stack should be empty after tailcall"); #else // TARGET_64BIT BADCODE("Stack should be empty after tailcall"); #endif //! TARGET_64BIT } // assert(compCurBB is not a catch, finally or filter block); // assert(compCurBB is not a try block protected by a finally block); assert(!isExplicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN); // Ask VM for permission to tailcall if (canTailCall) { // True virtual or indirect calls, shouldn't pass in a callee handle. 
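            // Illustrative recap, not part of the original source, of the two tail-call flavours being
            // validated in this block:
            //
            //     explicit:   tail.                            // "tail." IL prefix; the stack must be
            //                 call   int32 C::M(int32)         // empty after the call
            //                 ret
            //     implicit:   call   int32 C::M(int32)         // a call immediately followed by ret may
            //                 ret                              // be tail-called opportunistically
            //
            // The VM gets the final say just below; for true virtual or indirect calls no exact callee
            // handle can be supplied, hence the nullptr.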
CORINFO_METHOD_HANDLE exactCalleeHnd = ((call->AsCall()->gtCallType != CT_USER_FUNC) || call->AsCall()->IsVirtual()) ? nullptr : methHnd; if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, isExplicitTailCall)) { if (isExplicitTailCall) { // In case of explicit tail calls, mark it so that it is not considered // for in-lining. call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL; JITDUMP("\nGTF_CALL_M_EXPLICIT_TAILCALL set for call [%06u]\n", dspTreeID(call)); if (isStressTailCall) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_STRESS_TAILCALL; JITDUMP("\nGTF_CALL_M_STRESS_TAILCALL set for call [%06u]\n", dspTreeID(call)); } } else { #if FEATURE_TAILCALL_OPT // Must be an implicit tail call. assert(isImplicitTailCall); // It is possible that a call node is both an inline candidate and marked // for opportunistic tail calling. In-lining happens before morhphing of // trees. If in-lining of an in-line candidate gets aborted for whatever // reason, it will survive to the morphing stage at which point it will be // transformed into a tail call after performing additional checks. call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL; JITDUMP("\nGTF_CALL_M_IMPLICIT_TAILCALL set for call [%06u]\n", dspTreeID(call)); #else //! FEATURE_TAILCALL_OPT NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls"); #endif // FEATURE_TAILCALL_OPT } // This might or might not turn into a tailcall. We do more // checks in morph. For explicit tailcalls we need more // information in morph in case it turns out to be a // helper-based tailcall. if (isExplicitTailCall) { assert(call->AsCall()->tailCallInfo == nullptr); call->AsCall()->tailCallInfo = new (this, CMK_CorTailCallInfo) TailCallSiteInfo; switch (opcode) { case CEE_CALLI: call->AsCall()->tailCallInfo->SetCalli(sig); break; case CEE_CALLVIRT: call->AsCall()->tailCallInfo->SetCallvirt(sig, pResolvedToken); break; default: call->AsCall()->tailCallInfo->SetCall(sig, pResolvedToken); break; } } } else { // canTailCall reported its reasons already canTailCall = false; JITDUMP("\ninfo.compCompHnd->canTailCall returned false for call [%06u]\n", dspTreeID(call)); } } else { // If this assert fires it means that canTailCall was set to false without setting a reason! assert(szCanTailCallFailReason != nullptr); JITDUMP("\nRejecting %splicit tail call for [%06u], reason: '%s'\n", isExplicitTailCall ? "ex" : "im", dspTreeID(call), szCanTailCallFailReason); info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, isExplicitTailCall, TAILCALL_FAIL, szCanTailCallFailReason); } } // Note: we assume that small return types are already normalized by the managed callee // or by the pinvoke stub for calls to unmanaged code. if (!bIntrinsicImported) { // // Things needed to be checked when bIntrinsicImported is false. // assert(call->gtOper == GT_CALL); assert(callInfo != nullptr); if (compIsForInlining() && opcode == CEE_CALLVIRT) { GenTree* callObj = call->AsCall()->gtCallThisArg->GetNode(); if ((call->AsCall()->IsVirtual() || (call->gtFlags & GTF_CALL_NULLCHECK)) && impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, call->AsCall()->gtCallArgs, callObj, impInlineInfo->inlArgInfo)) { impInlineInfo->thisDereferencedFirst = true; } } #if defined(DEBUG) || defined(INLINE_DATA) // Keep track of the raw IL offset of the call call->AsCall()->gtRawILOffset = rawILOffset; #endif // defined(DEBUG) || defined(INLINE_DATA) // Is it an inline candidate? 
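        // Note added for exposition, not part of the original source: impMarkInlineCandidate applies
        // the inline heuristics here; if the call is accepted it receives inline-candidate info and,
        // later at DONE_CALL, is spilled into its own statement and replaced on the stack by a
        // GT_RET_EXPR placeholder instead of being used directly.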
impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo); } // Extra checks for tail calls and tail recursion. // // A tail recursive call is a potential loop from the current block to the start of the root method. // If we see a tail recursive call, mark the blocks from the call site back to the entry as potentially // being in a loop. // // Note: if we're importing an inlinee we don't mark the right set of blocks, but by then it's too // late. Currently this doesn't lead to problems. See GitHub issue 33529. // // OSR also needs to handle tail calls specially: // * block profiling in OSR methods needs to ensure probes happen before tail calls, not after. // * the root method entry must be imported if there's a recursive tail call or a potentially // inlineable tail call. // if ((tailCallFlags != 0) && canTailCall) { if (gtIsRecursiveCall(methHnd)) { assert(verCurrentState.esStackDepth == 0); BasicBlock* loopHead = nullptr; if (!compIsForInlining() && opts.IsOSR()) { // For root method OSR we may branch back to the actual method entry, // which is not fgFirstBB, and which we will need to import. assert(fgEntryBB != nullptr); loopHead = fgEntryBB; } else { // For normal jitting we may branch back to the firstBB; this // should already be imported. loopHead = fgFirstBB; } JITDUMP("\nTail recursive call [%06u] in the method. Mark " FMT_BB " to " FMT_BB " as having a backward branch.\n", dspTreeID(call), loopHead->bbNum, compCurBB->bbNum); fgMarkBackwardJump(loopHead, compCurBB); } // We only do these OSR checks in the root method because: // * If we fail to import the root method entry when importing the root method, we can't go back // and import it during inlining. So instead of checking jsut for recursive tail calls we also // have to check for anything that might introduce a recursive tail call. // * We only instrument root method blocks in OSR methods, // if (opts.IsOSR() && !compIsForInlining()) { // If a root method tail call candidate block is not a BBJ_RETURN, it should have a unique // BBJ_RETURN successor. Mark that successor so we can handle it specially during profile // instrumentation. // if (compCurBB->bbJumpKind != BBJ_RETURN) { BasicBlock* const successor = compCurBB->GetUniqueSucc(); assert(successor->bbJumpKind == BBJ_RETURN); successor->bbFlags |= BBF_TAILCALL_SUCCESSOR; optMethodFlags |= OMF_HAS_TAILCALL_SUCCESSOR; } // If this call might eventually turn into a loop back to method entry, make sure we // import the method entry. // assert(call->IsCall()); GenTreeCall* const actualCall = call->AsCall(); const bool mustImportEntryBlock = gtIsRecursiveCall(methHnd) || actualCall->IsInlineCandidate() || actualCall->IsGuardedDevirtualizationCandidate(); // Only schedule importation if we're not currently importing. // if (mustImportEntryBlock && (compCurBB != fgEntryBB)) { JITDUMP("\nOSR: inlineable or recursive tail call [%06u] in the method, so scheduling " FMT_BB " for importation\n", dspTreeID(call), fgEntryBB->bbNum); impImportBlockPending(fgEntryBB); } } } if ((sig->flags & CORINFO_SIGFLAG_FAT_CALL) != 0) { assert(opcode == CEE_CALLI || callInfo->kind == CORINFO_CALL_CODE_POINTER); addFatPointerCandidate(call->AsCall()); } DONE_CALL: // Push or append the result of the call if (callRetTyp == TYP_VOID) { if (opcode == CEE_NEWOBJ) { // we actually did push something, so don't spill the thing we just pushed. 
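            // Note added for exposition, not part of the original source: for CEE_NEWOBJ the constructor
            // itself returns void, but the freshly allocated object was already pushed on the stack, so
            // the ctor call is appended with a spill level one below the top of stack - everything except
            // the just-pushed object may be spilled.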
assert(verCurrentState.esStackDepth > 0); impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtDI); } else { impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } } else { impSpillSpecialSideEff(); if (clsFlags & CORINFO_FLG_ARRAY) { eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig); } typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass); tiRetVal.NormaliseForStack(); // The CEE_READONLY prefix modifies the verification semantics of an Address // operation on an array type. if ((clsFlags & CORINFO_FLG_ARRAY) && isReadonlyCall && tiRetVal.IsByRef()) { tiRetVal.SetIsReadonlyByRef(); } if (call->IsCall()) { // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call) GenTreeCall* origCall = call->AsCall(); const bool isFatPointerCandidate = origCall->IsFatPointerCandidate(); const bool isInlineCandidate = origCall->IsInlineCandidate(); const bool isGuardedDevirtualizationCandidate = origCall->IsGuardedDevirtualizationCandidate(); if (varTypeIsStruct(callRetTyp)) { // Need to treat all "split tree" cases here, not just inline candidates call = impFixupCallStructReturn(call->AsCall(), sig->retTypeClass); } // TODO: consider handling fatcalli cases this way too...? if (isInlineCandidate || isGuardedDevirtualizationCandidate) { // We should not have made any adjustments in impFixupCallStructReturn // as we defer those until we know the fate of the call. assert(call == origCall); assert(opts.OptEnabled(CLFLG_INLINING)); assert(!isFatPointerCandidate); // We should not try to inline calli. // Make the call its own tree (spill the stack if needed). // Do not consume the debug info here. This is particularly // important if we give up on the inline, in which case the // call will typically end up in the statement that contains // the GT_RET_EXPR that we leave on the stack. impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI, false); // TODO: Still using the widened type. GenTree* retExpr = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp), compCurBB->bbFlags); // Link the retExpr to the call so if necessary we can manipulate it later. origCall->gtInlineCandidateInfo->retExpr = retExpr; // Propagate retExpr as the placeholder for the call. call = retExpr; } else { // If the call is virtual, and has a generics context, and is not going to have a class probe, // record the context for possible use during late devirt. // // If we ever want to devirt at Tier0, and/or see issues where OSR methods under PGO lose // important devirtualizations, we'll want to allow both a class probe and a captured context. // if (origCall->IsVirtual() && (origCall->gtCallType != CT_INDIRECT) && (exactContextHnd != nullptr) && (origCall->gtClassProfileCandidateInfo == nullptr)) { JITDUMP("\nSaving context %p for call [%06u]\n", exactContextHnd, dspTreeID(origCall)); origCall->gtCallMoreFlags |= GTF_CALL_M_LATE_DEVIRT; LateDevirtualizationInfo* const info = new (this, CMK_Inlining) LateDevirtualizationInfo; info->exactContextHnd = exactContextHnd; origCall->gtLateDevirtualizationInfo = info; } if (isFatPointerCandidate) { // fatPointer candidates should be in statements of the form call() or var = call(). // Such form allows to find statements with fat calls without walking through whole trees // and removes problems with cutting trees. 
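                    // Illustrative sketch, not part of the original source: the normalization performed
                    // below turns a fat-pointer calli that is used as a sub-expression into
                    //
                    //     STMT:  tmpN = CALLI fatPtr(args...)
                    //     ...    LCL_VAR tmpN                    // takes the call's place on the stack
                    //
                    // so that every fat-pointer call ends up at the root of its own statement, matching
                    // the "call() or var = call()" forms described above.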
assert(!bIntrinsicImported); assert(IsTargetAbi(CORINFO_CORERT_ABI)); if (call->OperGet() != GT_LCL_VAR) // can be already converted by impFixupCallStructReturn. { unsigned calliSlot = lvaGrabTemp(true DEBUGARG("calli")); LclVarDsc* varDsc = lvaGetDesc(calliSlot); varDsc->lvVerTypeInfo = tiRetVal; impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE); // impAssignTempGen can change src arg list and return type for call that returns struct. var_types type = genActualType(lvaTable[calliSlot].TypeGet()); call = gtNewLclvNode(calliSlot, type); } } // For non-candidates we must also spill, since we // might have locals live on the eval stack that this // call can modify. // // Suppress this for certain well-known call targets // that we know won't modify locals, eg calls that are // recognized in gtCanOptimizeTypeEquality. Otherwise // we may break key fragile pattern matches later on. bool spillStack = true; if (call->IsCall()) { GenTreeCall* callNode = call->AsCall(); if ((callNode->gtCallType == CT_HELPER) && (gtIsTypeHandleToRuntimeTypeHelper(callNode) || gtIsTypeHandleToRuntimeTypeHandleHelper(callNode))) { spillStack = false; } else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0) { spillStack = false; } } if (spillStack) { impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call")); } } } if (!bIntrinsicImported) { //------------------------------------------------------------------------- // /* If the call is of a small type and the callee is managed, the callee will normalize the result before returning. However, we need to normalize small type values returned by unmanaged functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here if we use the shorter inlined pinvoke stub. */ if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT)) { call = gtNewCastNode(genActualType(callRetTyp), call, false, callRetTyp); } } impPushOnStack(call, tiRetVal); } // VSD functions get a new call target each time we getCallInfo, so clear the cache. // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out. 
// if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB)) // callInfoCache.uncacheCallInfo(); return callRetTyp; } #ifdef _PREFAST_ #pragma warning(pop) #endif bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo, CorInfoCallConvExtension callConv) { CorInfoType corType = methInfo->args.retType; if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY)) { // We have some kind of STRUCT being returned structPassingKind howToReturnStruct = SPK_Unknown; var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, callConv, &howToReturnStruct); if (howToReturnStruct == SPK_ByReference) { return true; } } return false; } #ifdef DEBUG // var_types Compiler::impImportJitTestLabelMark(int numArgs) { TestLabelAndNum tlAndN; if (numArgs == 2) { tlAndN.m_num = 0; StackEntry se = impPopStack(); assert(se.seTypeInfo.GetType() == TI_INT); GenTree* val = se.val; assert(val->IsCnsIntOrI()); tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue(); } else if (numArgs == 3) { StackEntry se = impPopStack(); assert(se.seTypeInfo.GetType() == TI_INT); GenTree* val = se.val; assert(val->IsCnsIntOrI()); tlAndN.m_num = val->AsIntConCommon()->IconValue(); se = impPopStack(); assert(se.seTypeInfo.GetType() == TI_INT); val = se.val; assert(val->IsCnsIntOrI()); tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue(); } else { assert(false); } StackEntry expSe = impPopStack(); GenTree* node = expSe.val; // There are a small number of special cases, where we actually put the annotation on a subnode. if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100) { // A loop hoist annotation with value >= 100 means that the expression should be a static field access, // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some // offset within the the static field block whose address is returned by the helper call. // The annotation is saying that this address calculation, but not the entire access, should be hoisted. assert(node->OperGet() == GT_IND); tlAndN.m_num -= 100; GetNodeTestData()->Set(node->AsOp()->gtOp1, tlAndN); GetNodeTestData()->Remove(node); } else { GetNodeTestData()->Set(node, tlAndN); } impPushOnStack(node, expSe.seTypeInfo); return node->TypeGet(); } #endif // DEBUG //----------------------------------------------------------------------------------- // impFixupCallStructReturn: For a call node that returns a struct do one of the following: // - set the flag to indicate struct return via retbuf arg; // - adjust the return type to a SIMD type if it is returned in 1 reg; // - spill call result into a temp if it is returned into 2 registers or more and not tail call or inline candidate. 
// // Arguments: // call - GT_CALL GenTree node // retClsHnd - Class handle of return type of the call // // Return Value: // Returns new GenTree node after fixing struct return of call node // GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd) { if (!varTypeIsStruct(call)) { return call; } call->gtRetClsHnd = retClsHnd; #if FEATURE_MULTIREG_RET call->InitializeStructReturnType(this, retClsHnd, call->GetUnmanagedCallConv()); const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc(); const unsigned retRegCount = retTypeDesc->GetReturnRegCount(); #else // !FEATURE_MULTIREG_RET const unsigned retRegCount = 1; #endif // !FEATURE_MULTIREG_RET structPassingKind howToReturnStruct; var_types returnType = getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct); if (howToReturnStruct == SPK_ByReference) { assert(returnType == TYP_UNKNOWN); call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG; return call; } // Recognize SIMD types as we do for LCL_VARs, // note it could be not the ABI specific type, for example, on x64 we can set 'TYP_SIMD8` // for `System.Numerics.Vector2` here but lower will change it to long as ABI dictates. var_types simdReturnType = impNormStructType(call->gtRetClsHnd); if (simdReturnType != call->TypeGet()) { assert(varTypeIsSIMD(simdReturnType)); JITDUMP("changing the type of a call [%06u] from %s to %s\n", dspTreeID(call), varTypeName(call->TypeGet()), varTypeName(simdReturnType)); call->ChangeType(simdReturnType); } if (retRegCount == 1) { return call; } #if FEATURE_MULTIREG_RET assert(varTypeIsStruct(call)); // It could be a SIMD returned in several regs. assert(returnType == TYP_STRUCT); assert((howToReturnStruct == SPK_ByValueAsHfa) || (howToReturnStruct == SPK_ByValue)); #ifdef UNIX_AMD64_ABI // must be a struct returned in two registers assert(retRegCount == 2); #else // not UNIX_AMD64_ABI assert(retRegCount >= 2); #endif // not UNIX_AMD64_ABI if (!call->CanTailCall() && !call->IsInlineCandidate()) { // Force a call returning multi-reg struct to be always of the IR form // tmp = call // // No need to assign a multi-reg struct to a local var if: // - It is a tail call or // - The call is marked for in-lining later return impAssignMultiRegTypeToVar(call, retClsHnd DEBUGARG(call->GetUnmanagedCallConv())); } return call; #endif // FEATURE_MULTIREG_RET } /***************************************************************************** For struct return values, re-type the operand in the case where the ABI does not use a struct return buffer */ //------------------------------------------------------------------------ // impFixupStructReturnType: For struct return values it sets appropriate flags in MULTIREG returns case; // in non-multiref case it handles two special helpers: `CORINFO_HELP_GETFIELDSTRUCT`, `CORINFO_HELP_UNBOX_NULLABLE`. // // Arguments: // op - the return value; // retClsHnd - the struct handle; // unmgdCallConv - the calling convention of the function that returns this struct. // // Return Value: // the result tree that does the return. 
// GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension unmgdCallConv) { assert(varTypeIsStruct(info.compRetType)); assert(info.compRetBuffArg == BAD_VAR_NUM); JITDUMP("\nimpFixupStructReturnType: retyping\n"); DISPTREE(op); #if defined(TARGET_XARCH) #if FEATURE_MULTIREG_RET // No VarArgs for CoreCLR on x64 Unix UNIX_AMD64_ABI_ONLY(assert(!info.compIsVarArgs)); // Is method returning a multi-reg struct? if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd, unmgdCallConv)) { // In case of multi-reg struct return, we force IR to be one of the following: // GT_RETURN(lclvar) or GT_RETURN(call). If op is anything other than a // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp). if (op->gtOper == GT_LCL_VAR) { // Note that this is a multi-reg return. unsigned lclNum = op->AsLclVarCommon()->GetLclNum(); lvaTable[lclNum].lvIsMultiRegRet = true; // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. op->gtFlags |= GTF_DONT_CSE; return op; } if (op->gtOper == GT_CALL) { return op; } return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv)); } #else assert(info.compRetNativeType != TYP_STRUCT); #endif // defined(UNIX_AMD64_ABI) || defined(TARGET_X86) #elif FEATURE_MULTIREG_RET && defined(TARGET_ARM) if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd)) { if (op->gtOper == GT_LCL_VAR) { // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT unsigned lclNum = op->AsLclVarCommon()->GetLclNum(); // Make sure this struct type stays as struct so that we can return it as an HFA lvaTable[lclNum].lvIsMultiRegRet = true; // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. op->gtFlags |= GTF_DONT_CSE; return op; } if (op->gtOper == GT_CALL) { if (op->AsCall()->IsVarargs()) { // We cannot tail call because control needs to return to fixup the calling // convention for result return. op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL; op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; } else { return op; } } return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv)); } #elif FEATURE_MULTIREG_RET && defined(TARGET_ARM64) // Is method returning a multi-reg struct? if (IsMultiRegReturnedType(retClsHnd, unmgdCallConv)) { if (op->gtOper == GT_LCL_VAR) { // This LCL_VAR stays as a TYP_STRUCT unsigned lclNum = op->AsLclVarCommon()->GetLclNum(); if (!lvaIsImplicitByRefLocal(lclNum)) { // Make sure this struct type is not struct promoted lvaTable[lclNum].lvIsMultiRegRet = true; // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. op->gtFlags |= GTF_DONT_CSE; return op; } } if (op->gtOper == GT_CALL) { if (op->AsCall()->IsVarargs()) { // We cannot tail call because control needs to return to fixup the calling // convention for result return. op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL; op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; } else { return op; } } return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv)); } #endif // FEATURE_MULTIREG_RET && TARGET_ARM64 if (!op->IsCall() || !op->AsCall()->TreatAsHasRetBufArg(this)) { // Don't retype `struct` as a primitive type in `ret` instruction. return op; } // This must be one of those 'special' helpers that don't // really have a return buffer, but instead use it as a way // to keep the trees cleaner with fewer address-taken temps. 
// // Well now we have to materialize the the return buffer as // an address-taken temp. Then we can return the temp. // // NOTE: this code assumes that since the call directly // feeds the return, then the call must be returning the // same structure/class/type. // unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer")); // No need to spill anything as we're about to return. impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE); op = gtNewLclvNode(tmpNum, info.compRetType); JITDUMP("\nimpFixupStructReturnType: created a pseudo-return buffer for a special helper\n"); DISPTREE(op); return op; } /***************************************************************************** CEE_LEAVE may be jumping out of a protected block, viz, a catch or a finally-protected try. We find the finally blocks protecting the current offset (in order) by walking over the complete exception table and finding enclosing clauses. This assumes that the table is sorted. This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS. If we are leaving a catch handler, we need to attach the CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks. After this function, the BBJ_LEAVE block has been converted to a different type. */ #if !defined(FEATURE_EH_FUNCLETS) void Compiler::impImportLeave(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\nBefore import CEE_LEAVE:\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created) unsigned blkAddr = block->bbCodeOffs; BasicBlock* leaveTarget = block->bbJumpDest; unsigned jmpAddr = leaveTarget->bbCodeOffs; // LEAVE clears the stack, spill side effects, and set stack to 0 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave")); verCurrentState.esStackDepth = 0; assert(block->bbJumpKind == BBJ_LEAVE); assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary BasicBlock* step = DUMMY_INIT(NULL); unsigned encFinallies = 0; // Number of enclosing finallies. GenTree* endCatches = NULL; Statement* endLFinStmt = NULL; // The statement tree to indicate the end of locally-invoked finally. unsigned XTnum; EHblkDsc* HBtab; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { // Grab the handler offsets IL_OFFSET tryBeg = HBtab->ebdTryBegOffs(); IL_OFFSET tryEnd = HBtab->ebdTryEndOffs(); IL_OFFSET hndBeg = HBtab->ebdHndBegOffs(); IL_OFFSET hndEnd = HBtab->ebdHndEndOffs(); /* Is this a catch-handler we are CEE_LEAVEing out of? * If so, we need to call CORINFO_HELP_ENDCATCH. 
*/ if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd)) { // Can't CEE_LEAVE out of a finally/fault handler if (HBtab->HasFinallyOrFaultHandler()) BADCODE("leave out of fault/finally block"); // Create the call to CORINFO_HELP_ENDCATCH GenTree* endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID); // Make a list of all the currently pending endCatches if (endCatches) endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch); else endCatches = endCatch; #ifdef DEBUG if (verbose) { printf("impImportLeave - " FMT_BB " jumping out of catch handler EH#%u, adding call to " "CORINFO_HELP_ENDCATCH\n", block->bbNum, XTnum); } #endif } else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) { /* This is a finally-protected try we are jumping out of */ /* If there are any pending endCatches, and we have already jumped out of a finally-protected try, then the endCatches have to be put in a block in an outer try for async exceptions to work correctly. Else, just use append to the original block */ BasicBlock* callBlock; assert(!encFinallies == !endLFinStmt); // if we have finallies, we better have an endLFin tree, and vice-versa if (encFinallies == 0) { assert(step == DUMMY_INIT(NULL)); callBlock = block; callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY if (endCatches) impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY " "block %s\n", callBlock->dspToString()); } #endif } else { assert(step != DUMMY_INIT(NULL)); /* Calling the finally block */ callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step); assert(step->bbJumpKind == BBJ_ALWAYS); step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next // finally in the chain) step->bbJumpDest->bbRefs++; /* The new block will inherit this block's weight */ callBlock->inheritWeight(block); #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block %s\n", callBlock->dspToString()); } #endif Statement* lastStmt; if (endCatches) { lastStmt = gtNewStmt(endCatches); endLFinStmt->SetNextStmt(lastStmt); lastStmt->SetPrevStmt(endLFinStmt); } else { lastStmt = endLFinStmt; } // note that this sets BBF_IMPORTED on the block impEndTreeList(callBlock, endLFinStmt, lastStmt); } step = fgNewBBafter(BBJ_ALWAYS, callBlock, true); /* The new block will inherit this block's weight */ step->inheritWeight(block); step->bbFlags |= BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block %s\n", step->dspToString()); } #endif unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel; assert(finallyNesting <= compHndBBtabCount); callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler. 
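            // Illustrative sketch, not part of the original source: leaving a region protected by two
            // finallies produces, per the function header comment, a chain of roughly this shape:
            //
            //     BBJ_CALLFINALLY (inner finally) -> step BBJ_ALWAYS -> BBJ_CALLFINALLY (outer finally)
            //         -> step BBJ_ALWAYS -> final BBJ_ALWAYS -> leave target
            //
            // with a GT_END_LFIN statement (created just below) placed in the following call/final block
            // to mark the end of each locally-invoked finally.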
GenTree* endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting); endLFinStmt = gtNewStmt(endLFin); endCatches = NULL; encFinallies++; invalidatePreds = true; } } /* Append any remaining endCatches, if any */ assert(!encFinallies == !endLFinStmt); if (encFinallies == 0) { assert(step == DUMMY_INIT(NULL)); block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS if (endCatches) impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); #ifdef DEBUG if (verbose) { printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS " "block %s\n", block->dspToString()); } #endif } else { // If leaveTarget is the start of another try block, we want to make sure that // we do not insert finalStep into that try block. Hence, we find the enclosing // try block. unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget); // Insert a new BB either in the try region indicated by tryIndex or // the handler region indicated by leaveTarget->bbHndIndex, // depending on which is the inner region. BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step); finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS; step->bbJumpDest = finalStep; /* The new block will inherit this block's weight */ finalStep->inheritWeight(block); #ifdef DEBUG if (verbose) { printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block %s\n", encFinallies, finalStep->dspToString()); } #endif Statement* lastStmt; if (endCatches) { lastStmt = gtNewStmt(endCatches); endLFinStmt->SetNextStmt(lastStmt); lastStmt->SetPrevStmt(endLFinStmt); } else { lastStmt = endLFinStmt; } impEndTreeList(finalStep, endLFinStmt, lastStmt); finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE // Queue up the jump target for importing impImportBlockPending(leaveTarget); invalidatePreds = true; } if (invalidatePreds && fgComputePredsDone) { JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n"); fgRemovePreds(); } #ifdef DEBUG fgVerifyHandlerTab(); if (verbose) { printf("\nAfter import CEE_LEAVE:\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG } #else // FEATURE_EH_FUNCLETS void Compiler::impImportLeave(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\nBefore import CEE_LEAVE in " FMT_BB " (targetting " FMT_BB "):\n", block->bbNum, block->bbJumpDest->bbNum); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created) unsigned blkAddr = block->bbCodeOffs; BasicBlock* leaveTarget = block->bbJumpDest; unsigned jmpAddr = leaveTarget->bbCodeOffs; // LEAVE clears the stack, spill side effects, and set stack to 0 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave")); verCurrentState.esStackDepth = 0; assert(block->bbJumpKind == BBJ_LEAVE); assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary BasicBlock* step = nullptr; enum StepType { // No step type; step == NULL. ST_None, // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair? // That is, is step->bbJumpDest where a finally will return to? ST_FinallyReturn, // The step block is a catch return. ST_Catch, // The step block is in a "try", created as the target for a finally return or the target for a catch return. 
ST_Try }; StepType stepType = ST_None; unsigned XTnum; EHblkDsc* HBtab; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { // Grab the handler offsets IL_OFFSET tryBeg = HBtab->ebdTryBegOffs(); IL_OFFSET tryEnd = HBtab->ebdTryEndOffs(); IL_OFFSET hndBeg = HBtab->ebdHndBegOffs(); IL_OFFSET hndEnd = HBtab->ebdHndEndOffs(); /* Is this a catch-handler we are CEE_LEAVEing out of? */ if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd)) { // Can't CEE_LEAVE out of a finally/fault handler if (HBtab->HasFinallyOrFaultHandler()) { BADCODE("leave out of fault/finally block"); } /* We are jumping out of a catch */ if (step == nullptr) { step = block; step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET stepType = ST_Catch; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a catch (EH#%u), convert block " FMT_BB " to BBJ_EHCATCHRET " "block\n", XTnum, step->bbNum); } #endif } else { BasicBlock* exitBlock; /* Create a new catch exit block in the catch region for the existing step block to jump to in this * scope */ exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step); assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET)); step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch // exit) returns to this block step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ exitBlock->inheritWeight(block); exitBlock->bbFlags |= BBF_IMPORTED; /* This exit block is the new step */ step = exitBlock; stepType = ST_Catch; invalidatePreds = true; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block " FMT_BB "\n", XTnum, exitBlock->bbNum); } #endif } } else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) { /* We are jumping out of a finally-protected try */ BasicBlock* callBlock; if (step == nullptr) { #if FEATURE_EH_CALLFINALLY_THUNKS // Put the call to the finally in the enclosing region. unsigned callFinallyTryIndex = (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1; unsigned callFinallyHndIndex = (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1; callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block); // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE, // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the // next block, and flow optimizations will remove it. 
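                // Illustrative sketch, not part of the original source: with call-finally thunks enabled
                // the original BBJ_LEAVE block is rewired roughly as
                //
                //     [inside the 'try']       block:     BBJ_ALWAYS      --> callBlock
                //     [enclosing EH region]    callBlock: BBJ_CALLFINALLY --> finally handler
                //
                // i.e. the call to the finally lives outside the 'try' being left, which is what the
                // code below sets up.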
block->bbJumpKind = BBJ_ALWAYS; block->bbJumpDest = callBlock; block->bbJumpDest->bbRefs++; /* The new block will inherit this block's weight */ callBlock->inheritWeight(block); callBlock->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB " to " "BBJ_ALWAYS, add BBJ_CALLFINALLY block " FMT_BB "\n", XTnum, block->bbNum, callBlock->bbNum); } #endif #else // !FEATURE_EH_CALLFINALLY_THUNKS callBlock = block; callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB " to " "BBJ_CALLFINALLY block\n", XTnum, callBlock->bbNum); } #endif #endif // !FEATURE_EH_CALLFINALLY_THUNKS } else { // Calling the finally block. We already have a step block that is either the call-to-finally from a // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by // a 'finally'), or the step block is the return from a catch. // // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will // automatically re-raise the exception, using the return address of the catch (that is, the target // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64, // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly // within the 'try' region protected by the finally, since we generate code in such a way that execution // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on // stack walks.) assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET)); #if FEATURE_EH_CALLFINALLY_THUNKS if (step->bbJumpKind == BBJ_EHCATCHRET) { // Need to create another step block in the 'try' region that will actually branch to the // call-to-finally thunk. BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step); step->bbJumpDest = step2; step->bbJumpDest->bbRefs++; step2->inheritWeight(block); step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is " "BBJ_EHCATCHRET (" FMT_BB "), new BBJ_ALWAYS step-step block " FMT_BB "\n", XTnum, step->bbNum, step2->bbNum); } #endif step = step2; assert(stepType == ST_Catch); // Leave it as catch type for now. } #endif // FEATURE_EH_CALLFINALLY_THUNKS #if FEATURE_EH_CALLFINALLY_THUNKS unsigned callFinallyTryIndex = (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1; unsigned callFinallyHndIndex = (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 
0 : HBtab->ebdEnclosingHndIndex + 1; #else // !FEATURE_EH_CALLFINALLY_THUNKS unsigned callFinallyTryIndex = XTnum + 1; unsigned callFinallyHndIndex = 0; // don't care #endif // !FEATURE_EH_CALLFINALLY_THUNKS callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step); step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next // finally in the chain) step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ callBlock->inheritWeight(block); callBlock->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY " "block " FMT_BB "\n", XTnum, callBlock->bbNum); } #endif } step = fgNewBBafter(BBJ_ALWAYS, callBlock, true); stepType = ST_FinallyReturn; /* The new block will inherit this block's weight */ step->inheritWeight(block); step->bbFlags |= BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) " "block " FMT_BB "\n", XTnum, step->bbNum); } #endif callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler. invalidatePreds = true; } else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) { // We are jumping out of a catch-protected try. // // If we are returning from a call to a finally, then we must have a step block within a try // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the // finally raises an exception), the VM will find this step block, notice that it is in a protected region, // and invoke the appropriate catch. // // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception), // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM, // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target // address of the catch return as the new exception address. That is, the re-raised exception appears to // occur at the catch return address. If this exception return address skips an enclosing try/catch that // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should. // For example: // // try { // try { // // something here raises ThreadAbortException // LEAVE LABEL_1; // no need to stop at LABEL_2 // } catch (Exception) { // // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so // // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode. // // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised // // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only // // need to do this transformation if the current EH block is a try/catch that catches // // ThreadAbortException (or one of its parents), however we might not be able to find that // // information, so currently we do it for all catch types. 
// LEAVE LABEL_1; // Convert this to LEAVE LABEL2; // } // LABEL_2: LEAVE LABEL_1; // inserted by this step creation code // } catch (ThreadAbortException) { // } // LABEL_1: // // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C# // compiler. if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch)) { BasicBlock* catchStep; assert(step); if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); } else { assert(stepType == ST_Catch); assert(step->bbJumpKind == BBJ_EHCATCHRET); } /* Create a new exit block in the try region for the existing step block to jump to in this scope */ catchStep = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step); step->bbJumpDest = catchStep; step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ catchStep->inheritWeight(block); catchStep->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { if (stepType == ST_FinallyReturn) { printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new " "BBJ_ALWAYS block " FMT_BB "\n", XTnum, catchStep->bbNum); } else { assert(stepType == ST_Catch); printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new " "BBJ_ALWAYS block " FMT_BB "\n", XTnum, catchStep->bbNum); } } #endif // DEBUG /* This block is the new step */ step = catchStep; stepType = ST_Try; invalidatePreds = true; } } } if (step == nullptr) { block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS #ifdef DEBUG if (verbose) { printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE " "block " FMT_BB " to BBJ_ALWAYS\n", block->bbNum); } #endif } else { step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) #ifdef DEBUG if (verbose) { printf("impImportLeave - final destination of step blocks set to " FMT_BB "\n", leaveTarget->bbNum); } #endif // Queue up the jump target for importing impImportBlockPending(leaveTarget); } if (invalidatePreds && fgComputePredsDone) { JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n"); fgRemovePreds(); } #ifdef DEBUG fgVerifyHandlerTab(); if (verbose) { printf("\nAfter import CEE_LEAVE:\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG } #endif // FEATURE_EH_FUNCLETS /*****************************************************************************/ // This is called when reimporting a leave block. It resets the JumpKind, // JumpDest, and bbNext to the original values void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) { #if defined(FEATURE_EH_FUNCLETS) // With EH Funclets, while importing leave opcode we create another block ending with BBJ_ALWAYS (call it B1) // and the block containing leave (say B0) is marked as BBJ_CALLFINALLY. Say for some reason we reimport B0, // it is reset (in this routine) by marking as ending with BBJ_LEAVE and further down when B0 is reimported, we // create another BBJ_ALWAYS (call it B2). 
In this process B1 gets orphaned and any blocks to which B1 is the // only predecessor are also considered orphans and attempted to be deleted. // // try { // .... // try // { // .... // leave OUTSIDE; // B0 is the block containing this leave, following this would be B1 // } finally { } // } finally { } // OUTSIDE: // // In the above nested try-finally example, we create a step block (call it Bstep) which in branches to a block // where a finally would branch to (and such block is marked as finally target). Block B1 branches to step block. // Because of re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be removed. To // work around this we will duplicate B0 (call it B0Dup) before reseting. B0Dup is marked as BBJ_CALLFINALLY and // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1 // will be treated as pair and handled correctly. if (block->bbJumpKind == BBJ_CALLFINALLY) { BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind); dupBlock->bbFlags = block->bbFlags; dupBlock->bbJumpDest = block->bbJumpDest; dupBlock->copyEHRegion(block); dupBlock->bbCatchTyp = block->bbCatchTyp; // Mark this block as // a) not referenced by any other block to make sure that it gets deleted // b) weight zero // c) prevent from being imported // d) as internal // e) as rarely run dupBlock->bbRefs = 0; dupBlock->bbWeight = BB_ZERO_WEIGHT; dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY; // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS // will be next to each other. fgInsertBBafter(block, dupBlock); #ifdef DEBUG if (verbose) { printf("New Basic Block " FMT_BB " duplicate of " FMT_BB " created.\n", dupBlock->bbNum, block->bbNum); } #endif } #endif // FEATURE_EH_FUNCLETS block->bbJumpKind = BBJ_LEAVE; fgInitBBLookup(); block->bbJumpDest = fgLookupBB(jmpAddr); // We will leave the BBJ_ALWAYS block we introduced. When it's reimported // the BBJ_ALWAYS block will be unreachable, and will be removed after. The // reason we don't want to remove the block at this point is that if we call // fgInitBBLookup() again we will do it wrong as the BBJ_ALWAYS block won't be // added and the linked list length will be different than fgBBcount. } /*****************************************************************************/ // Get the first non-prefix opcode. Used for verification of valid combinations // of prefixes and actual opcodes. OPCODE Compiler::impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp) { while (codeAddr < codeEndp) { OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr); codeAddr += sizeof(__int8); if (opcode == CEE_PREFIX1) { if (codeAddr >= codeEndp) { break; } opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256); codeAddr += sizeof(__int8); } switch (opcode) { case CEE_UNALIGNED: case CEE_VOLATILE: case CEE_TAILCALL: case CEE_CONSTRAINED: case CEE_READONLY: break; default: return opcode; } codeAddr += opcodeSizes[opcode]; } return CEE_ILLEGAL; } /*****************************************************************************/ // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes void Compiler::impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix) { OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (!( // Opcode of all ldind and stdind happen to be in continuous, except stind.i. 
((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) || (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) || (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) || // volatile. prefix is allowed with the ldsfld and stsfld (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD))))) { BADCODE("Invalid opcode for unaligned. or volatile. prefix"); } } /*****************************************************************************/ #ifdef DEBUG #undef RETURN // undef contracts RETURN macro enum controlFlow_t { NEXT, CALL, RETURN, THROW, BRANCH, COND_BRANCH, BREAK, PHI, META, }; const static controlFlow_t controlFlow[] = { #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow, #include "opcode.def" #undef OPDEF }; #endif // DEBUG /***************************************************************************** * Determine the result type of an arithemetic operation * On 64-bit inserts upcasts when native int is mixed with int32 */ var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2) { var_types type = TYP_UNDEF; GenTree* op1 = *pOp1; GenTree* op2 = *pOp2; // Arithemetic operations are generally only allowed with // primitive types, but certain operations are allowed // with byrefs if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF)) { if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF)) { // byref1-byref2 => gives a native int type = TYP_I_IMPL; } else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF)) { // [native] int - byref => gives a native int // // The reason is that it is possible, in managed C++, // to have a tree like this: // // - // / \. // / \. // / \. // / \. // const(h) int addr byref // // <BUGNUM> VSW 318822 </BUGNUM> // // So here we decide to make the resulting type to be a native int. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT if (genActualType(op1->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT type = TYP_I_IMPL; } else { // byref - [native] int => gives a byref assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet())); #ifdef TARGET_64BIT if ((genActualType(op2->TypeGet()) != TYP_I_IMPL)) { // insert an explicit upcast op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT type = TYP_BYREF; } } else if ((oper == GT_ADD) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF)) { // byref + [native] int => gives a byref // (or) // [native] int + byref => gives a byref // only one can be a byref : byref op byref not allowed assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF); assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet())); #ifdef TARGET_64BIT if (genActualType(op2->TypeGet()) == TYP_BYREF) { if (genActualType(op1->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } } else if (genActualType(op2->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? 
TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT type = TYP_BYREF; } #ifdef TARGET_64BIT else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL) { assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType)); // int + long => gives long // long + int => gives long // we get this because in the IL the long isn't Int64, it's just IntPtr if (genActualType(op1->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } else if (genActualType(op2->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } type = TYP_I_IMPL; } #else // 32-bit TARGET else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG) { assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType)); // int + long => gives long // long + int => gives long type = TYP_LONG; } #endif // TARGET_64BIT else { // int + int => gives an int assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF); assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) || (varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))); type = genActualType(op1->gtType); // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT. // Otherwise, turn floats into doubles if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT)) { assert(genActualType(op2->gtType) == TYP_DOUBLE); type = TYP_DOUBLE; } } assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT); return type; } //------------------------------------------------------------------------ // impOptimizeCastClassOrIsInst: attempt to resolve a cast when jitting // // Arguments: // op1 - value to cast // pResolvedToken - resolved token for type to cast to // isCastClass - true if this is a castclass, false if isinst // // Return Value: // tree representing optimized cast, or null if no optimization possible GenTree* Compiler::impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass) { assert(op1->TypeGet() == TYP_REF); // Don't optimize for minopts or debug codegen. if (opts.OptimizationDisabled()) { return nullptr; } // See what we know about the type of the object being cast. bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE fromClass = gtGetClassHandle(op1, &isExact, &isNonNull); if (fromClass != nullptr) { CORINFO_CLASS_HANDLE toClass = pResolvedToken->hClass; JITDUMP("\nConsidering optimization of %s from %s%p (%s) to %p (%s)\n", isCastClass ? "castclass" : "isinst", isExact ? "exact " : "", dspPtr(fromClass), info.compCompHnd->getClassName(fromClass), dspPtr(toClass), info.compCompHnd->getClassName(toClass)); // Perhaps we know if the cast will succeed or fail. TypeCompareState castResult = info.compCompHnd->compareTypesForCast(fromClass, toClass); if (castResult == TypeCompareState::Must) { // Cast will succeed, result is simply op1. JITDUMP("Cast will succeed, optimizing to simply return input\n"); return op1; } else if (castResult == TypeCompareState::MustNot) { // See if we can sharpen exactness by looking for final classes if (!isExact) { isExact = impIsClassExact(fromClass); } // Cast to exact type will fail. 
Handle case where we have // an exact type (that is, fromClass is not a subtype) // and we're not going to throw on failure. if (isExact && !isCastClass) { JITDUMP("Cast will fail, optimizing to return null\n"); GenTree* result = gtNewIconNode(0, TYP_REF); // If the cast was fed by a box, we can remove that too. if (op1->IsBoxedValue()) { JITDUMP("Also removing upstream box\n"); gtTryRemoveBoxUpstreamEffects(op1); } return result; } else if (isExact) { JITDUMP("Not optimizing failing castclass (yet)\n"); } else { JITDUMP("Can't optimize since fromClass is inexact\n"); } } else { JITDUMP("Result of cast unknown, must generate runtime test\n"); } } else { JITDUMP("\nCan't optimize since fromClass is unknown\n"); } return nullptr; } //------------------------------------------------------------------------ // impCastClassOrIsInstToTree: build and import castclass/isinst // // Arguments: // op1 - value to cast // op2 - type handle for type to cast to // pResolvedToken - resolved token from the cast operation // isCastClass - true if this is castclass, false means isinst // // Return Value: // Tree representing the cast // // Notes: // May expand into a series of runtime checks or a helper call. GenTree* Compiler::impCastClassOrIsInstToTree( GenTree* op1, GenTree* op2, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass, IL_OFFSET ilOffset) { assert(op1->TypeGet() == TYP_REF); // Optimistically assume the jit should expand this as an inline test bool shouldExpandInline = true; // Profitability check. // // Don't bother with inline expansion when jit is trying to // generate code quickly, or the cast is in code that won't run very // often, or the method already is pretty big. if (compCurBB->isRunRarely() || opts.OptimizationDisabled()) { // not worth the code expansion if jitting fast or in a rarely run block shouldExpandInline = false; } else if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals()) { // not worth creating an untracked local variable shouldExpandInline = false; } else if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) && (JitConfig.JitCastProfiling() == 1)) { // Optimizations are enabled but we're still instrumenting (including casts) if (isCastClass && !impIsClassExact(pResolvedToken->hClass)) { // Usually, we make a speculative assumption that it makes sense to expand castclass // even for non-sealed classes, but let's rely on PGO in this specific case shouldExpandInline = false; } } // Pessimistically assume the jit cannot expand this as an inline test bool canExpandInline = false; const CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass); // Legality check. // // Not all classclass/isinst operations can be inline expanded. // Check legality only if an inline expansion is desirable. if (shouldExpandInline) { if (isCastClass) { // Jit can only inline expand the normal CHKCASTCLASS helper. canExpandInline = (helper == CORINFO_HELP_CHKCASTCLASS); } else { if (helper == CORINFO_HELP_ISINSTANCEOFCLASS) { // If the class is exact, the jit can expand the IsInst check inline. canExpandInline = impIsClassExact(pResolvedToken->hClass); } } } const bool expandInline = canExpandInline && shouldExpandInline; if (!expandInline) { JITDUMP("\nExpanding %s as call because %s\n", isCastClass ? "castclass" : "isinst", canExpandInline ? 
"want smaller code or faster jitting" : "inline expansion not legal"); // If we CSE this class handle we prevent assertionProp from making SubType assertions // so instead we force the CSE logic to not consider CSE-ing this class handle. // op2->gtFlags |= GTF_DONT_CSE; GenTreeCall* call = gtNewHelperCallNode(helper, TYP_REF, gtNewCallArgs(op2, op1)); if (impIsCastHelperEligibleForClassProbe(call) && !impIsClassExact(pResolvedToken->hClass)) { ClassProfileCandidateInfo* pInfo = new (this, CMK_Inlining) ClassProfileCandidateInfo; pInfo->ilOffset = ilOffset; pInfo->probeIndex = info.compClassProbeCount++; call->gtClassProfileCandidateInfo = pInfo; compCurBB->bbFlags |= BBF_HAS_CLASS_PROFILE; } return call; } JITDUMP("\nExpanding %s inline\n", isCastClass ? "castclass" : "isinst"); impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2")); GenTree* temp; GenTree* condMT; // // expand the methodtable match: // // condMT ==> GT_NE // / \. // GT_IND op2 (typically CNS_INT) // | // op1Copy // // This can replace op1 with a GT_COMMA that evaluates op1 into a local // op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1")); // // op1 is now known to be a non-complex tree // thus we can use gtClone(op1) from now on // GenTree* op2Var = op2; if (isCastClass) { op2Var = fgInsertCommaFormTemp(&op2); lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true; } temp = gtNewMethodTableLookup(temp); condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2); GenTree* condNull; // // expand the null check: // // condNull ==> GT_EQ // / \. // op1Copy CNS_INT // null // condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF)); // // expand the true and false trees for the condMT // GenTree* condFalse = gtClone(op1); GenTree* condTrue; if (isCastClass) { // // use the special helper that skips the cases checked by our inlined cast // const CorInfoHelpFunc specialHelper = CORINFO_HELP_CHKCASTCLASS_SPECIAL; condTrue = gtNewHelperCallNode(specialHelper, TYP_REF, gtNewCallArgs(op2Var, gtClone(op1))); } else { condTrue = gtNewIconNode(0, TYP_REF); } GenTree* qmarkMT; // // Generate first QMARK - COLON tree // // qmarkMT ==> GT_QMARK // / \. // condMT GT_COLON // / \. // condFalse condTrue // temp = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse); qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp->AsColon()); if (isCastClass && impIsClassExact(pResolvedToken->hClass) && condTrue->OperIs(GT_CALL)) { // condTrue is used only for throwing InvalidCastException in case of casting to an exact class. condTrue->AsCall()->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN; } GenTree* qmarkNull; // // Generate second QMARK - COLON tree // // qmarkNull ==> GT_QMARK // / \. // condNull GT_COLON // / \. // qmarkMT op1Copy // temp = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT); qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp->AsColon()); qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF; // Make QMark node a top level node by spilling it. unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2")); impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE); // TODO-CQ: Is it possible op1 has a better type? // // See also gtGetHelperCallClassHandle where we make the same // determination for the helper call variants. 
LclVarDsc* lclDsc = lvaGetDesc(tmp); assert(lclDsc->lvSingleDef == 0); lclDsc->lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tmp); lvaSetClass(tmp, pResolvedToken->hClass); return gtNewLclvNode(tmp, TYP_REF); } #ifndef DEBUG #define assertImp(cond) ((void)0) #else #define assertImp(cond) \ do \ { \ if (!(cond)) \ { \ const int cchAssertImpBuf = 600; \ char* assertImpBuf = (char*)_alloca(cchAssertImpBuf); \ _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1, \ "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond, \ impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL", \ op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth); \ assertAbort(assertImpBuf, __FILE__, __LINE__); \ } \ } while (0) #endif // DEBUG //------------------------------------------------------------------------ // impBlockIsInALoop: check if a block might be in a loop // // Arguments: // block - block to check // // Returns: // true if the block might be in a loop. // // Notes: // Conservatively correct; may return true for some blocks that are // not actually in loops. // bool Compiler::impBlockIsInALoop(BasicBlock* block) { return (compIsForInlining() && ((impInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) != 0)) || ((block->bbFlags & BBF_BACKWARD_JUMP) != 0); } #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif /***************************************************************************** * Import the instr for the given basic block */ void Compiler::impImportBlockCode(BasicBlock* block) { #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind) #ifdef DEBUG if (verbose) { printf("\nImporting " FMT_BB " (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName); } #endif unsigned nxtStmtIndex = impInitBlockLineInfo(); IL_OFFSET nxtStmtOffs; CorInfoHelpFunc helper; CorInfoIsAccessAllowedResult accessAllowedResult; CORINFO_HELPER_DESC calloutHelper; const BYTE* lastLoadToken = nullptr; /* Get the tree list started */ impBeginTreeList(); #ifdef FEATURE_ON_STACK_REPLACEMENT bool enablePatchpoints = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0); #ifdef DEBUG // Optionally suppress patchpoints by method hash // static ConfigMethodRange JitEnablePatchpointRange; JitEnablePatchpointRange.EnsureInit(JitConfig.JitEnablePatchpointRange()); const unsigned hash = impInlineRoot()->info.compMethodHash(); const bool inRange = JitEnablePatchpointRange.Contains(hash); enablePatchpoints &= inRange; #endif // DEBUG if (enablePatchpoints) { // We don't inline at Tier0, if we do, we may need rethink our approach. // Could probably support inlines that don't introduce flow. // assert(!compIsForInlining()); // OSR is not yet supported for methods with explicit tail calls. // // But we also do not have to switch these methods to be optimized, as we should be // able to avoid getting trapped in Tier0 code by normal call counting. // So instead, just suppress adding patchpoints. // if (!compTailPrefixSeen) { // We only need to add patchpoints if the method can loop. // if (compHasBackwardJump) { assert(compCanHavePatchpoints()); // By default we use the "adaptive" strategy. // // This can create both source and target patchpoints within a given // loop structure, which isn't ideal, but is not incorrect. We will // just have some extra Tier0 overhead. 
// // Todo: implement support for mid-block patchpoints. If `block` // is truly a backedge source (and not in a handler) then we should be // able to find a stack empty point somewhere in the block. // const int patchpointStrategy = JitConfig.TC_PatchpointStrategy(); bool addPatchpoint = false; bool mustUseTargetPatchpoint = false; switch (patchpointStrategy) { default: { // Patchpoints at backedge sources, if possible, otherwise targets. // addPatchpoint = ((block->bbFlags & BBF_BACKWARD_JUMP_SOURCE) == BBF_BACKWARD_JUMP_SOURCE); mustUseTargetPatchpoint = (verCurrentState.esStackDepth != 0) || block->hasHndIndex(); break; } case 1: { // Patchpoints at stackempty backedge targets. // Note if we have loops where the IL stack is not empty on the backedge we can't patchpoint // them. // // We should not have allowed OSR if there were backedges in handlers. // assert(!block->hasHndIndex()); addPatchpoint = ((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) == BBF_BACKWARD_JUMP_TARGET) && (verCurrentState.esStackDepth == 0); break; } case 2: { // Adaptive strategy. // // Patchpoints at backedge targets if there are multiple backedges, // otherwise at backedge sources, if possible. Note a block can be both; if so we // just need one patchpoint. // if ((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) == BBF_BACKWARD_JUMP_TARGET) { // We don't know backedge count, so just use ref count. // addPatchpoint = (block->bbRefs > 1) && (verCurrentState.esStackDepth == 0); } if (!addPatchpoint && ((block->bbFlags & BBF_BACKWARD_JUMP_SOURCE) == BBF_BACKWARD_JUMP_SOURCE)) { addPatchpoint = true; mustUseTargetPatchpoint = (verCurrentState.esStackDepth != 0) || block->hasHndIndex(); // Also force target patchpoint if target block has multiple (backedge) preds. // if (!mustUseTargetPatchpoint) { for (BasicBlock* const succBlock : block->Succs(this)) { if ((succBlock->bbNum <= block->bbNum) && (succBlock->bbRefs > 1)) { mustUseTargetPatchpoint = true; break; } } } } break; } } if (addPatchpoint) { if (mustUseTargetPatchpoint) { // We wanted a source patchpoint, but could not have one. // So, add patchpoints to the backedge targets. // for (BasicBlock* const succBlock : block->Succs(this)) { if (succBlock->bbNum <= block->bbNum) { // The succBlock had better agree it's a target. // assert((succBlock->bbFlags & BBF_BACKWARD_JUMP_TARGET) == BBF_BACKWARD_JUMP_TARGET); // We may already have decided to put a patchpoint in succBlock. If not, add one. // if ((succBlock->bbFlags & BBF_PATCHPOINT) != 0) { // In some cases the target may not be stack-empty at entry. // If so, we will bypass patchpoints for this backedge. // if (succBlock->bbStackDepthOnEntry() > 0) { JITDUMP("\nCan't set source patchpoint at " FMT_BB ", can't use target " FMT_BB " as it has non-empty stack on entry.\n", block->bbNum, succBlock->bbNum); } else { JITDUMP("\nCan't set source patchpoint at " FMT_BB ", using target " FMT_BB " instead\n", block->bbNum, succBlock->bbNum); assert(!succBlock->hasHndIndex()); succBlock->bbFlags |= BBF_PATCHPOINT; } } } } } else { assert(!block->hasHndIndex()); block->bbFlags |= BBF_PATCHPOINT; } setMethodHasPatchpoint(); } } else { // Should not see backward branch targets w/o backwards branches. // So if !compHasBackwardsBranch, these flags should never be set. // assert((block->bbFlags & (BBF_BACKWARD_JUMP_TARGET | BBF_BACKWARD_JUMP_SOURCE)) == 0); } } #ifdef DEBUG // As a stress test, we can place patchpoints at the start of any block // that is a stack empty point and is not within a handler. 
// // Todo: enable for mid-block stack empty points too. // const int offsetOSR = JitConfig.JitOffsetOnStackReplacement(); const int randomOSR = JitConfig.JitRandomOnStackReplacement(); const bool tryOffsetOSR = offsetOSR >= 0; const bool tryRandomOSR = randomOSR > 0; if (compCanHavePatchpoints() && (tryOffsetOSR || tryRandomOSR) && (verCurrentState.esStackDepth == 0) && !block->hasHndIndex() && ((block->bbFlags & BBF_PATCHPOINT) == 0)) { // Block start can have a patchpoint. See if we should add one. // bool addPatchpoint = false; // Specific offset? // if (tryOffsetOSR) { if (impCurOpcOffs == (unsigned)offsetOSR) { addPatchpoint = true; } } // Random? // else { // Reuse the random inliner's random state. // Note m_inlineStrategy is always created, even if we're not inlining. // CLRRandom* const random = impInlineRoot()->m_inlineStrategy->GetRandom(randomOSR); const int randomValue = (int)random->Next(100); addPatchpoint = (randomValue < randomOSR); } if (addPatchpoint) { block->bbFlags |= BBF_PATCHPOINT; setMethodHasPatchpoint(); } JITDUMP("\n** %s patchpoint%s added to " FMT_BB " (il offset %u)\n", tryOffsetOSR ? "offset" : "random", addPatchpoint ? "" : " not", block->bbNum, impCurOpcOffs); } #endif // DEBUG } // Mark stack-empty rare blocks to be considered for partial compilation. // // Ideally these are conditionally executed blocks -- if the method is going // to unconditionally throw, there's not as much to be gained by deferring jitting. // For now, we just screen out the entry bb. // // In general we might want track all the IL stack empty points so we can // propagate rareness back through flow and place the partial compilation patchpoints "earlier" // so there are fewer overall. // // Note unlike OSR, it's ok to forgo these. // // Todo: stress mode... // if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_PartialCompilation() > 0) && compCanHavePatchpoints() && !compTailPrefixSeen) { // Is this block a good place for partial compilation? // if ((block != fgFirstBB) && block->isRunRarely() && (verCurrentState.esStackDepth == 0) && ((block->bbFlags & BBF_PATCHPOINT) == 0) && !block->hasHndIndex()) { JITDUMP("\nBlock " FMT_BB " will be a partial compilation patchpoint -- not importing\n", block->bbNum); block->bbFlags |= BBF_PARTIAL_COMPILATION_PATCHPOINT; setMethodHasPartialCompilationPatchpoint(); // Change block to BBJ_THROW so we won't trigger importation of successors. // block->bbJumpKind = BBJ_THROW; // If this method has a explicit generic context, the only uses of it may be in // the IL for this block. So assume it's used. 
// if (info.compMethodInfo->options & (CORINFO_GENERICS_CTXT_FROM_METHODDESC | CORINFO_GENERICS_CTXT_FROM_METHODTABLE)) { lvaGenericsContextInUse = true; } return; } } #endif // FEATURE_ON_STACK_REPLACEMENT /* Walk the opcodes that comprise the basic block */ const BYTE* codeAddr = info.compCode + block->bbCodeOffs; const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd; IL_OFFSET opcodeOffs = block->bbCodeOffs; IL_OFFSET lastSpillOffs = opcodeOffs; signed jmpDist; /* remember the start of the delegate creation sequence (used for verification) */ const BYTE* delegateCreateStart = nullptr; int prefixFlags = 0; bool explicitTailCall, constraintCall, readonlyCall; typeInfo tiRetVal; unsigned numArgs = info.compArgsCount; /* Now process all the opcodes in the block */ var_types callTyp = TYP_COUNT; OPCODE prevOpcode = CEE_ILLEGAL; if (block->bbCatchTyp) { if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) { impCurStmtOffsSet(block->bbCodeOffs); } // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block // to a temp. This is a trade off for code simplicity impSpillSpecialSideEff(); } while (codeAddr < codeEndp) { #ifdef FEATURE_READYTORUN bool usingReadyToRunHelper = false; #endif CORINFO_RESOLVED_TOKEN resolvedToken; CORINFO_RESOLVED_TOKEN constrainedResolvedToken; CORINFO_CALL_INFO callInfo; CORINFO_FIELD_INFO fieldInfo; tiRetVal = typeInfo(); // Default type info //--------------------------------------------------------------------- /* We need to restrict the max tree depth as many of the Compiler functions are recursive. We do this by spilling the stack */ if (verCurrentState.esStackDepth) { /* Has it been a while since we last saw a non-empty stack (which guarantees that the tree depth isnt accumulating. */ if ((opcodeOffs - lastSpillOffs) > MAX_TREE_SIZE && impCanSpillNow(prevOpcode)) { impSpillStackEnsure(); lastSpillOffs = opcodeOffs; } } else { lastSpillOffs = opcodeOffs; impBoxTempInUse = false; // nothing on the stack, box temp OK to use again } /* Compute the current instr offset */ opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); #ifndef DEBUG if (opts.compDbgInfo) #endif { nxtStmtOffs = (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET; /* Have we reached the next stmt boundary ? */ if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs) { assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]); if (verCurrentState.esStackDepth != 0 && opts.compDbgCode) { /* We need to provide accurate IP-mapping at this point. So spill anything on the stack so that it will form gtStmts with the correct stmt offset noted */ impSpillStackEnsure(true); } // Have we reported debug info for any tree? if (impCurStmtDI.IsValid() && opts.compDbgCode) { GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID); impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); assert(!impCurStmtDI.IsValid()); } if (!impCurStmtDI.IsValid()) { /* Make sure that nxtStmtIndex is in sync with opcodeOffs. If opcodeOffs has gone past nxtStmtIndex, catch up */ while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount && info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs) { nxtStmtIndex++; } /* Go to the new stmt */ impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]); /* Update the stmt boundary index */ nxtStmtIndex++; assert(nxtStmtIndex <= info.compStmtOffsetsCount); /* Are there any more line# entries after this one? 
*/ if (nxtStmtIndex < info.compStmtOffsetsCount) { /* Remember where the next line# starts */ nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex]; } else { /* No more line# entries */ nxtStmtOffs = BAD_IL_OFFSET; } } } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) && (verCurrentState.esStackDepth == 0)) { /* At stack-empty locations, we have already added the tree to the stmt list with the last offset. We just need to update impCurStmtDI */ impCurStmtOffsSet(opcodeOffs); } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) && impOpcodeIsCallSiteBoundary(prevOpcode)) { /* Make sure we have a type cached */ assert(callTyp != TYP_COUNT); if (callTyp == TYP_VOID) { impCurStmtOffsSet(opcodeOffs); } else if (opts.compDbgCode) { impSpillStackEnsure(true); impCurStmtOffsSet(opcodeOffs); } } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP)) { if (opts.compDbgCode) { impSpillStackEnsure(true); } impCurStmtOffsSet(opcodeOffs); } assert(!impCurStmtDI.IsValid() || (nxtStmtOffs == BAD_IL_OFFSET) || (impCurStmtDI.GetLocation().GetOffset() <= nxtStmtOffs)); } CORINFO_CLASS_HANDLE clsHnd = DUMMY_INIT(NULL); CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL); CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL); var_types lclTyp, ovflType = TYP_UNKNOWN; GenTree* op1 = DUMMY_INIT(NULL); GenTree* op2 = DUMMY_INIT(NULL); GenTree* newObjThisPtr = DUMMY_INIT(NULL); bool uns = DUMMY_INIT(false); bool isLocal = false; /* Get the next opcode and the size of its parameters */ OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr); codeAddr += sizeof(__int8); #ifdef DEBUG impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1); JITDUMP("\n [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs); #endif DECODE_OPCODE: // Return if any previous code has caused inline to fail. if (compDonotInline()) { return; } /* Get the size of additional parameters */ signed int sz = opcodeSizes[opcode]; #ifdef DEBUG clsHnd = NO_CLASS_HANDLE; lclTyp = TYP_COUNT; callTyp = TYP_COUNT; impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1); impCurOpcName = opcodeNames[opcode]; if (verbose && (opcode != CEE_PREFIX1)) { printf("%s", impCurOpcName); } /* Use assertImp() to display the opcode */ op1 = op2 = nullptr; #endif /* See what kind of an opcode we have, then */ unsigned mflags = 0; unsigned clsFlags = 0; switch (opcode) { unsigned lclNum; var_types type; GenTree* op3; genTreeOps oper; unsigned size; int val; CORINFO_SIG_INFO sig; IL_OFFSET jmpAddr; bool ovfl, unordered, callNode; bool ldstruct; CORINFO_CLASS_HANDLE tokenType; union { int intVal; float fltVal; __int64 lngVal; double dblVal; } cval; case CEE_PREFIX1: opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256); opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); codeAddr += sizeof(__int8); goto DECODE_OPCODE; SPILL_APPEND: // We need to call impSpillLclRefs() for a struct type lclVar. // This is because there may be loads of that lclVar on the evaluation stack, and // we need to ensure that those loads are completed before we modify it. 
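            // For example (illustrative IL): in "ldloc.0; ldloca.0; initobj SomeStruct; ...", the value of
            // V00 pushed by the earlier ldloc.0 is still sitting on the evaluation stack when the initobj
            // assignment mutates V00; spilling that pending load to a temp first preserves the value it
            // was supposed to observe.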
if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtGetOp1())) { GenTree* lhs = op1->gtGetOp1(); GenTreeLclVarCommon* lclVar = nullptr; if (lhs->gtOper == GT_LCL_VAR) { lclVar = lhs->AsLclVarCommon(); } else if (lhs->OperIsBlk()) { // Check if LHS address is within some struct local, to catch // cases where we're updating the struct by something other than a stfld GenTree* addr = lhs->AsBlk()->Addr(); // Catches ADDR(LCL_VAR), or ADD(ADDR(LCL_VAR),CNS_INT)) lclVar = addr->IsLocalAddrExpr(); // Catches ADDR(FIELD(... ADDR(LCL_VAR))) if (lclVar == nullptr) { GenTree* lclTree = nullptr; if (impIsAddressInLocal(addr, &lclTree)) { lclVar = lclTree->AsLclVarCommon(); } } } if (lclVar != nullptr) { impSpillLclRefs(lclVar->GetLclNum()); } } /* Append 'op1' to the list of statements */ impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); goto DONE_APPEND; APPEND: /* Append 'op1' to the list of statements */ impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); goto DONE_APPEND; DONE_APPEND: #ifdef DEBUG // Remember at which BC offset the tree was finished impNoteLastILoffs(); #endif break; case CEE_LDNULL: impPushNullObjRefOnStack(); break; case CEE_LDC_I4_M1: case CEE_LDC_I4_0: case CEE_LDC_I4_1: case CEE_LDC_I4_2: case CEE_LDC_I4_3: case CEE_LDC_I4_4: case CEE_LDC_I4_5: case CEE_LDC_I4_6: case CEE_LDC_I4_7: case CEE_LDC_I4_8: cval.intVal = (opcode - CEE_LDC_I4_0); assert(-1 <= cval.intVal && cval.intVal <= 8); goto PUSH_I4CON; case CEE_LDC_I4_S: cval.intVal = getI1LittleEndian(codeAddr); goto PUSH_I4CON; case CEE_LDC_I4: cval.intVal = getI4LittleEndian(codeAddr); goto PUSH_I4CON; PUSH_I4CON: JITDUMP(" %d", cval.intVal); impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT)); break; case CEE_LDC_I8: cval.lngVal = getI8LittleEndian(codeAddr); JITDUMP(" 0x%016llx", cval.lngVal); impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG)); break; case CEE_LDC_R8: cval.dblVal = getR8LittleEndian(codeAddr); JITDUMP(" %#.17g", cval.dblVal); impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE)); break; case CEE_LDC_R4: cval.dblVal = getR4LittleEndian(codeAddr); JITDUMP(" %#.17g", cval.dblVal); impPushOnStack(gtNewDconNode(cval.dblVal, TYP_FLOAT), typeInfo(TI_DOUBLE)); break; case CEE_LDSTR: val = getU4LittleEndian(codeAddr); JITDUMP(" %08X", val); impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal); break; case CEE_LDARG: lclNum = getU2LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case CEE_LDARG_S: lclNum = getU1LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case CEE_LDARG_0: case CEE_LDARG_1: case CEE_LDARG_2: case CEE_LDARG_3: lclNum = (opcode - CEE_LDARG_0); assert(lclNum >= 0 && lclNum < 4); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC: lclNum = getU2LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC_S: lclNum = getU1LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC_0: case CEE_LDLOC_1: case CEE_LDLOC_2: case CEE_LDLOC_3: lclNum = (opcode - CEE_LDLOC_0); assert(lclNum >= 0 && lclNum < 4); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_STARG: lclNum = getU2LittleEndian(codeAddr); goto STARG; case CEE_STARG_S: lclNum = getU1LittleEndian(codeAddr); STARG: JITDUMP(" %u", lclNum); if (compIsForInlining()) { op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo); 
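                    // In an inlinee the IL argument has already been mapped to a caller-side tree by
                    // impInlineFetchArg; a store requires that mapping to be a plain local (see the
                    // assert below) so the store can simply be redirected to that temp's local number.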
noway_assert(op1->gtOper == GT_LCL_VAR); lclNum = op1->AsLclVar()->GetLclNum(); goto VAR_ST_VALID; } lclNum = compMapILargNum(lclNum); // account for possible hidden param assertImp(lclNum < numArgs); if (lclNum == info.compThisArg) { lclNum = lvaArg0Var; } // We should have seen this arg write in the prescan assert(lvaTable[lclNum].lvHasILStoreOp); goto VAR_ST; case CEE_STLOC: lclNum = getU2LittleEndian(codeAddr); isLocal = true; JITDUMP(" %u", lclNum); goto LOC_ST; case CEE_STLOC_S: lclNum = getU1LittleEndian(codeAddr); isLocal = true; JITDUMP(" %u", lclNum); goto LOC_ST; case CEE_STLOC_0: case CEE_STLOC_1: case CEE_STLOC_2: case CEE_STLOC_3: isLocal = true; lclNum = (opcode - CEE_STLOC_0); assert(lclNum >= 0 && lclNum < 4); LOC_ST: if (compIsForInlining()) { lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo; /* Have we allocated a temp for this local? */ lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp")); goto _PopValue; } lclNum += numArgs; VAR_ST: if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var) { BADCODE("Bad IL"); } VAR_ST_VALID: /* if it is a struct assignment, make certain we don't overflow the buffer */ assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd)); if (lvaTable[lclNum].lvNormalizeOnLoad()) { lclTyp = lvaGetRealType(lclNum); } else { lclTyp = lvaGetActualType(lclNum); } _PopValue: /* Pop the value being assigned */ { StackEntry se = impPopStack(); clsHnd = se.seTypeInfo.GetClassHandle(); op1 = se.val; tiRetVal = se.seTypeInfo; } #ifdef FEATURE_SIMD if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet())) { assert(op1->TypeGet() == TYP_STRUCT); op1->gtType = lclTyp; } #endif // FEATURE_SIMD op1 = impImplicitIorI4Cast(op1, lclTyp); #ifdef TARGET_64BIT // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT)) { op1 = gtNewCastNode(TYP_INT, op1, false, TYP_INT); } #endif // TARGET_64BIT // We had better assign it a value of the correct type assertImp( genActualType(lclTyp) == genActualType(op1->gtType) || (genActualType(lclTyp) == TYP_I_IMPL && op1->IsLocalAddrExpr() != nullptr) || (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) || (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) || (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) || ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF)); /* If op1 is "&var" then its type is the transient "*" and it can be used either as TYP_BYREF or TYP_I_IMPL */ if (op1->IsLocalAddrExpr() != nullptr) { assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF); /* When "&var" is created, we assume it is a byref. If it is being assigned to a TYP_I_IMPL var, change the type to prevent unnecessary GC info */ if (genActualType(lclTyp) == TYP_I_IMPL) { op1->gtType = TYP_I_IMPL; } } // If this is a local and the local is a ref type, see // if we can improve type information based on the // value being assigned. if (isLocal && (lclTyp == TYP_REF)) { // We should have seen a stloc in our IL prescan. assert(lvaTable[lclNum].lvHasILStoreOp); // Is there just one place this local is defined? const bool isSingleDefLocal = lvaTable[lclNum].lvSingleDef; // Conservative check that there is just one // definition that reaches this store. 
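                    // (Roughly: an empty stack on entry to this block means the value being stored was
                    //  produced within the block itself, rather than flowing in from predecessor blocks,
                    //  so the type observed here reflects the one definition.)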
const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0); if (isSingleDefLocal && hasSingleReachingDef) { lvaUpdateClass(lclNum, op1, clsHnd); } } /* Filter out simple assignments to itself */ if (op1->gtOper == GT_LCL_VAR && lclNum == op1->AsLclVarCommon()->GetLclNum()) { if (opts.compDbgCode) { op1 = gtNewNothingNode(); goto SPILL_APPEND; } else { break; } } /* Create the assignment node */ op2 = gtNewLclvNode(lclNum, lclTyp DEBUGARG(opcodeOffs + sz + 1)); /* If the local is aliased or pinned, we need to spill calls and indirections from the stack. */ if ((lvaTable[lclNum].IsAddressExposed() || lvaTable[lclNum].lvHasLdAddrOp || lvaTable[lclNum].lvPinned) && (verCurrentState.esStackDepth > 0)) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased or is pinned")); } /* Spill any refs to the local from the stack */ impSpillLclRefs(lclNum); // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE // We insert a cast to the dest 'op2' type // if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)) { op1 = gtNewCastNode(op2->TypeGet(), op1, false, op2->TypeGet()); } if (varTypeIsStruct(lclTyp)) { op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL); } else { op1 = gtNewAssignNode(op2, op1); } goto SPILL_APPEND; case CEE_LDLOCA: lclNum = getU2LittleEndian(codeAddr); goto LDLOCA; case CEE_LDLOCA_S: lclNum = getU1LittleEndian(codeAddr); LDLOCA: JITDUMP(" %u", lclNum); if (compIsForInlining()) { // Get the local type lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo; /* Have we allocated a temp for this local? */ lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp")); assert(!lvaGetDesc(lclNum)->lvNormalizeOnLoad()); op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum)); goto _PUSH_ADRVAR; } lclNum += numArgs; assertImp(lclNum < info.compLocalsCount); goto ADRVAR; case CEE_LDARGA: lclNum = getU2LittleEndian(codeAddr); goto LDARGA; case CEE_LDARGA_S: lclNum = getU1LittleEndian(codeAddr); LDARGA: JITDUMP(" %u", lclNum); Verify(lclNum < info.compILargsCount, "bad arg num"); if (compIsForInlining()) { // In IL, LDARGA(_S) is used to load the byref managed pointer of struct argument, // followed by a ldfld to load the field. op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo); if (op1->gtOper != GT_LCL_VAR) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR); return; } assert(op1->gtOper == GT_LCL_VAR); goto _PUSH_ADRVAR; } lclNum = compMapILargNum(lclNum); // account for possible hidden param assertImp(lclNum < numArgs); if (lclNum == info.compThisArg) { lclNum = lvaArg0Var; } goto ADRVAR; ADRVAR: op1 = impCreateLocalNode(lclNum DEBUGARG(opcodeOffs + sz + 1)); _PUSH_ADRVAR: assert(op1->gtOper == GT_LCL_VAR); /* Note that this is supposed to create the transient type "*" which may be used as a TYP_I_IMPL. However we catch places where it is used as a TYP_I_IMPL and change the node if needed. Thus we are pessimistic and may report byrefs in the GC info where it was not absolutely needed, but it is safer this way. 
*/ op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1); // &aliasedVar doesnt need GTF_GLOB_REF, though alisasedVar does assert((op1->gtFlags & GTF_GLOB_REF) == 0); tiRetVal = lvaTable[lclNum].lvVerTypeInfo; impPushOnStack(op1, tiRetVal); break; case CEE_ARGLIST: if (!info.compIsVarArgs) { BADCODE("arglist in non-vararg method"); } assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG); /* The ARGLIST cookie is a hidden 'last' parameter, we have already adjusted the arg count cos this is like fetching the last param */ assertImp(0 < numArgs); lclNum = lvaVarargsHandleArg; op1 = gtNewLclvNode(lclNum, TYP_I_IMPL DEBUGARG(opcodeOffs + sz + 1)); op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1); impPushOnStack(op1, tiRetVal); break; case CEE_ENDFINALLY: if (compIsForInlining()) { assert(!"Shouldn't have exception handlers in the inliner!"); compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY); return; } if (verCurrentState.esStackDepth > 0) { impEvalSideEffects(); } if (info.compXcptnsCount == 0) { BADCODE("endfinally outside finally"); } assert(verCurrentState.esStackDepth == 0); op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr); goto APPEND; case CEE_ENDFILTER: if (compIsForInlining()) { assert(!"Shouldn't have exception handlers in the inliner!"); compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER); return; } block->bbSetRunRarely(); // filters are rare if (info.compXcptnsCount == 0) { BADCODE("endfilter outside filter"); } op1 = impPopStack().val; assertImp(op1->gtType == TYP_INT); if (!bbInFilterILRange(block)) { BADCODE("EndFilter outside a filter handler"); } /* Mark current bb as end of filter */ assert(compCurBB->bbFlags & BBF_DONT_REMOVE); assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET); /* Mark catch handler as successor */ op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1); if (verCurrentState.esStackDepth != 0) { verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__) DEBUGARG(__LINE__)); } goto APPEND; case CEE_RET: prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it RET: if (!impReturnInstruction(prefixFlags, opcode)) { return; // abort } else { break; } case CEE_JMP: assert(!compIsForInlining()); if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex()) { /* CEE_JMP does not make sense in some "protected" regions. */ BADCODE("Jmp not allowed in protected region"); } if (opts.IsReversePInvoke()) { BADCODE("Jmp not allowed in reverse P/Invoke"); } if (verCurrentState.esStackDepth != 0) { BADCODE("Stack must be empty after CEE_JMPs"); } _impResolveToken(CORINFO_TOKENKIND_Method); JITDUMP(" %08X", resolvedToken.token); /* The signature of the target has to be identical to ours. 
At least check that argCnt and returnType match */ eeGetMethodSig(resolvedToken.hMethod, &sig); if (sig.numArgs != info.compMethodInfo->args.numArgs || sig.retType != info.compMethodInfo->args.retType || sig.callConv != info.compMethodInfo->args.callConv) { BADCODE("Incompatible target for CEE_JMPs"); } op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod); /* Mark the basic block as being a JUMP instead of RETURN */ block->bbFlags |= BBF_HAS_JMP; /* Set this flag to make sure register arguments have a location assigned * even if we don't use them inside the method */ compJmpOpUsed = true; fgNoStructPromotion = true; goto APPEND; case CEE_LDELEMA: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); ldelemClsHnd = resolvedToken.hClass; // If it's a value class array we just do a simple address-of if (eeIsValueClass(ldelemClsHnd)) { CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd); if (cit == CORINFO_TYPE_UNDEF) { lclTyp = TYP_STRUCT; } else { lclTyp = JITtype2varType(cit); } goto ARR_LD_POST_VERIFY; } // Similarly, if its a readonly access, we can do a simple address-of // without doing a runtime type-check if (prefixFlags & PREFIX_READONLY) { lclTyp = TYP_REF; goto ARR_LD_POST_VERIFY; } // Otherwise we need the full helper function with run-time type check op1 = impTokenToHandle(&resolvedToken); if (op1 == nullptr) { // compDonotInline() return; } { GenTreeCall::Use* args = gtNewCallArgs(op1); // Type args = gtPrependNewCallArg(impPopStack().val, args); // index args = gtPrependNewCallArg(impPopStack().val, args); // array op1 = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, args); } impPushOnStack(op1, tiRetVal); break; // ldelem for reference and value types case CEE_LDELEM: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); ldelemClsHnd = resolvedToken.hClass; // If it's a reference type or generic variable type // then just generate code as though it's a ldelem.ref instruction if (!eeIsValueClass(ldelemClsHnd)) { lclTyp = TYP_REF; opcode = CEE_LDELEM_REF; } else { CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd); lclTyp = JITtype2varType(jitTyp); tiRetVal = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct tiRetVal.NormaliseForStack(); } goto ARR_LD_POST_VERIFY; case CEE_LDELEM_I1: lclTyp = TYP_BYTE; goto ARR_LD; case CEE_LDELEM_I2: lclTyp = TYP_SHORT; goto ARR_LD; case CEE_LDELEM_I: lclTyp = TYP_I_IMPL; goto ARR_LD; // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter // and treating it as TYP_INT avoids other asserts. 
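            // (For example, ldelem.u4 of the element 0xFFFFFFFF is simply pushed as the int32 value -1;
            //  the evaluation stack only tracks int32 for 4-byte integers, so nothing is lost here.)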
case CEE_LDELEM_U4: lclTyp = TYP_INT; goto ARR_LD; case CEE_LDELEM_I4: lclTyp = TYP_INT; goto ARR_LD; case CEE_LDELEM_I8: lclTyp = TYP_LONG; goto ARR_LD; case CEE_LDELEM_REF: lclTyp = TYP_REF; goto ARR_LD; case CEE_LDELEM_R4: lclTyp = TYP_FLOAT; goto ARR_LD; case CEE_LDELEM_R8: lclTyp = TYP_DOUBLE; goto ARR_LD; case CEE_LDELEM_U1: lclTyp = TYP_UBYTE; goto ARR_LD; case CEE_LDELEM_U2: lclTyp = TYP_USHORT; goto ARR_LD; ARR_LD: ARR_LD_POST_VERIFY: /* Pull the index value and array address */ op2 = impPopStack().val; op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF); /* Check for null pointer - in the inliner case we simply abort */ if (compIsForInlining()) { if (op1->gtOper == GT_CNS_INT) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM); return; } } /* Mark the block as containing an index expression */ if (op1->gtOper == GT_LCL_VAR) { if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD) { block->bbFlags |= BBF_HAS_IDX_LEN; optMethodFlags |= OMF_HAS_ARRAYREF; } } /* Create the index node and push it on the stack */ op1 = gtNewIndexRef(lclTyp, op1, op2); ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT); if ((opcode == CEE_LDELEMA) || ldstruct || (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd))) { assert(ldelemClsHnd != DUMMY_INIT(NULL)); // remember the element size if (lclTyp == TYP_REF) { op1->AsIndex()->gtIndElemSize = TARGET_POINTER_SIZE; } else { // If ldElemClass is precisely a primitive type, use that, otherwise, preserve the struct type. if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF) { op1->AsIndex()->gtStructElemClass = ldelemClsHnd; } assert(lclTyp != TYP_STRUCT || op1->AsIndex()->gtStructElemClass != nullptr); if (lclTyp == TYP_STRUCT) { size = info.compCompHnd->getClassSize(ldelemClsHnd); op1->AsIndex()->gtIndElemSize = size; op1->gtType = lclTyp; } } if ((opcode == CEE_LDELEMA) || ldstruct) { // wrap it in a & lclTyp = TYP_BYREF; op1 = gtNewOperNode(GT_ADDR, lclTyp, op1); } else { assert(lclTyp != TYP_STRUCT); } } if (ldstruct) { // Create an OBJ for the result op1 = gtNewObjNode(ldelemClsHnd, op1); op1->gtFlags |= GTF_EXCEPT; } impPushOnStack(op1, tiRetVal); break; // stelem for reference and value types case CEE_STELEM: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); stelemClsHnd = resolvedToken.hClass; // If it's a reference type just behave as though it's a stelem.ref instruction if (!eeIsValueClass(stelemClsHnd)) { goto STELEM_REF_POST_VERIFY; } // Otherwise extract the type { CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd); lclTyp = JITtype2varType(jitTyp); goto ARR_ST_POST_VERIFY; } case CEE_STELEM_REF: STELEM_REF_POST_VERIFY: if (opts.OptimizationEnabled()) { GenTree* array = impStackTop(2).val; GenTree* value = impStackTop().val; // Is this a case where we can skip the covariant store check? 
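                    // (For instance, storing a null reference, or storing into an array whose element type
                    //  is known exactly and matches the value's type, cannot fail the covariance check.)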
if (impCanSkipCovariantStoreCheck(value, array)) { lclTyp = TYP_REF; goto ARR_ST_POST_VERIFY; } } // Else call a helper function to do the assignment op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, impPopCallArgs(3, nullptr)); goto SPILL_APPEND; case CEE_STELEM_I1: lclTyp = TYP_BYTE; goto ARR_ST; case CEE_STELEM_I2: lclTyp = TYP_SHORT; goto ARR_ST; case CEE_STELEM_I: lclTyp = TYP_I_IMPL; goto ARR_ST; case CEE_STELEM_I4: lclTyp = TYP_INT; goto ARR_ST; case CEE_STELEM_I8: lclTyp = TYP_LONG; goto ARR_ST; case CEE_STELEM_R4: lclTyp = TYP_FLOAT; goto ARR_ST; case CEE_STELEM_R8: lclTyp = TYP_DOUBLE; goto ARR_ST; ARR_ST: ARR_ST_POST_VERIFY: /* The strict order of evaluation is LHS-operands, RHS-operands, range-check, and then assignment. However, codegen currently does the range-check before evaluation the RHS-operands. So to maintain strict ordering, we spill the stack. */ if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( "Strict ordering of exceptions for Array store")); } /* Pull the new value from the stack */ op2 = impPopStack().val; /* Pull the index value */ op1 = impPopStack().val; /* Pull the array address */ op3 = impPopStack().val; assertImp(op3->gtType == TYP_REF); if (op2->IsLocalAddrExpr() != nullptr) { op2->gtType = TYP_I_IMPL; } // Mark the block as containing an index expression if (op3->gtOper == GT_LCL_VAR) { if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD) { block->bbFlags |= BBF_HAS_IDX_LEN; optMethodFlags |= OMF_HAS_ARRAYREF; } } /* Create the index node */ op1 = gtNewIndexRef(lclTyp, op3, op1); /* Create the assignment node and append it */ if (lclTyp == TYP_STRUCT) { assert(stelemClsHnd != DUMMY_INIT(NULL)); op1->AsIndex()->gtStructElemClass = stelemClsHnd; op1->AsIndex()->gtIndElemSize = info.compCompHnd->getClassSize(stelemClsHnd); } if (varTypeIsStruct(op1)) { op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL); } else { op2 = impImplicitR4orR8Cast(op2, op1->TypeGet()); op1 = gtNewAssignNode(op1, op2); } /* Mark the expression as containing an assignment */ op1->gtFlags |= GTF_ASG; goto SPILL_APPEND; case CEE_ADD: oper = GT_ADD; goto MATH_OP2; case CEE_ADD_OVF: uns = false; goto ADD_OVF; case CEE_ADD_OVF_UN: uns = true; goto ADD_OVF; ADD_OVF: ovfl = true; callNode = false; oper = GT_ADD; goto MATH_OP2_FLAGS; case CEE_SUB: oper = GT_SUB; goto MATH_OP2; case CEE_SUB_OVF: uns = false; goto SUB_OVF; case CEE_SUB_OVF_UN: uns = true; goto SUB_OVF; SUB_OVF: ovfl = true; callNode = false; oper = GT_SUB; goto MATH_OP2_FLAGS; case CEE_MUL: oper = GT_MUL; goto MATH_MAYBE_CALL_NO_OVF; case CEE_MUL_OVF: uns = false; goto MUL_OVF; case CEE_MUL_OVF_UN: uns = true; goto MUL_OVF; MUL_OVF: ovfl = true; oper = GT_MUL; goto MATH_MAYBE_CALL_OVF; // Other binary math operations case CEE_DIV: oper = GT_DIV; goto MATH_MAYBE_CALL_NO_OVF; case CEE_DIV_UN: oper = GT_UDIV; goto MATH_MAYBE_CALL_NO_OVF; case CEE_REM: oper = GT_MOD; goto MATH_MAYBE_CALL_NO_OVF; case CEE_REM_UN: oper = GT_UMOD; goto MATH_MAYBE_CALL_NO_OVF; MATH_MAYBE_CALL_NO_OVF: ovfl = false; MATH_MAYBE_CALL_OVF: // Morpher has some complex logic about when to turn different // typed nodes on different platforms into helper calls. We // need to either duplicate that logic here, or just // pessimistically make all the nodes large enough to become // call nodes. Since call nodes aren't that much larger and // these opcodes are infrequent enough I chose the latter. 
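// Concrete illustration, consistent with the comment above: on 32-bit targets a
// 64-bit CEE_DIV/CEE_REM is typically morphed into a helper call such as
// CORINFO_HELP_LDIV/CORINFO_HELP_LMOD, so the node allocated below has to be
// large enough to be rewritten into a GT_CALL in place.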
callNode = true; goto MATH_OP2_FLAGS; case CEE_AND: oper = GT_AND; goto MATH_OP2; case CEE_OR: oper = GT_OR; goto MATH_OP2; case CEE_XOR: oper = GT_XOR; goto MATH_OP2; MATH_OP2: // For default values of 'ovfl' and 'callNode' ovfl = false; callNode = false; MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set /* Pull two values and push back the result */ op2 = impPopStack().val; op1 = impPopStack().val; /* Can't do arithmetic with references */ assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF); // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change if its a true byref, only // if it is in the stack) impBashVarAddrsToI(op1, op2); type = impGetByRefResultType(oper, uns, &op1, &op2); assert(!ovfl || !varTypeIsFloating(op1->gtType)); /* Special case: "int+0", "int-0", "int*1", "int/1" */ if (op2->gtOper == GT_CNS_INT) { if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) || (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV))) { impPushOnStack(op1, tiRetVal); break; } } // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand // if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)) { if (op1->TypeGet() != type) { // We insert a cast of op1 to 'type' op1 = gtNewCastNode(type, op1, false, type); } if (op2->TypeGet() != type) { // We insert a cast of op2 to 'type' op2 = gtNewCastNode(type, op2, false, type); } } if (callNode) { /* These operators can later be transformed into 'GT_CALL' */ assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]); #ifndef TARGET_ARM assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]); #endif // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying // that we'll need to transform into a general large node, but rather specifically // to a call: by doing it this way, things keep working if there are multiple sizes, // and a CALL is no longer the largest. // That said, as of now it *is* a large node, so we'll do this with an assert rather // than an "if". 
assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE); op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true)); } else { op1 = gtNewOperNode(oper, type, op1, op2); } /* Special case: integer/long division may throw an exception */ if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow(this)) { op1->gtFlags |= GTF_EXCEPT; } if (ovfl) { assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL); if (ovflType != TYP_UNKNOWN) { op1->gtType = ovflType; } op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW); if (uns) { op1->gtFlags |= GTF_UNSIGNED; } } impPushOnStack(op1, tiRetVal); break; case CEE_SHL: oper = GT_LSH; goto CEE_SH_OP2; case CEE_SHR: oper = GT_RSH; goto CEE_SH_OP2; case CEE_SHR_UN: oper = GT_RSZ; goto CEE_SH_OP2; CEE_SH_OP2: op2 = impPopStack().val; op1 = impPopStack().val; // operand to be shifted impBashVarAddrsToI(op1, op2); type = genActualType(op1->TypeGet()); op1 = gtNewOperNode(oper, type, op1, op2); impPushOnStack(op1, tiRetVal); break; case CEE_NOT: op1 = impPopStack().val; impBashVarAddrsToI(op1, nullptr); type = genActualType(op1->TypeGet()); impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal); break; case CEE_CKFINITE: op1 = impPopStack().val; type = op1->TypeGet(); op1 = gtNewOperNode(GT_CKFINITE, type, op1); op1->gtFlags |= GTF_EXCEPT; impPushOnStack(op1, tiRetVal); break; case CEE_LEAVE: val = getI4LittleEndian(codeAddr); // jump distance jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val); goto LEAVE; case CEE_LEAVE_S: val = getI1LittleEndian(codeAddr); // jump distance jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val); LEAVE: if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE); return; } JITDUMP(" %04X", jmpAddr); if (block->bbJumpKind != BBJ_LEAVE) { impResetLeaveBlock(block, jmpAddr); } assert(jmpAddr == block->bbJumpDest->bbCodeOffs); impImportLeave(block); impNoteBranchOffs(); break; case CEE_BR: case CEE_BR_S: jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr); if (compIsForInlining() && jmpDist == 0) { break; /* NOP */ } impNoteBranchOffs(); break; case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRFALSE_S: /* Pop the comparand (now there's a neat term) from the stack */ op1 = impPopStack().val; type = op1->TypeGet(); // Per Ecma-355, brfalse and brtrue are only specified for nint, ref, and byref. // // We've historically been a bit more permissive, so here we allow // any type that gtNewZeroConNode can handle. if (!varTypeIsArithmetic(type) && !varTypeIsGC(type)) { BADCODE("invalid type for brtrue/brfalse"); } if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext)) { block->bbJumpKind = BBJ_NONE; if (op1->gtFlags & GTF_GLOB_EFFECT) { op1 = gtUnusedValNode(op1); goto SPILL_APPEND; } else { break; } } if (op1->OperIsCompare()) { if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S) { // Flip the sense of the compare op1 = gtReverseCond(op1); } } else { // We'll compare against an equally-sized integer 0 // For small types, we always compare against int op2 = gtNewZeroConNode(genActualType(op1->gtType)); // Create the comparison operator and try to fold it oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? 
GT_NE : GT_EQ; op1 = gtNewOperNode(oper, TYP_INT, op1, op2); } // fall through COND_JUMP: /* Fold comparison if we can */ op1 = gtFoldExpr(op1); /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/ /* Don't make any blocks unreachable in import only mode */ if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly()) { /* gtFoldExpr() should prevent this as we don't want to make any blocks unreachable under compDbgCode */ assert(!opts.compDbgCode); BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->AsIntCon()->gtIconVal ? BBJ_ALWAYS : BBJ_NONE); assertImp((block->bbJumpKind == BBJ_COND) // normal case || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the // block for the second time block->bbJumpKind = foldedJumpKind; #ifdef DEBUG if (verbose) { if (op1->AsIntCon()->gtIconVal) { printf("\nThe conditional jump becomes an unconditional jump to " FMT_BB "\n", block->bbJumpDest->bbNum); } else { printf("\nThe block falls through into the next " FMT_BB "\n", block->bbNext->bbNum); } } #endif break; } op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1); /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt' in impImportBlock(block). For correct line numbers, spill stack. */ if (opts.compDbgCode && impCurStmtDI.IsValid()) { impSpillStackEnsure(true); } goto SPILL_APPEND; case CEE_CEQ: oper = GT_EQ; uns = false; goto CMP_2_OPs; case CEE_CGT_UN: oper = GT_GT; uns = true; goto CMP_2_OPs; case CEE_CGT: oper = GT_GT; uns = false; goto CMP_2_OPs; case CEE_CLT_UN: oper = GT_LT; uns = true; goto CMP_2_OPs; case CEE_CLT: oper = GT_LT; uns = false; goto CMP_2_OPs; CMP_2_OPs: op2 = impPopStack().val; op1 = impPopStack().val; // Recognize the IL idiom of CGT_UN(op1, 0) and normalize // it so that downstream optimizations don't have to. if ((opcode == CEE_CGT_UN) && op2->IsIntegralConst(0)) { oper = GT_NE; uns = false; } #ifdef TARGET_64BIT // TODO-Casts: create a helper that upcasts int32 -> native int when necessary. // See also identical code in impGetByRefResultType and STSFLD import. if (varTypeIsI(op1) && (genActualType(op2) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, TYP_I_IMPL); } else if (varTypeIsI(op2) && (genActualType(op1) == TYP_INT)) { op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, TYP_I_IMPL); } #endif // TARGET_64BIT assertImp(genActualType(op1) == genActualType(op2) || (varTypeIsI(op1) && varTypeIsI(op2)) || (varTypeIsFloating(op1) && varTypeIsFloating(op2))); // Create the comparison node. op1 = gtNewOperNode(oper, TYP_INT, op1, op2); // TODO: setting both flags when only one is appropriate. if (uns) { op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED; } // Fold result, if possible. 
op1 = gtFoldExpr(op1); impPushOnStack(op1, tiRetVal); break; case CEE_BEQ_S: case CEE_BEQ: oper = GT_EQ; goto CMP_2_OPs_AND_BR; case CEE_BGE_S: case CEE_BGE: oper = GT_GE; goto CMP_2_OPs_AND_BR; case CEE_BGE_UN_S: case CEE_BGE_UN: oper = GT_GE; goto CMP_2_OPs_AND_BR_UN; case CEE_BGT_S: case CEE_BGT: oper = GT_GT; goto CMP_2_OPs_AND_BR; case CEE_BGT_UN_S: case CEE_BGT_UN: oper = GT_GT; goto CMP_2_OPs_AND_BR_UN; case CEE_BLE_S: case CEE_BLE: oper = GT_LE; goto CMP_2_OPs_AND_BR; case CEE_BLE_UN_S: case CEE_BLE_UN: oper = GT_LE; goto CMP_2_OPs_AND_BR_UN; case CEE_BLT_S: case CEE_BLT: oper = GT_LT; goto CMP_2_OPs_AND_BR; case CEE_BLT_UN_S: case CEE_BLT_UN: oper = GT_LT; goto CMP_2_OPs_AND_BR_UN; case CEE_BNE_UN_S: case CEE_BNE_UN: oper = GT_NE; goto CMP_2_OPs_AND_BR_UN; CMP_2_OPs_AND_BR_UN: uns = true; unordered = true; goto CMP_2_OPs_AND_BR_ALL; CMP_2_OPs_AND_BR: uns = false; unordered = false; goto CMP_2_OPs_AND_BR_ALL; CMP_2_OPs_AND_BR_ALL: /* Pull two values */ op2 = impPopStack().val; op1 = impPopStack().val; #ifdef TARGET_64BIT if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL); } else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT)) { op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) || (varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet())) || (varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))); if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext)) { block->bbJumpKind = BBJ_NONE; if (op1->gtFlags & GTF_GLOB_EFFECT) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( "Branch to next Optimization, op1 side effect")); impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } if (op2->gtFlags & GTF_GLOB_EFFECT) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( "Branch to next Optimization, op2 side effect")); impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } #ifdef DEBUG if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT) { impNoteLastILoffs(); } #endif break; } // We can generate an compare of different sized floating point op1 and op2 // We insert a cast // if (varTypeIsFloating(op1->TypeGet())) { if (op1->TypeGet() != op2->TypeGet()) { assert(varTypeIsFloating(op2->TypeGet())); // say op1=double, op2=float. To avoid loss of precision // while comparing, op2 is converted to double and double // comparison is done. 
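// Illustrative note (added): C# inserts the widening conversion itself, but
// hand-written CIL may legally compare a float32 and a float64 directly since
// both have stack type F; this block is what keeps such a comparison at double
// precision.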
if (op1->TypeGet() == TYP_DOUBLE) { // We insert a cast of op2 to TYP_DOUBLE op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE); } else if (op2->TypeGet() == TYP_DOUBLE) { // We insert a cast of op1 to TYP_DOUBLE op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE); } } } /* Create and append the operator */ op1 = gtNewOperNode(oper, TYP_INT, op1, op2); if (uns) { op1->gtFlags |= GTF_UNSIGNED; } if (unordered) { op1->gtFlags |= GTF_RELOP_NAN_UN; } goto COND_JUMP; case CEE_SWITCH: /* Pop the switch value off the stack */ op1 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op1->TypeGet())); /* We can create a switch node */ op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1); val = (int)getU4LittleEndian(codeAddr); codeAddr += 4 + val * 4; // skip over the switch-table goto SPILL_APPEND; /************************** Casting OPCODES ***************************/ case CEE_CONV_OVF_I1: lclTyp = TYP_BYTE; goto CONV_OVF; case CEE_CONV_OVF_I2: lclTyp = TYP_SHORT; goto CONV_OVF; case CEE_CONV_OVF_I: lclTyp = TYP_I_IMPL; goto CONV_OVF; case CEE_CONV_OVF_I4: lclTyp = TYP_INT; goto CONV_OVF; case CEE_CONV_OVF_I8: lclTyp = TYP_LONG; goto CONV_OVF; case CEE_CONV_OVF_U1: lclTyp = TYP_UBYTE; goto CONV_OVF; case CEE_CONV_OVF_U2: lclTyp = TYP_USHORT; goto CONV_OVF; case CEE_CONV_OVF_U: lclTyp = TYP_U_IMPL; goto CONV_OVF; case CEE_CONV_OVF_U4: lclTyp = TYP_UINT; goto CONV_OVF; case CEE_CONV_OVF_U8: lclTyp = TYP_ULONG; goto CONV_OVF; case CEE_CONV_OVF_I1_UN: lclTyp = TYP_BYTE; goto CONV_OVF_UN; case CEE_CONV_OVF_I2_UN: lclTyp = TYP_SHORT; goto CONV_OVF_UN; case CEE_CONV_OVF_I_UN: lclTyp = TYP_I_IMPL; goto CONV_OVF_UN; case CEE_CONV_OVF_I4_UN: lclTyp = TYP_INT; goto CONV_OVF_UN; case CEE_CONV_OVF_I8_UN: lclTyp = TYP_LONG; goto CONV_OVF_UN; case CEE_CONV_OVF_U1_UN: lclTyp = TYP_UBYTE; goto CONV_OVF_UN; case CEE_CONV_OVF_U2_UN: lclTyp = TYP_USHORT; goto CONV_OVF_UN; case CEE_CONV_OVF_U_UN: lclTyp = TYP_U_IMPL; goto CONV_OVF_UN; case CEE_CONV_OVF_U4_UN: lclTyp = TYP_UINT; goto CONV_OVF_UN; case CEE_CONV_OVF_U8_UN: lclTyp = TYP_ULONG; goto CONV_OVF_UN; CONV_OVF_UN: uns = true; goto CONV_OVF_COMMON; CONV_OVF: uns = false; goto CONV_OVF_COMMON; CONV_OVF_COMMON: ovfl = true; goto _CONV; case CEE_CONV_I1: lclTyp = TYP_BYTE; goto CONV; case CEE_CONV_I2: lclTyp = TYP_SHORT; goto CONV; case CEE_CONV_I: lclTyp = TYP_I_IMPL; goto CONV; case CEE_CONV_I4: lclTyp = TYP_INT; goto CONV; case CEE_CONV_I8: lclTyp = TYP_LONG; goto CONV; case CEE_CONV_U1: lclTyp = TYP_UBYTE; goto CONV; case CEE_CONV_U2: lclTyp = TYP_USHORT; goto CONV; #if (REGSIZE_BYTES == 8) case CEE_CONV_U: lclTyp = TYP_U_IMPL; goto CONV_UN; #else case CEE_CONV_U: lclTyp = TYP_U_IMPL; goto CONV; #endif case CEE_CONV_U4: lclTyp = TYP_UINT; goto CONV; case CEE_CONV_U8: lclTyp = TYP_ULONG; goto CONV_UN; case CEE_CONV_R4: lclTyp = TYP_FLOAT; goto CONV; case CEE_CONV_R8: lclTyp = TYP_DOUBLE; goto CONV; case CEE_CONV_R_UN: lclTyp = TYP_DOUBLE; goto CONV_UN; CONV_UN: uns = true; ovfl = false; goto _CONV; CONV: uns = false; ovfl = false; goto _CONV; _CONV: // only converts from FLOAT or DOUBLE to an integer type // and converts from ULONG (or LONG on ARM) to DOUBLE are morphed to calls if (varTypeIsFloating(lclTyp)) { callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl #ifdef TARGET_64BIT // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK? // TYP_BYREF could be used as TYP_I_IMPL which is long. 
// TODO-CQ: remove this when we lower casts long/ulong --> float/double // and generate SSE2 code instead of going through helper calls. || (impStackTop().val->TypeGet() == TYP_BYREF) #endif ; } else { callNode = varTypeIsFloating(impStackTop().val->TypeGet()); } op1 = impPopStack().val; impBashVarAddrsToI(op1); // Casts from floating point types must not have GTF_UNSIGNED set. if (varTypeIsFloating(op1)) { uns = false; } // At this point uns, ovf, callNode are all set. if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND) { op2 = op1->AsOp()->gtOp2; if (op2->gtOper == GT_CNS_INT) { ssize_t ival = op2->AsIntCon()->gtIconVal; ssize_t mask, umask; switch (lclTyp) { case TYP_BYTE: case TYP_UBYTE: mask = 0x00FF; umask = 0x007F; break; case TYP_USHORT: case TYP_SHORT: mask = 0xFFFF; umask = 0x7FFF; break; default: assert(!"unexpected type"); return; } if (((ival & umask) == ival) || ((ival & mask) == ival && uns)) { /* Toss the cast, it's a waste of time */ impPushOnStack(op1, tiRetVal); break; } else if (ival == mask) { /* Toss the masking, it's a waste of time, since we sign-extend from the small value anyways */ op1 = op1->AsOp()->gtOp1; } } } /* The 'op2' sub-operand of a cast is the 'real' type number, since the result of a cast to one of the 'small' integer types is an integer. */ type = genActualType(lclTyp); // If this is a no-op cast, just use op1. if (!ovfl && (type == op1->TypeGet()) && (genTypeSize(type) == genTypeSize(lclTyp))) { // Nothing needs to change } // Work is evidently required, add cast node else { if (callNode) { op1 = gtNewCastNodeL(type, op1, uns, lclTyp); } else { op1 = gtNewCastNode(type, op1, uns, lclTyp); } if (ovfl) { op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT); } if (op1->gtGetOp1()->OperIsConst() && opts.OptimizationEnabled()) { // Try and fold the introduced cast op1 = gtFoldExprConst(op1); } } impPushOnStack(op1, tiRetVal); break; case CEE_NEG: op1 = impPopStack().val; impBashVarAddrsToI(op1, nullptr); impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal); break; case CEE_POP: { /* Pull the top value from the stack */ StackEntry se = impPopStack(); clsHnd = se.seTypeInfo.GetClassHandle(); op1 = se.val; /* Get hold of the type of the value being duplicated */ lclTyp = genActualType(op1->gtType); /* Does the value have any side effects? */ if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode) { // Since we are throwing away the value, just normalize // it to its address. This is more efficient. if (varTypeIsStruct(op1)) { JITDUMP("\n ... CEE_POP struct ...\n"); DISPTREE(op1); #ifdef UNIX_AMD64_ABI // Non-calls, such as obj or ret_expr, have to go through this. // Calls with large struct return value have to go through this. // Helper calls with small struct return value also have to go // through this since they do not follow Unix calling convention. if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd, op1->AsCall()->GetUnmanagedCallConv()) || op1->AsCall()->gtCallType == CT_HELPER) #endif // UNIX_AMD64_ABI { // If the value being produced comes from loading // via an underlying address, just null check the address. if (op1->OperIs(GT_FIELD, GT_IND, GT_OBJ)) { gtChangeOperToNullCheck(op1, block); } else { op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false); } JITDUMP("\n ... optimized to ...\n"); DISPTREE(op1); } } // If op1 is non-overflow cast, throw it away since it is useless. 
// Another reason for throwing away the useless cast is in the context of // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)). // The cast gets added as part of importing GT_CALL, which gets in the way // of fgMorphCall() on the forms of tail call nodes that we assert. if ((op1->gtOper == GT_CAST) && !op1->gtOverflow()) { op1 = op1->AsOp()->gtOp1; } if (op1->gtOper != GT_CALL) { if ((op1->gtFlags & GTF_SIDE_EFFECT) != 0) { op1 = gtUnusedValNode(op1); } else { // Can't bash to NOP here because op1 can be referenced from `currentBlock->bbEntryState`, // if we ever need to reimport we need a valid LCL_VAR on it. op1 = gtNewNothingNode(); } } /* Append the value to the tree list */ goto SPILL_APPEND; } /* No side effects - just throw the <BEEP> thing away */ } break; case CEE_DUP: { StackEntry se = impPopStack(); GenTree* tree = se.val; tiRetVal = se.seTypeInfo; op1 = tree; // If the expression to dup is simple, just clone it. // Otherwise spill it to a temp, and reload the temp twice. bool cloneExpr = false; if (!opts.compDbgCode) { // Duplicate 0 and +0.0 if (op1->IsIntegralConst(0) || op1->IsFloatPositiveZero()) { cloneExpr = true; } // Duplicate locals and addresses of them else if (op1->IsLocal()) { cloneExpr = true; } else if (op1->TypeIs(TYP_BYREF) && op1->OperIs(GT_ADDR) && op1->gtGetOp1()->IsLocal() && (OPCODE)impGetNonPrefixOpcode(codeAddr + sz, codeEndp) != CEE_INITOBJ) { cloneExpr = true; } } else { // Always clone for debug mode cloneExpr = true; } if (!cloneExpr) { const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill")); impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); var_types type = genActualType(lvaTable[tmpNum].TypeGet()); op1 = gtNewLclvNode(tmpNum, type); // Propagate type info to the temp from the stack and the original tree if (type == TYP_REF) { assert(lvaTable[tmpNum].lvSingleDef == 0); lvaTable[tmpNum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def local\n", tmpNum); lvaSetClass(tmpNum, tree, tiRetVal.GetClassHandle()); } } op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("DUP instruction")); assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT)); impPushOnStack(op1, tiRetVal); impPushOnStack(op2, tiRetVal); } break; case CEE_STIND_I1: lclTyp = TYP_BYTE; goto STIND; case CEE_STIND_I2: lclTyp = TYP_SHORT; goto STIND; case CEE_STIND_I4: lclTyp = TYP_INT; goto STIND; case CEE_STIND_I8: lclTyp = TYP_LONG; goto STIND; case CEE_STIND_I: lclTyp = TYP_I_IMPL; goto STIND; case CEE_STIND_REF: lclTyp = TYP_REF; goto STIND; case CEE_STIND_R4: lclTyp = TYP_FLOAT; goto STIND; case CEE_STIND_R8: lclTyp = TYP_DOUBLE; goto STIND; STIND: op2 = impPopStack().val; // value to store op1 = impPopStack().val; // address to store to // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF); impBashVarAddrsToI(op1, op2); op2 = impImplicitR4orR8Cast(op2, lclTyp); #ifdef TARGET_64BIT // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType)) { op2->gtType = TYP_I_IMPL; } else { // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity // if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT)) { op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT); } // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity // if 
(varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL); } } #endif // TARGET_64BIT if (opcode == CEE_STIND_REF) { // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType)); lclTyp = genActualType(op2->TypeGet()); } // Check target type. #ifdef DEBUG if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF) { if (op2->gtType == TYP_BYREF) { assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL); } else if (lclTyp == TYP_BYREF) { assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType)); } } else { assertImp(genActualType(op2->gtType) == genActualType(lclTyp) || ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) || (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp))); } #endif op1 = gtNewOperNode(GT_IND, lclTyp, op1); // stind could point anywhere, example a boxed class static int op1->gtFlags |= GTF_IND_TGTANYWHERE; if (prefixFlags & PREFIX_VOLATILE) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered op1->gtFlags |= GTF_IND_VOLATILE; } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_IND_UNALIGNED; } op1 = gtNewAssignNode(op1, op2); op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF; // Spill side-effects AND global-data-accesses if (verCurrentState.esStackDepth > 0) { impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND")); } goto APPEND; case CEE_LDIND_I1: lclTyp = TYP_BYTE; goto LDIND; case CEE_LDIND_I2: lclTyp = TYP_SHORT; goto LDIND; case CEE_LDIND_U4: case CEE_LDIND_I4: lclTyp = TYP_INT; goto LDIND; case CEE_LDIND_I8: lclTyp = TYP_LONG; goto LDIND; case CEE_LDIND_REF: lclTyp = TYP_REF; goto LDIND; case CEE_LDIND_I: lclTyp = TYP_I_IMPL; goto LDIND; case CEE_LDIND_R4: lclTyp = TYP_FLOAT; goto LDIND; case CEE_LDIND_R8: lclTyp = TYP_DOUBLE; goto LDIND; case CEE_LDIND_U1: lclTyp = TYP_UBYTE; goto LDIND; case CEE_LDIND_U2: lclTyp = TYP_USHORT; goto LDIND; LDIND: op1 = impPopStack().val; // address to load from impBashVarAddrsToI(op1); #ifdef TARGET_64BIT // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity // if (genActualType(op1->gtType) == TYP_INT) { op1 = gtNewCastNode(TYP_I_IMPL, op1, false, TYP_I_IMPL); } #endif assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF); op1 = gtNewOperNode(GT_IND, lclTyp, op1); // ldind could point anywhere, example a boxed class static int op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE); if (prefixFlags & PREFIX_VOLATILE) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered op1->gtFlags |= GTF_IND_VOLATILE; } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_IND_UNALIGNED; } impPushOnStack(op1, tiRetVal); break; case CEE_UNALIGNED: assert(sz == 1); val = getU1LittleEndian(codeAddr); ++codeAddr; JITDUMP(" %u", val); if ((val != 1) && (val != 2) && (val != 4)) { BADCODE("Alignment unaligned. must be 1, 2, or 4"); } Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. 
prefixes"); prefixFlags |= PREFIX_UNALIGNED; impValidateMemoryAccessOpcode(codeAddr, codeEndp, false); PREFIX: opcode = (OPCODE)getU1LittleEndian(codeAddr); opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); codeAddr += sizeof(__int8); goto DECODE_OPCODE; case CEE_VOLATILE: Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes"); prefixFlags |= PREFIX_VOLATILE; impValidateMemoryAccessOpcode(codeAddr, codeEndp, true); assert(sz == 0); goto PREFIX; case CEE_LDFTN: { // Need to do a lookup here so that we perform an access check // and do a NOWAY if protections are violated _impResolveToken(CORINFO_TOKENKIND_Method); JITDUMP(" %08X", resolvedToken.token); eeGetCallInfo(&resolvedToken, (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr, combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN), &callInfo); // This check really only applies to intrinsic Array.Address methods if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE) { NO_WAY("Currently do not support LDFTN of Parameterized functions"); } // Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own. impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); DO_LDFTN: op1 = impMethodPointer(&resolvedToken, &callInfo); if (compDonotInline()) { return; } // Call info may have more precise information about the function than // the resolved token. CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken); assert(callInfo.hMethod != nullptr); heapToken->hMethod = callInfo.hMethod; impPushOnStack(op1, typeInfo(heapToken)); break; } case CEE_LDVIRTFTN: { /* Get the method token */ _impResolveToken(CORINFO_TOKENKIND_Method); JITDUMP(" %08X", resolvedToken.token); eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */, combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN), CORINFO_CALLINFO_CALLVIRT), &callInfo); // This check really only applies to intrinsic Array.Address methods if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE) { NO_WAY("Currently do not support LDFTN of Parameterized functions"); } mflags = callInfo.methodFlags; impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); if (compIsForInlining()) { if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL)) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL); return; } } CORINFO_SIG_INFO& ftnSig = callInfo.sig; /* Get the object-ref */ op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF); if (opts.IsReadyToRun()) { if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN) { if (op1->gtFlags & GTF_SIDE_EFFECT) { op1 = gtUnusedValNode(op1); impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } goto DO_LDFTN; } } else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL)) { if (op1->gtFlags & GTF_SIDE_EFFECT) { op1 = gtUnusedValNode(op1); impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } goto DO_LDFTN; } GenTree* fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo); if (compDonotInline()) { return; } CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken); assert(heapToken->tokenType == CORINFO_TOKENKIND_Method); assert(callInfo.hMethod != nullptr); heapToken->tokenType = CORINFO_TOKENKIND_Ldvirtftn; heapToken->hMethod = callInfo.hMethod; impPushOnStack(fptr, typeInfo(heapToken)); break; } case CEE_CONSTRAINED: assertImp(sz == sizeof(unsigned)); impResolveToken(codeAddr, &constrainedResolvedToken, 
CORINFO_TOKENKIND_Constrained); codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually JITDUMP(" (%08X) ", constrainedResolvedToken.token); Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes"); prefixFlags |= PREFIX_CONSTRAINED; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (actualOpcode != CEE_CALLVIRT && actualOpcode != CEE_CALL && actualOpcode != CEE_LDFTN) { BADCODE("constrained. has to be followed by callvirt, call or ldftn"); } } goto PREFIX; case CEE_READONLY: JITDUMP(" readonly."); Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes"); prefixFlags |= PREFIX_READONLY; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode)) { BADCODE("readonly. has to be followed by ldelema or call"); } } assert(sz == 0); goto PREFIX; case CEE_TAILCALL: JITDUMP(" tail."); Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes"); prefixFlags |= PREFIX_TAILCALL_EXPLICIT; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (!impOpcodeIsCallOpcode(actualOpcode)) { BADCODE("tailcall. has to be followed by call, callvirt or calli"); } } assert(sz == 0); goto PREFIX; case CEE_NEWOBJ: /* Since we will implicitly insert newObjThisPtr at the start of the argument list, spill any GTF_ORDER_SIDEEFF */ impSpillSpecialSideEff(); /* NEWOBJ does not respond to TAIL */ prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT; /* NEWOBJ does not respond to CONSTRAINED */ prefixFlags &= ~PREFIX_CONSTRAINED; _impResolveToken(CORINFO_TOKENKIND_NewObj); eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/, combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM), &callInfo); mflags = callInfo.methodFlags; if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0) { BADCODE("newobj on static or abstract method"); } // Insert the security callout before any actual code is generated impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); // There are three different cases for new // Object size is variable (depends on arguments) // 1) Object is an array (arrays treated specially by the EE) // 2) Object is some other variable sized object (e.g. String) // 3) Class Size can be determined beforehand (normal case) // In the first case, we need to call a NEWOBJ helper (multinewarray) // in the second case we call the constructor with a '0' this pointer // In the third case we alloc the memory, then call the constuctor clsFlags = callInfo.classFlags; if (clsFlags & CORINFO_FLG_ARRAY) { // Arrays need to call the NEWOBJ helper. assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE); impImportNewObjArray(&resolvedToken, &callInfo); if (compDonotInline()) { return; } callTyp = TYP_REF; break; } // At present this can only be String else if (clsFlags & CORINFO_FLG_VAROBJSIZE) { // Skip this thisPtr argument newObjThisPtr = nullptr; /* Remember that this basic block contains 'new' of an object */ block->bbFlags |= BBF_HAS_NEWOBJ; optMethodFlags |= OMF_HAS_NEWOBJ; } else { // This is the normal case where the size of the object is // fixed. Allocate the memory and call the constructor. 
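// Hedged examples of the three cases above: "new int[2,3]" reaches the
// CORINFO_FLG_ARRAY path (newobj on a multi-dimensional array ctor goes through
// impImportNewObjArray), "new string('a', 5)" is the CORINFO_FLG_VAROBJSIZE case
// (null this pointer, the ctor returns the object), and an ordinary class or
// value-type ctor lands here, where we allocate first and then call the ctor.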
// Note: We cannot add a peep to avoid use of temp here // becase we don't have enough interference info to detect when // sources and destination interfere, example: s = new S(ref); // TODO: We find the correct place to introduce a general // reverse copy prop for struct return values from newobj or // any function returning structs. /* get a temporary for the new object */ lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp")); if (compDonotInline()) { // Fail fast if lvaGrabTemp fails with CALLSITE_TOO_MANY_LOCALS. assert(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS); return; } // In the value class case we only need clsHnd for size calcs. // // The lookup of the code pointer will be handled by CALL in this case if (clsFlags & CORINFO_FLG_VALUECLASS) { if (compIsForInlining()) { // If value class has GC fields, inform the inliner. It may choose to // bail out on the inline. DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass); if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0) { compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT); if (compInlineResult->IsFailure()) { return; } // Do further notification in the case where the call site is rare; // some policies do not track the relative hotness of call sites for // "always" inline cases. if (impInlineInfo->iciBlock->isRunRarely()) { compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT); if (compInlineResult->IsFailure()) { return; } } } } CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass); if (impIsPrimitive(jitTyp)) { lvaTable[lclNum].lvType = JITtype2varType(jitTyp); } else { // The local variable itself is the allocated space. // Here we need unsafe value cls check, since the address of struct is taken for further use // and potentially exploitable. lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */); } bool bbInALoop = impBlockIsInALoop(block); bool bbIsReturn = (block->bbJumpKind == BBJ_RETURN) && (!compIsForInlining() || (impInlineInfo->iciBlock->bbJumpKind == BBJ_RETURN)); LclVarDsc* const lclDsc = lvaGetDesc(lclNum); if (fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn)) { // Append a tree to zero-out the temp newObjThisPtr = gtNewLclvNode(lclNum, lclDsc->TypeGet()); newObjThisPtr = gtNewBlkOpNode(newObjThisPtr, // Dest gtNewIconNode(0), // Value false, // isVolatile false); // not copyBlock impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } else { JITDUMP("\nSuppressing zero-init for V%02u -- expect to zero in prolog\n", lclNum); lclDsc->lvSuppressedZeroInit = 1; compSuppressedZeroInit = true; } // Obtain the address of the temp newObjThisPtr = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet())); } else { // If we're newing up a finalizable object, spill anything that can cause exceptions. // bool hasSideEffects = false; CorInfoHelpFunc newHelper = info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd, &hasSideEffects); if (hasSideEffects) { JITDUMP("\nSpilling stack for finalizable newobj\n"); impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("finalizable newobj spill")); } const bool useParent = true; op1 = gtNewAllocObjNode(&resolvedToken, useParent); if (op1 == nullptr) { return; } // Remember that this basic block contains 'new' of an object block->bbFlags |= BBF_HAS_NEWOBJ; optMethodFlags |= OMF_HAS_NEWOBJ; // Append the assignment to the temp/local. 
Dont need to spill // at all as we are just calling an EE-Jit helper which can only // cause an (async) OutOfMemoryException. // We assign the newly allocated object (by a GT_ALLOCOBJ node) // to a temp. Note that the pattern "temp = allocObj" is required // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes // without exhaustive walk over all expressions. impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE); assert(lvaTable[lclNum].lvSingleDef == 0); lvaTable[lclNum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def local\n", lclNum); lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */); newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF); } } goto CALL; case CEE_CALLI: /* CALLI does not respond to CONSTRAINED */ prefixFlags &= ~PREFIX_CONSTRAINED; FALLTHROUGH; case CEE_CALLVIRT: case CEE_CALL: // We can't call getCallInfo on the token from a CALLI, but we need it in // many other places. We unfortunately embed that knowledge here. if (opcode != CEE_CALLI) { _impResolveToken(CORINFO_TOKENKIND_Method); eeGetCallInfo(&resolvedToken, (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr, // this is how impImportCall invokes getCallInfo combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS), (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT : CORINFO_CALLINFO_NONE), &callInfo); } else { // Suppress uninitialized use warning. memset(&resolvedToken, 0, sizeof(resolvedToken)); memset(&callInfo, 0, sizeof(callInfo)); resolvedToken.token = getU4LittleEndian(codeAddr); resolvedToken.tokenContext = impTokenLookupContextHandle; resolvedToken.tokenScope = info.compScopeHnd; } CALL: // memberRef should be set. // newObjThisPtr should be set for CEE_NEWOBJ JITDUMP(" %08X", resolvedToken.token); constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0; bool newBBcreatedForTailcallStress; bool passedStressModeValidation; newBBcreatedForTailcallStress = false; passedStressModeValidation = true; if (compIsForInlining()) { if (compDonotInline()) { return; } // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks. assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0); } else { if (compTailCallStress()) { // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()? // Tail call stress only recognizes call+ret patterns and forces them to be // explicit tail prefixed calls. Also fgMakeBasicBlocks() under tail call stress // doesn't import 'ret' opcode following the call into the basic block containing // the call instead imports it to a new basic block. Note that fgMakeBasicBlocks() // is already checking that there is an opcode following call and hence it is // safe here to read next opcode without bounds check. newBBcreatedForTailcallStress = impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't // make it jump to RET. (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET bool hasTailPrefix = (prefixFlags & PREFIX_TAILCALL_EXPLICIT); if (newBBcreatedForTailcallStress && !hasTailPrefix) { // Do a more detailed evaluation of legality const bool returnFalseIfInvalid = true; const bool passedConstraintCheck = verCheckTailCallConstraint(opcode, &resolvedToken, constraintCall ? 
&constrainedResolvedToken : nullptr, returnFalseIfInvalid); if (passedConstraintCheck) { // Now check with the runtime CORINFO_METHOD_HANDLE declaredCalleeHnd = callInfo.hMethod; bool isVirtual = (callInfo.kind == CORINFO_VIRTUALCALL_STUB) || (callInfo.kind == CORINFO_VIRTUALCALL_VTABLE); CORINFO_METHOD_HANDLE exactCalleeHnd = isVirtual ? nullptr : declaredCalleeHnd; if (info.compCompHnd->canTailCall(info.compMethodHnd, declaredCalleeHnd, exactCalleeHnd, hasTailPrefix)) // Is it legal to do tailcall? { // Stress the tailcall. JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)"); prefixFlags |= PREFIX_TAILCALL_EXPLICIT; prefixFlags |= PREFIX_TAILCALL_STRESS; } else { // Runtime disallows this tail call JITDUMP(" (Tailcall stress: runtime preventing tailcall)"); passedStressModeValidation = false; } } else { // Constraints disallow this tail call JITDUMP(" (Tailcall stress: constraint check failed)"); passedStressModeValidation = false; } } } } // This is split up to avoid goto flow warnings. bool isRecursive; isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd); // If we've already disqualified this call as a tail call under tail call stress, // don't consider it for implicit tail calling either. // // When not running under tail call stress, we may mark this call as an implicit // tail call candidate. We'll do an "equivalent" validation during impImportCall. // // Note that when running under tail call stress, a call marked as explicit // tail prefixed will not be considered for implicit tail calling. if (passedStressModeValidation && impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive)) { if (compIsForInlining()) { #if FEATURE_TAILCALL_OPT_SHARED_RETURN // Are we inlining at an implicit tail call site? If so the we can flag // implicit tail call sites in the inline body. These call sites // often end up in non BBJ_RETURN blocks, so only flag them when // we're able to handle shared returns. if (impInlineInfo->iciCall->IsImplicitTailCall()) { JITDUMP("\n (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)"); prefixFlags |= PREFIX_TAILCALL_IMPLICIT; } #endif // FEATURE_TAILCALL_OPT_SHARED_RETURN } else { JITDUMP("\n (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)"); prefixFlags |= PREFIX_TAILCALL_IMPLICIT; } } // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call). explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0; readonlyCall = (prefixFlags & PREFIX_READONLY) != 0; if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ) { // All calls and delegates need a security callout. // For delegates, this is the call to the delegate constructor, not the access check on the // LD(virt)FTN. impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); } callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr, newObjThisPtr, prefixFlags, &callInfo, opcodeOffs); if (compDonotInline()) { // We do not check fails after lvaGrabTemp. It is covered with CoreCLR_13272 issue. assert((callTyp == TYP_UNDEF) || (compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS)); return; } if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we // have created a new BB after the "call" // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless. 
{ assert(!compIsForInlining()); goto RET; } break; case CEE_LDFLD: case CEE_LDSFLD: case CEE_LDFLDA: case CEE_LDSFLDA: { bool isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA); bool isLoadStatic = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA); /* Get the CP_Fieldref index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Field); JITDUMP(" %08X", resolvedToken.token); int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET; GenTree* obj = nullptr; typeInfo* tiObj = nullptr; CORINFO_CLASS_HANDLE objType = nullptr; // used for fields if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA) { tiObj = &impStackTop().seTypeInfo; StackEntry se = impPopStack(); objType = se.seTypeInfo.GetClassHandle(); obj = se.val; if (impIsThis(obj)) { aflags |= CORINFO_ACCESS_THIS; } } eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo); // Figure out the type of the member. We always call canAccessField, so you always need this // handle CorInfoType ciType = fieldInfo.fieldType; clsHnd = fieldInfo.structType; lclTyp = JITtype2varType(ciType); if (compIsForInlining()) { switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_STATIC_TLS: compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER); return; case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: /* We may be able to inline the field accessors in specific instantiations of generic * methods */ compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER); return; default: break; } if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT && clsHnd) { if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) && !(info.compFlags & CORINFO_FLG_FORCEINLINE)) { // Loading a static valuetype field usually will cause a JitHelper to be called // for the static base. This will bloat the code. compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS); if (compInlineResult->IsFailure()) { return; } } } } tiRetVal = verMakeTypeInfo(ciType, clsHnd); if (isLoadAddress) { tiRetVal.MakeByRef(); } else { tiRetVal.NormaliseForStack(); } // Perform this check always to ensure that we get field access exceptions even with // SkipVerification. impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper); // Raise InvalidProgramException if static load accesses non-static field if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0)) { BADCODE("static access on an instance field"); } // We are using ldfld/a on a static field. We allow it, but need to get side-effect from obj. if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr) { if (obj->gtFlags & GTF_SIDE_EFFECT) { obj = gtUnusedValNode(obj); impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } obj = nullptr; } /* Preserve 'small' int types */ if (!varTypeIsSmall(lclTyp)) { lclTyp = genActualType(lclTyp); } bool usesHelper = false; switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE: #ifdef FEATURE_READYTORUN case CORINFO_FIELD_INSTANCE_WITH_BASE: #endif { // If the object is a struct, what we really want is // for the field to operate on the address of the struct. 
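// Illustrative case (added note): for "s.X" where the struct value s itself was
// pushed (ldloc rather than ldloca), the value is rewritten below into the
// struct's address so the FIELD node can indirect off a byref/I_IMPL operand
// instead of a by-value struct operand.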
if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj)) { assert(opcode == CEE_LDFLD && objType != nullptr); obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true); } /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset); #ifdef FEATURE_READYTORUN if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE) { op1->AsField()->gtFieldLookup = fieldInfo.fieldLookup; } #endif op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT); if (fgAddrCouldBeNull(obj)) { op1->gtFlags |= GTF_EXCEPT; } // If the object is a BYREF then our target is a value class and // it could point anywhere, example a boxed class static int if (obj->gtType == TYP_BYREF) { op1->gtFlags |= GTF_IND_TGTANYWHERE; } DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass); if (StructHasOverlappingFields(typeFlags)) { op1->AsField()->gtFldMayOverlap = true; } // wrap it in a address of operator if necessary if (isLoadAddress) { op1 = gtNewOperNode(GT_ADDR, (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1); } else { if (compIsForInlining() && impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, nullptr, obj, impInlineInfo->inlArgInfo)) { impInlineInfo->thisDereferencedFirst = true; } } } break; case CORINFO_FIELD_STATIC_TLS: #ifdef TARGET_X86 // Legacy TLS access is implemented as intrinsic on x86 only /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset); op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation if (isLoadAddress) { op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1); } break; #else fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER; FALLTHROUGH; #endif case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp, clsHnd, nullptr); usesHelper = true; break; case CORINFO_FIELD_STATIC_ADDRESS: // Replace static read-only fields with constant if possible if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) && !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) && (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp))) { CorInfoInitClassResult initClassResult = info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd, impTokenLookupContextHandle); if (initClassResult & CORINFO_INITCLASS_INITIALIZED) { void** pFldAddr = nullptr; void* fldAddr = info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr); // We should always be able to access this static's address directly // assert(pFldAddr == nullptr); op1 = impImportStaticReadOnlyField(fldAddr, lclTyp); // Widen small types since we're propagating the value // instead of producing an indir. 
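// Hedged example (added note): this is the path that lets a "static readonly int"
// whose class initializer has already run (initClass reported
// CORINFO_INITCLASS_INITIALIZED above) be imported as a plain constant node
// rather than as a load from the static's address.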
// op1->gtType = genActualType(lclTyp); goto FIELD_DONE; } } FALLTHROUGH; case CORINFO_FIELD_STATIC_RVA_ADDRESS: case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER: case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp); break; case CORINFO_FIELD_INTRINSIC_ZERO: { assert(aflags & CORINFO_ACCESS_GET); // Widen to stack type lclTyp = genActualType(lclTyp); op1 = gtNewIconNode(0, lclTyp); goto FIELD_DONE; } break; case CORINFO_FIELD_INTRINSIC_EMPTY_STRING: { assert(aflags & CORINFO_ACCESS_GET); // Import String.Empty as "" (GT_CNS_STR with a fake SconCPX = 0) op1 = gtNewSconNode(EMPTY_STRING_SCON, nullptr); goto FIELD_DONE; } break; case CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN: { assert(aflags & CORINFO_ACCESS_GET); // Widen to stack type lclTyp = genActualType(lclTyp); #if BIGENDIAN op1 = gtNewIconNode(0, lclTyp); #else op1 = gtNewIconNode(1, lclTyp); #endif goto FIELD_DONE; } break; default: assert(!"Unexpected fieldAccessor"); } if (!isLoadAddress) { if (prefixFlags & PREFIX_VOLATILE) { op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered if (!usesHelper) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) || (op1->OperGet() == GT_OBJ)); op1->gtFlags |= GTF_IND_VOLATILE; } } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { if (!usesHelper) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) || (op1->OperGet() == GT_OBJ)); op1->gtFlags |= GTF_IND_UNALIGNED; } } } /* Check if the class needs explicit initialization */ if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { GenTree* helperNode = impInitClass(&resolvedToken); if (compDonotInline()) { return; } if (helperNode != nullptr) { op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1); } } FIELD_DONE: impPushOnStack(op1, tiRetVal); } break; case CEE_STFLD: case CEE_STSFLD: { bool isStoreStatic = (opcode == CEE_STSFLD); CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type) /* Get the CP_Fieldref index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Field); JITDUMP(" %08X", resolvedToken.token); int aflags = CORINFO_ACCESS_SET; GenTree* obj = nullptr; typeInfo* tiObj = nullptr; typeInfo tiVal; /* Pull the value from the stack */ StackEntry se = impPopStack(); op2 = se.val; tiVal = se.seTypeInfo; clsHnd = tiVal.GetClassHandle(); if (opcode == CEE_STFLD) { tiObj = &impStackTop().seTypeInfo; obj = impPopStack().val; if (impIsThis(obj)) { aflags |= CORINFO_ACCESS_THIS; } } eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo); // Figure out the type of the member. We always call canAccessField, so you always need this // handle CorInfoType ciType = fieldInfo.fieldType; fieldClsHnd = fieldInfo.structType; lclTyp = JITtype2varType(ciType); if (compIsForInlining()) { /* Is this a 'special' (COM) field? or a TLS ref static field?, field stored int GC heap? or * per-inst static? 
*/ switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_STATIC_TLS: compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER); return; case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: /* We may be able to inline the field accessors in specific instantiations of generic * methods */ compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER); return; default: break; } } impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper); // Raise InvalidProgramException if static store accesses non-static field if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0)) { BADCODE("static access on an instance field"); } // We are using stfld on a static field. // We allow it, but need to eval any side-effects for obj if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr) { if (obj->gtFlags & GTF_SIDE_EFFECT) { obj = gtUnusedValNode(obj); impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } obj = nullptr; } /* Preserve 'small' int types */ if (!varTypeIsSmall(lclTyp)) { lclTyp = genActualType(lclTyp); } switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE: #ifdef FEATURE_READYTORUN case CORINFO_FIELD_INSTANCE_WITH_BASE: #endif { /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset); DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass); if (StructHasOverlappingFields(typeFlags)) { op1->AsField()->gtFldMayOverlap = true; } #ifdef FEATURE_READYTORUN if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE) { op1->AsField()->gtFieldLookup = fieldInfo.fieldLookup; } #endif op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT); if (fgAddrCouldBeNull(obj)) { op1->gtFlags |= GTF_EXCEPT; } // If object is a BYREF then our target is a value class and // it could point anywhere, example a boxed class static int if (obj->gtType == TYP_BYREF) { op1->gtFlags |= GTF_IND_TGTANYWHERE; } if (compIsForInlining() && impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, nullptr, obj, impInlineInfo->inlArgInfo)) { impInlineInfo->thisDereferencedFirst = true; } } break; case CORINFO_FIELD_STATIC_TLS: #ifdef TARGET_X86 // Legacy TLS access is implemented as intrinsic on x86 only /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset); op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation break; #else fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER; FALLTHROUGH; #endif case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp, clsHnd, op2); goto SPILL_APPEND; case CORINFO_FIELD_STATIC_ADDRESS: case CORINFO_FIELD_STATIC_RVA_ADDRESS: case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER: case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp); break; default: assert(!"Unexpected fieldAccessor"); } // Create the member assignment, unless we have a TYP_STRUCT. 
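// Added note: struct-typed stores are not built here; deferStructAssign postpones
// them until after the ordering/interference spills below, at which point
// impAssignStruct builds the struct assignment.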
bool deferStructAssign = (lclTyp == TYP_STRUCT); if (!deferStructAssign) { if (prefixFlags & PREFIX_VOLATILE) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND)); op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered op1->gtFlags |= GTF_IND_VOLATILE; } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND)); op1->gtFlags |= GTF_IND_UNALIGNED; } /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full trust apps). The reason this works is that JIT stores an i4 constant in Gentree union during importation and reads from the union as if it were a long during code generation. Though this can potentially read garbage, one can get lucky to have this working correctly. This code pattern is generated by Dev10 MC++ compiler while storing to fields when compiled with /O2 switch (default when compiling retail configs in Dev10) and a customer app has taken a dependency on it. To be backward compatible, we will explicitly add an upward cast here so that it works correctly always. Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT for V4.0. */ CLANG_FORMAT_COMMENT_ANCHOR; #ifndef TARGET_64BIT // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be // generated for ARM as well as x86, so the following IR will be accepted: // STMTx (IL 0x... ???) // * ASG long // +--* CLS_VAR long // \--* CNS_INT int 2 if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) && varTypeIsLong(op1->TypeGet())) { op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet()); } #endif #ifdef TARGET_64BIT // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType)) { op2->gtType = TYP_I_IMPL; } else { // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity // if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT)) { op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT); } // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity // if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL); } } #endif // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE // We insert a cast to the dest 'op1' type // if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)) { op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet()); } op1 = gtNewAssignNode(op1, op2); /* Mark the expression as containing an assignment */ op1->gtFlags |= GTF_ASG; } /* Check if the class needs explicit initialization */ if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { GenTree* helperNode = impInitClass(&resolvedToken); if (compDonotInline()) { return; } if (helperNode != nullptr) { op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1); } } /* stfld can interfere with value classes (consider the sequence ldloc, ldloca, ..., stfld, stloc). We will be conservative and spill all value class references from the stack. */ if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL))) { assert(tiObj); // If we can resolve the field to be within some local, // then just spill that local. 
// GenTreeLclVarCommon* const lcl = obj->IsLocalAddrExpr(); if (lcl != nullptr) { impSpillLclRefs(lcl->GetLclNum()); } else if (impIsValueType(tiObj)) { impSpillEvalStack(); } else { impSpillValueClasses(); } } /* Spill any refs to the same member from the stack */ impSpillLclRefs((ssize_t)resolvedToken.hField); /* stsfld also interferes with indirect accesses (for aliased statics) and calls. But don't need to spill other statics as we have explicitly spilled this particular static field. */ impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD")); if (deferStructAssign) { op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL); } } goto APPEND; case CEE_NEWARR: { /* Get the class type index operand */ _impResolveToken(CORINFO_TOKENKIND_Newarr); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { // Need to restore array classes before creating array objects on the heap op1 = impTokenToHandle(&resolvedToken, nullptr, true /*mustRestoreHandle*/); if (op1 == nullptr) { // compDonotInline() return; } } tiRetVal = verMakeTypeInfo(resolvedToken.hClass); accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); /* Form the arglist: array class handle, size */ op2 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op2->gtType)); #ifdef TARGET_64BIT // The array helper takes a native int for array length. // So if we have an int, explicitly extend it to be a native int. if (genActualType(op2->TypeGet()) != TYP_I_IMPL) { if (op2->IsIntegralConst()) { op2->gtType = TYP_I_IMPL; } else { bool isUnsigned = false; op2 = gtNewCastNode(TYP_I_IMPL, op2, isUnsigned, TYP_I_IMPL); } } #endif // TARGET_64BIT #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF, gtNewCallArgs(op2)); usingReadyToRunHelper = (op1 != nullptr); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the newarr call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub // 3) Allocate the new array // Reason: performance (today, we'll always use the slow helper for the R2R generics case) // Need to restore array classes before creating array objects on the heap op1 = impTokenToHandle(&resolvedToken, nullptr, true /*mustRestoreHandle*/); if (op1 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { GenTreeCall::Use* args = gtNewCallArgs(op1, op2); /* Create a call to 'new' */ // Note that this only works for shared generic code because the same helper is used for all // reference array types op1 = gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, args); } op1->AsCall()->compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass; /* Remember that this basic block contains 'new' of an sd array */ block->bbFlags |= BBF_HAS_NEWARRAY; optMethodFlags |= OMF_HAS_NEWARRAY; /* Push the result of the call on the stack */ impPushOnStack(op1, tiRetVal); callTyp = TYP_REF; } break; case CEE_LOCALLOC: // We don't allow locallocs inside handlers if (block->hasHndIndex()) { BADCODE("Localloc can't be inside handler"); } // Get the size to allocate op2 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op2->gtType)); 
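// Note (editorial): the importer requires the rest of the evaluation stack to be empty for
// localloc (the size operand was just popped). Small constant-sized allocations that are not
// inside a loop are converted below into a TYP_BLK local instead of a real GT_LCLHEAP node.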
if (verCurrentState.esStackDepth != 0) { BADCODE("Localloc can only be used when the stack is empty"); } // If the localloc is not in a loop and its size is a small constant, // create a new local var of TYP_BLK and return its address. { bool convertedToLocal = false; // Need to aggressively fold here, as even fixed-size locallocs // will have casts in the way. op2 = gtFoldExpr(op2); if (op2->IsIntegralConst()) { const ssize_t allocSize = op2->AsIntCon()->IconValue(); bool bbInALoop = impBlockIsInALoop(block); if (allocSize == 0) { // Result is nullptr JITDUMP("Converting stackalloc of 0 bytes to push null unmanaged pointer\n"); op1 = gtNewIconNode(0, TYP_I_IMPL); convertedToLocal = true; } else if ((allocSize > 0) && !bbInALoop) { // Get the size threshold for local conversion ssize_t maxSize = DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE; #ifdef DEBUG // Optionally allow this to be modified maxSize = JitConfig.JitStackAllocToLocalSize(); #endif // DEBUG if (allocSize <= maxSize) { const unsigned stackallocAsLocal = lvaGrabTemp(false DEBUGARG("stackallocLocal")); JITDUMP("Converting stackalloc of %zd bytes to new local V%02u\n", allocSize, stackallocAsLocal); lvaTable[stackallocAsLocal].lvType = TYP_BLK; lvaTable[stackallocAsLocal].lvExactSize = (unsigned)allocSize; lvaTable[stackallocAsLocal].lvIsUnsafeBuffer = true; op1 = gtNewLclvNode(stackallocAsLocal, TYP_BLK); op1 = gtNewOperNode(GT_ADDR, TYP_I_IMPL, op1); convertedToLocal = true; if (!this->opts.compDbgEnC) { // Ensure we have stack security for this method. // Reorder layout since the converted localloc is treated as an unsafe buffer. setNeedsGSSecurityCookie(); compGSReorderStackLayout = true; } } } } if (!convertedToLocal) { // Bail out if inlining and the localloc was not converted. // // Note we might consider allowing the inline, if the call // site is not in a loop. if (compIsForInlining()) { InlineObservation obs = op2->IsIntegralConst() ? InlineObservation::CALLEE_LOCALLOC_TOO_LARGE : InlineObservation::CALLSITE_LOCALLOC_SIZE_UNKNOWN; compInlineResult->NoteFatal(obs); return; } op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2); // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd. op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE); // Ensure we have stack security for this method. setNeedsGSSecurityCookie(); /* The FP register may not be back to the original value at the end of the method, even if the frame size is 0, as localloc may have modified it. So we will HAVE to reset it */ compLocallocUsed = true; } else { compLocallocOptimized = true; } } impPushOnStack(op1, tiRetVal); break; case CEE_ISINST: { /* Get the type token */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Casting); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, false); if (optTree != nullptr) { impPushOnStack(optTree, tiRetVal); } else { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { GenTreeCall* opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF, gtNewCallArgs(op1)); usingReadyToRunHelper = (opLookup != nullptr); op1 = (usingReadyToRunHelper ? 
opLookup : op1); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the isinstanceof_any call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate // stub // 3) Perform the 'is instance' check on the input object // Reason: performance (today, we'll always use the slow helper for the R2R generics case) op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false, opcodeOffs); } if (compDonotInline()) { return; } impPushOnStack(op1, tiRetVal); } break; } case CEE_REFANYVAL: // get the class handle and make a ICON node out of it _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = impTokenToHandle(&resolvedToken); if (op2 == nullptr) { // compDonotInline() return; } op1 = impPopStack().val; // make certain it is normalized; op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL); // Call helper GETREFANY(classHandle, op1); op1 = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, gtNewCallArgs(op2, op1)); impPushOnStack(op1, tiRetVal); break; case CEE_REFANYTYPE: op1 = impPopStack().val; // make certain it is normalized; op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL); if (op1->gtOper == GT_OBJ) { // Get the address of the refany op1 = op1->AsOp()->gtOp1; // Fetch the type from the correct slot op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL)); op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1); } else { assertImp(op1->gtOper == GT_MKREFANY); // The pointer may have side-effects if (op1->AsOp()->gtOp1->gtFlags & GTF_SIDE_EFFECT) { impAppendTree(op1->AsOp()->gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); #ifdef DEBUG impNoteLastILoffs(); #endif } // We already have the class handle op1 = op1->AsOp()->gtOp2; } // convert native TypeHandle to RuntimeTypeHandle { GenTreeCall::Use* helperArgs = gtNewCallArgs(op1); op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL, TYP_STRUCT, helperArgs); CORINFO_CLASS_HANDLE classHandle = impGetTypeHandleClass(); // The handle struct is returned in register op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType(); op1->AsCall()->gtRetClsHnd = classHandle; #if FEATURE_MULTIREG_RET op1->AsCall()->InitializeStructReturnType(this, classHandle, op1->AsCall()->GetUnmanagedCallConv()); #endif tiRetVal = typeInfo(TI_STRUCT, classHandle); } impPushOnStack(op1, tiRetVal); break; case CEE_LDTOKEN: { /* Get the Class index */ assertImp(sz == sizeof(unsigned)); lastLoadToken = codeAddr; _impResolveToken(CORINFO_TOKENKIND_Ldtoken); tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken); op1 = impTokenToHandle(&resolvedToken, nullptr, true); if (op1 == nullptr) { // compDonotInline() return; } helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE; assert(resolvedToken.hClass != nullptr); if (resolvedToken.hMethod != nullptr) { helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD; } else if (resolvedToken.hField != nullptr) { helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD; } GenTreeCall::Use* helperArgs = gtNewCallArgs(op1); op1 = gtNewHelperCallNode(helper, TYP_STRUCT, helperArgs); // The handle struct is returned in register and // it could be 
consumed both as `TYP_STRUCT` and `TYP_REF`. op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType(); #if FEATURE_MULTIREG_RET op1->AsCall()->InitializeStructReturnType(this, tokenType, op1->AsCall()->GetUnmanagedCallConv()); #endif op1->AsCall()->gtRetClsHnd = tokenType; tiRetVal = verMakeTypeInfo(tokenType); impPushOnStack(op1, tiRetVal); } break; case CEE_UNBOX: case CEE_UNBOX_ANY: { /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); bool runtimeLookup; op2 = impTokenToHandle(&resolvedToken, &runtimeLookup); if (op2 == nullptr) { assert(compDonotInline()); return; } // Run this always so we can get access exceptions even with SkipVerification. accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass)) { JITDUMP("\n Importing UNBOX.ANY(refClass) as CASTCLASS\n"); op1 = impPopStack().val; goto CASTCLASS; } /* Pop the object and create the unbox helper call */ /* You might think that for UNBOX_ANY we need to push a different */ /* (non-byref) type, but here we're making the tiRetVal that is used */ /* for the intermediate pointer which we then transfer onto the OBJ */ /* instruction. OBJ then creates the appropriate tiRetVal. */ op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF); helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass); assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE); // Check legality and profitability of inline expansion for unboxing. const bool canExpandInline = (helper == CORINFO_HELP_UNBOX); const bool shouldExpandInline = !compCurBB->isRunRarely() && opts.OptimizationEnabled(); if (canExpandInline && shouldExpandInline) { // See if we know anything about the type of op1, the object being unboxed. bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE clsHnd = gtGetClassHandle(op1, &isExact, &isNonNull); // We can skip the "exact" bit here as we are comparing to a value class. // compareTypesForEquality should bail on comparisions for shared value classes. if (clsHnd != NO_CLASS_HANDLE) { const TypeCompareState compare = info.compCompHnd->compareTypesForEquality(resolvedToken.hClass, clsHnd); if (compare == TypeCompareState::Must) { JITDUMP("\nOptimizing %s (%s) -- type test will succeed\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", eeGetClassName(clsHnd)); // For UNBOX, null check (if necessary), and then leave the box payload byref on the stack. 
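// Note (editorial): the box payload lives just past the method table pointer, i.e. at
// [box + TARGET_POINTER_SIZE]; that is the address the optimized expansion below computes.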
if (opcode == CEE_UNBOX) { GenTree* cloneOperand; op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("optimized unbox clone")); GenTree* boxPayloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* boxPayloadAddress = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, boxPayloadOffset); GenTree* nullcheck = gtNewNullCheck(op1, block); GenTree* result = gtNewOperNode(GT_COMMA, TYP_BYREF, nullcheck, boxPayloadAddress); impPushOnStack(result, tiRetVal); break; } // For UNBOX.ANY load the struct from the box payload byref (the load will nullcheck) assert(opcode == CEE_UNBOX_ANY); GenTree* boxPayloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* boxPayloadAddress = gtNewOperNode(GT_ADD, TYP_BYREF, op1, boxPayloadOffset); impPushOnStack(boxPayloadAddress, tiRetVal); oper = GT_OBJ; goto OBJ; } else { JITDUMP("\nUnable to optimize %s -- can't resolve type comparison\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY"); } } else { JITDUMP("\nUnable to optimize %s -- class for [%06u] not known\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", dspTreeID(op1)); } JITDUMP("\n Importing %s as inline sequence\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY"); // we are doing normal unboxing // inline the common case of the unbox helper // UNBOX(exp) morphs into // clone = pop(exp); // ((*clone == typeToken) ? nop : helper(clone, typeToken)); // push(clone + TARGET_POINTER_SIZE) // GenTree* cloneOperand; op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("inline UNBOX clone1")); op1 = gtNewMethodTableLookup(op1); GenTree* condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2); op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("inline UNBOX clone2")); op2 = impTokenToHandle(&resolvedToken); if (op2 == nullptr) { // compDonotInline() return; } op1 = gtNewHelperCallNode(helper, TYP_VOID, gtNewCallArgs(op2, op1)); op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1); op1 = gtNewQmarkNode(TYP_VOID, condBox, op1->AsColon()); // QMARK nodes cannot reside on the evaluation stack. Because there // may be other trees on the evaluation stack that side-effect the // sources of the UNBOX operation we must spill the stack. impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); // Create the address-expression to reference past the object header // to the beginning of the value-type. Today this means adjusting // past the base of the objects vtable field which is pointer sized. op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2); } else { JITDUMP("\n Importing %s as helper call because %s\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal"); // Don't optimize, just call the helper and be done with it op1 = gtNewHelperCallNode(helper, (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT), gtNewCallArgs(op2, op1)); if (op1->gtType == TYP_STRUCT) { op1->AsCall()->gtRetClsHnd = resolvedToken.hClass; } } assert((helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF) || // Unbox helper returns a byref. (helper == CORINFO_HELP_UNBOX_NULLABLE && varTypeIsStruct(op1)) // UnboxNullable helper returns a struct. 
); /* ---------------------------------------------------------------------- | \ helper | | | | \ | | | | \ | CORINFO_HELP_UNBOX | CORINFO_HELP_UNBOX_NULLABLE | | \ | (which returns a BYREF) | (which returns a STRUCT) | | | opcode \ | | | |--------------------------------------------------------------------- | UNBOX | push the BYREF | spill the STRUCT to a local, | | | | push the BYREF to this local | |--------------------------------------------------------------------- | UNBOX_ANY | push a GT_OBJ of | push the STRUCT | | | the BYREF | For Linux when the | | | | struct is returned in two | | | | registers create a temp | | | | which address is passed to | | | | the unbox_nullable helper. | |--------------------------------------------------------------------- */ if (opcode == CEE_UNBOX) { if (helper == CORINFO_HELP_UNBOX_NULLABLE) { // Unbox nullable helper returns a struct type. // We need to spill it to a temp so than can take the address of it. // Here we need unsafe value cls check, since the address of struct is taken to be used // further along and potetially be exploitable. unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable")); lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */); op2 = gtNewLclvNode(tmp, TYP_STRUCT); op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp. op2 = gtNewLclvNode(tmp, TYP_STRUCT); op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2); op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2); } assert(op1->gtType == TYP_BYREF); } else { assert(opcode == CEE_UNBOX_ANY); if (helper == CORINFO_HELP_UNBOX) { // Normal unbox helper returns a TYP_BYREF. impPushOnStack(op1, tiRetVal); oper = GT_OBJ; goto OBJ; } assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!"); #if FEATURE_MULTIREG_RET if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass, CorInfoCallConvExtension::Managed)) { // Unbox nullable helper returns a TYP_STRUCT. // For the multi-reg case we need to spill it to a temp so that // we can pass the address to the unbox_nullable jit helper. unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable")); lvaTable[tmp].lvIsMultiRegArg = true; lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */); op2 = gtNewLclvNode(tmp, TYP_STRUCT); op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp. op2 = gtNewLclvNode(tmp, TYP_STRUCT); op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2); op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2); // In this case the return value of the unbox helper is TYP_BYREF. // Make sure the right type is placed on the operand type stack. impPushOnStack(op1, tiRetVal); // Load the struct. oper = GT_OBJ; assert(op1->gtType == TYP_BYREF); goto OBJ; } else #endif // !FEATURE_MULTIREG_RET { // If non register passable struct we have it materialized in the RetBuf. 
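// Note (editorial): op1 here is the TYP_STRUCT call to the unbox-nullable helper; it is
// simply pushed below and consumed as an ordinary struct value.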
assert(op1->gtType == TYP_STRUCT); tiRetVal = verMakeTypeInfo(resolvedToken.hClass); assert(tiRetVal.IsValueClass()); } } impPushOnStack(op1, tiRetVal); } break; case CEE_BOX: { /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Box); JITDUMP(" %08X", resolvedToken.token); accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); // Note BOX can be used on things that are not value classes, in which // case we get a NOP. However the verifier's view of the type on the // stack changes (in generic code a 'T' becomes a 'boxed T') if (!eeIsValueClass(resolvedToken.hClass)) { JITDUMP("\n Importing BOX(refClass) as NOP\n"); verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal; break; } // Look ahead for box idioms int matched = impBoxPatternMatch(&resolvedToken, codeAddr + sz, codeEndp); if (matched >= 0) { // Skip the matched IL instructions sz += matched; break; } impImportAndPushBox(&resolvedToken); if (compDonotInline()) { return; } } break; case CEE_SIZEOF: /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass)); impPushOnStack(op1, tiRetVal); break; case CEE_CASTCLASS: /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Casting); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; /* Pop the address and create the 'checked cast' helper call */ // At this point we expect typeRef to contain the token, op1 to contain the value being cast, // and op2 to contain code that creates the type handle corresponding to typeRef CASTCLASS: { GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, true); if (optTree != nullptr) { impPushOnStack(optTree, tiRetVal); } else { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { GenTreeCall* opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST, TYP_REF, gtNewCallArgs(op1)); usingReadyToRunHelper = (opLookup != nullptr); op1 = (usingReadyToRunHelper ? opLookup : op1); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the chkcastany call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate // stub // 3) Check the object on the stack for the type-cast // Reason: performance (today, we'll always use the slow helper for the R2R generics case) op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true, opcodeOffs); } if (compDonotInline()) { return; } /* Push the result back on the stack */ impPushOnStack(op1, tiRetVal); } } break; case CEE_THROW: // Any block with a throw is rarely executed. 
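// Note (editorial): the throw helper does not return; any values still on the evaluation
// stack are evaluated for their side effects only (see EVAL_APPEND below) and then discarded.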
block->bbSetRunRarely(); // Pop the exception object and create the 'throw' helper call op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, gtNewCallArgs(impPopStack().val)); // Fall through to clear out the eval stack. EVAL_APPEND: if (verCurrentState.esStackDepth > 0) { impEvalSideEffects(); } assert(verCurrentState.esStackDepth == 0); goto APPEND; case CEE_RETHROW: assert(!compIsForInlining()); if (info.compXcptnsCount == 0) { BADCODE("rethrow outside catch"); } /* Create the 'rethrow' helper call */ op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID); goto EVAL_APPEND; case CEE_INITOBJ: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = gtNewIconNode(0); // Value op1 = impPopStack().val; // Dest if (eeIsValueClass(resolvedToken.hClass)) { op1 = gtNewStructVal(resolvedToken.hClass, op1); if (op1->OperIs(GT_OBJ)) { gtSetObjGcInfo(op1->AsObj()); } } else { size = info.compCompHnd->getClassSize(resolvedToken.hClass); assert(size == TARGET_POINTER_SIZE); op1 = gtNewBlockVal(op1, size); } op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false); goto SPILL_APPEND; case CEE_INITBLK: op3 = impPopStack().val; // Size op2 = impPopStack().val; // Value op1 = impPopStack().val; // Dst addr if (op3->IsCnsIntOrI()) { size = (unsigned)op3->AsIntConCommon()->IconValue(); op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size)); op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false); } else { if (!op2->IsIntegralConst(0)) { op2 = gtNewOperNode(GT_INIT_VAL, TYP_INT, op2); } op1 = new (this, GT_STORE_DYN_BLK) GenTreeStoreDynBlk(op1, op2, op3); size = 0; if ((prefixFlags & PREFIX_VOLATILE) != 0) { op1->gtFlags |= GTF_BLK_VOLATILE; } } goto SPILL_APPEND; case CEE_CPBLK: op3 = impPopStack().val; // Size op2 = impPopStack().val; // Src addr op1 = impPopStack().val; // Dst addr if (op2->OperGet() == GT_ADDR) { op2 = op2->AsOp()->gtOp1; } else { op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2); } if (op3->IsCnsIntOrI()) { size = (unsigned)op3->AsIntConCommon()->IconValue(); op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size)); op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, true); } else { op1 = new (this, GT_STORE_DYN_BLK) GenTreeStoreDynBlk(op1, op2, op3); size = 0; if ((prefixFlags & PREFIX_VOLATILE) != 0) { op1->gtFlags |= GTF_BLK_VOLATILE; } } goto SPILL_APPEND; case CEE_CPOBJ: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); if (!eeIsValueClass(resolvedToken.hClass)) { op1 = impPopStack().val; // address to load from impBashVarAddrsToI(op1); assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF); op1 = gtNewOperNode(GT_IND, TYP_REF, op1); op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF; impPushOnStack(op1, typeInfo()); opcode = CEE_STIND_REF; lclTyp = TYP_REF; goto STIND; } op2 = impPopStack().val; // Src op1 = impPopStack().val; // Dest op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0)); goto SPILL_APPEND; case CEE_STOBJ: { assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); if (eeIsValueClass(resolvedToken.hClass)) { lclTyp = TYP_STRUCT; } else { lclTyp = TYP_REF; } if (lclTyp == TYP_REF) { opcode = CEE_STIND_REF; goto STIND; } CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass); if 
(impIsPrimitive(jitTyp)) { lclTyp = JITtype2varType(jitTyp); goto STIND; } op2 = impPopStack().val; // Value op1 = impPopStack().val; // Ptr assertImp(varTypeIsStruct(op2)); op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED)) { op1->gtFlags |= GTF_BLK_UNALIGNED; } goto SPILL_APPEND; } case CEE_MKREFANY: assert(!compIsForInlining()); // Being lazy here. Refanys are tricky in terms of gc tracking. // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany. JITDUMP("disabling struct promotion because of mkrefany\n"); fgNoStructPromotion = true; oper = GT_MKREFANY; assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = impTokenToHandle(&resolvedToken, nullptr, true); if (op2 == nullptr) { // compDonotInline() return; } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec. // But JIT32 allowed it, so we continue to allow it. assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT); // MKREFANY returns a struct. op2 is the class token. op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2); impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass())); break; case CEE_LDOBJ: { oper = GT_OBJ; assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); OBJ: tiRetVal = verMakeTypeInfo(resolvedToken.hClass); if (eeIsValueClass(resolvedToken.hClass)) { lclTyp = TYP_STRUCT; } else { lclTyp = TYP_REF; opcode = CEE_LDIND_REF; goto LDIND; } op1 = impPopStack().val; assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL); CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass); if (impIsPrimitive(jitTyp)) { op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1); // Could point anywhere, example a boxed class static int op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF; assertImp(varTypeIsArithmetic(op1->gtType)); } else { // OBJ returns a struct // and an inline argument which is the class token of the loaded obj op1 = gtNewObjNode(resolvedToken.hClass, op1); } op1->gtFlags |= GTF_EXCEPT; if (prefixFlags & PREFIX_UNALIGNED) { op1->gtFlags |= GTF_IND_UNALIGNED; } impPushOnStack(op1, tiRetVal); break; } case CEE_LDLEN: op1 = impPopStack().val; if (opts.OptimizationEnabled()) { /* Use GT_ARR_LENGTH operator so rng check opts see this */ GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_Array__length, block); op1 = arrLen; } else { /* Create the expression "*(array_addr + ArrLenOffs)" */ op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(OFFSETOF__CORINFO_Array__length, TYP_I_IMPL)); op1 = gtNewIndir(TYP_INT, op1); } /* Push the result back on the stack */ impPushOnStack(op1, tiRetVal); break; case CEE_BREAK: op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID); goto SPILL_APPEND; case CEE_NOP: if (opts.compDbgCode) { op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID); goto SPILL_APPEND; } break; /******************************** NYI *******************************/ case 0xCC: OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n"); FALLTHROUGH; case CEE_ILLEGAL: case CEE_MACRO_END: default: if (compIsForInlining()) 
{ compInlineResult->NoteFatal(InlineObservation::CALLEE_COMPILATION_ERROR); return; } BADCODE3("unknown opcode", ": %02X", (int)opcode); } codeAddr += sz; prevOpcode = opcode; prefixFlags = 0; } return; #undef _impResolveToken } #ifdef _PREFAST_ #pragma warning(pop) #endif // Push a local/argument treeon the operand stack void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal) { tiRetVal.NormaliseForStack(); if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr()) { tiRetVal.SetUninitialisedObjRef(); } impPushOnStack(op, tiRetVal); } //------------------------------------------------------------------------ // impCreateLocal: create a GT_LCL_VAR node to access a local that might need to be normalized on load // // Arguments: // lclNum -- The index into lvaTable // offset -- The offset to associate with the node // // Returns: // The node // GenTreeLclVar* Compiler::impCreateLocalNode(unsigned lclNum DEBUGARG(IL_OFFSET offset)) { var_types lclTyp; if (lvaTable[lclNum].lvNormalizeOnLoad()) { lclTyp = lvaGetRealType(lclNum); } else { lclTyp = lvaGetActualType(lclNum); } return gtNewLclvNode(lclNum, lclTyp DEBUGARG(offset)); } // Load a local/argument on the operand stack // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, const typeInfo& tiRetVal) { impPushVar(impCreateLocalNode(lclNum DEBUGARG(offset)), tiRetVal); } // Load an argument on the operand stack // Shared by the various CEE_LDARG opcodes // ilArgNum is the argument index as specified in IL. // It will be mapped to the correct lvaTable index void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset) { Verify(ilArgNum < info.compILargsCount, "bad arg num"); if (compIsForInlining()) { if (ilArgNum >= info.compArgsCount) { compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER); return; } impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo), impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo); } else { if (ilArgNum >= info.compArgsCount) { BADCODE("Bad IL"); } unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param if (lclNum == info.compThisArg) { lclNum = lvaArg0Var; } impLoadVar(lclNum, offset); } } // Load a local on the operand stack // Shared by the various CEE_LDLOC opcodes // ilLclNum is the local index as specified in IL. // It will be mapped to the correct lvaTable index void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset) { if (compIsForInlining()) { if (ilLclNum >= info.compMethodInfo->locals.numArgs) { compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER); return; } // Get the local type var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo; typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo; /* Have we allocated a temp for this local? 
*/ unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp")); // All vars of inlined methods should be !lvNormalizeOnLoad() assert(!lvaTable[lclNum].lvNormalizeOnLoad()); lclTyp = genActualType(lclTyp); impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal); } else { if (ilLclNum >= info.compMethodInfo->locals.numArgs) { BADCODE("Bad IL"); } unsigned lclNum = info.compArgsCount + ilLclNum; impLoadVar(lclNum, offset); } } #ifdef TARGET_ARM /************************************************************************************** * * When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the * dst struct, because struct promotion will turn it into a float/double variable while * the rhs will be an int/long variable. We don't code generate assignment of int into * a float, but there is nothing that might prevent us from doing so. The tree however * would like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int)) * * tmpNum - the lcl dst variable num that is a struct. * src - the src tree assigned to the dest that is a struct/int (when varargs call.) * hClass - the type handle for the struct variable. * * TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play, * however, we could do a codegen of transferring from int to float registers * (transfer, not a cast.) * */ void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* src, CORINFO_CLASS_HANDLE hClass) { if (src->gtOper == GT_CALL && src->AsCall()->IsVarargs() && IsHfa(hClass)) { int hfaSlots = GetHfaCount(hClass); var_types hfaType = GetHfaType(hClass); // If we have varargs we morph the method's return type to be "int" irrespective of its original // type: struct/float at importer because the ABI calls out return in integer registers. // We don't want struct promotion to replace an expression like this: // lclFld_int = callvar_int() into lclFld_float = callvar_int(); // This means an int is getting assigned to a float without a cast. Prevent the promotion. if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) || (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES)) { // Make sure this struct type stays as struct so we can receive the call in a struct. lvaTable[tmpNum].lvIsMultiRegRet = true; } } } #endif // TARGET_ARM #if FEATURE_MULTIREG_RET //------------------------------------------------------------------------ // impAssignMultiRegTypeToVar: ensure calls that return structs in multiple // registers return values to suitable temps. // // Arguments: // op -- call returning a struct in registers // hClass -- class handle for struct // // Returns: // Tree with reference to struct local to use as call return value. GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv)) { unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return")); impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL); GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType); // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. ret->gtFlags |= GTF_DONT_CSE; assert(IsMultiRegReturnedType(hClass, callConv)); // Mark the var so that fields are not promoted and stay together. 
lvaTable[tmpNum].lvIsMultiRegRet = true; return ret; } #endif // FEATURE_MULTIREG_RET //------------------------------------------------------------------------ // impReturnInstruction: import a return or an explicit tail call // // Arguments: // prefixFlags -- active IL prefixes // opcode -- [in, out] IL opcode // // Returns: // True if import was successful (may fail for some inlinees) // bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) { const bool isTailCall = (prefixFlags & PREFIX_TAILCALL) != 0; #ifdef DEBUG // If we are importing an inlinee and have GC ref locals we always // need to have a spill temp for the return value. This temp // should have been set up in advance, over in fgFindBasicBlocks. if (compIsForInlining() && impInlineInfo->HasGcRefLocals() && (info.compRetType != TYP_VOID)) { assert(lvaInlineeReturnSpillTemp != BAD_VAR_NUM); } #endif // DEBUG GenTree* op2 = nullptr; GenTree* op1 = nullptr; CORINFO_CLASS_HANDLE retClsHnd = nullptr; if (info.compRetType != TYP_VOID) { StackEntry se = impPopStack(); retClsHnd = se.seTypeInfo.GetClassHandle(); op2 = se.val; if (!compIsForInlining()) { impBashVarAddrsToI(op2); op2 = impImplicitIorI4Cast(op2, info.compRetType); op2 = impImplicitR4orR8Cast(op2, info.compRetType); // Note that we allow TYP_I_IMPL<->TYP_BYREF transformation, but only TYP_I_IMPL<-TYP_REF. assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) || ((op2->TypeGet() == TYP_I_IMPL) && TypeIs(info.compRetType, TYP_BYREF)) || (op2->TypeIs(TYP_BYREF, TYP_REF) && (info.compRetType == TYP_I_IMPL)) || (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) || (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType))); #ifdef DEBUG if (!isTailCall && opts.compGcChecks && (info.compRetType == TYP_REF)) { // DDB 3483 : JIT Stress: early termination of GC ref's life time in exception code path // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with // one-return BB. assert(op2->gtType == TYP_REF); // confirm that the argument is a GC pointer (for debugging (GC stress)) GenTreeCall::Use* args = gtNewCallArgs(op2); op2 = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, args); if (verbose) { printf("\ncompGcChecks tree:\n"); gtDispTree(op2); } } #endif } else { if (verCurrentState.esStackDepth != 0) { assert(compIsForInlining()); JITDUMP("CALLSITE_COMPILATION_ERROR: inlinee's stack is not empty."); compInlineResult->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR); return false; } #ifdef DEBUG if (verbose) { printf("\n\n Inlinee Return expression (before normalization) =>\n"); gtDispTree(op2); } #endif // Make sure the type matches the original call. var_types returnType = genActualType(op2->gtType); var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType; if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT)) { originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass); } if (returnType != originalCallType) { // Allow TYP_BYREF to be returned as TYP_I_IMPL and vice versa. // Allow TYP_REF to be returned as TYP_I_IMPL and NOT vice verse. 
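// (Editorial note on the asymmetry above: a TYP_I_IMPL result may not be reinterpreted as
// TYP_REF, since that would create an object reference the GC does not know about.)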
if ((TypeIs(returnType, TYP_BYREF, TYP_REF) && (originalCallType == TYP_I_IMPL)) || ((returnType == TYP_I_IMPL) && TypeIs(originalCallType, TYP_BYREF))) { JITDUMP("Allowing return type mismatch: have %s, needed %s\n", varTypeName(returnType), varTypeName(originalCallType)); } else { JITDUMP("Return type mismatch: have %s, needed %s\n", varTypeName(returnType), varTypeName(originalCallType)); compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH); return false; } } // Below, we are going to set impInlineInfo->retExpr to the tree with the return // expression. At this point, retExpr could already be set if there are multiple // return blocks (meaning fgNeedReturnSpillTemp() == true) and one of // the other blocks already set it. If there is only a single return block, // retExpr shouldn't be set. However, this is not true if we reimport a block // with a return. In that case, retExpr will be set, then the block will be // reimported, but retExpr won't get cleared as part of setting the block to // be reimported. The reimported retExpr value should be the same, so even if // we don't unconditionally overwrite it, it shouldn't matter. if (info.compRetNativeType != TYP_STRUCT) { // compRetNativeType is not TYP_STRUCT. // This implies it could be either a scalar type or SIMD vector type or // a struct type that can be normalized to a scalar type. if (varTypeIsStruct(info.compRetType)) { noway_assert(info.compRetBuffArg == BAD_VAR_NUM); // adjust the type away from struct to integral // and no normalizing op2 = impFixupStructReturnType(op2, retClsHnd, info.compCallConv); } else { // Do we have to normalize? var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType); if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) && fgCastNeeded(op2, fncRealRetType)) { // Small-typed return values are normalized by the callee op2 = gtNewCastNode(TYP_INT, op2, false, fncRealRetType); } } if (fgNeedReturnSpillTemp()) { assert(info.compRetNativeType != TYP_VOID && (fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals())); // If this method returns a ref type, track the actual types seen // in the returns. if (info.compRetType == TYP_REF) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE returnClsHnd = gtGetClassHandle(op2, &isExact, &isNonNull); if (impInlineInfo->retExpr == nullptr) { // This is the first return, so best known type is the type // of this return value. impInlineInfo->retExprClassHnd = returnClsHnd; impInlineInfo->retExprClassHndIsExact = isExact; } else if (impInlineInfo->retExprClassHnd != returnClsHnd) { // This return site type differs from earlier seen sites, // so reset the info and we'll fall back to using the method's // declared return type for the return spill temp. impInlineInfo->retExprClassHnd = nullptr; impInlineInfo->retExprClassHndIsExact = false; } } impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); var_types lclRetType = lvaGetDesc(lvaInlineeReturnSpillTemp)->lvType; GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, lclRetType); op2 = tmpOp2; #ifdef DEBUG if (impInlineInfo->retExpr) { // Some other block(s) have seen the CEE_RET first. // Better they spilled to the same temp. 
assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR); assert(impInlineInfo->retExpr->AsLclVarCommon()->GetLclNum() == op2->AsLclVarCommon()->GetLclNum()); } #endif } #ifdef DEBUG if (verbose) { printf("\n\n Inlinee Return expression (after normalization) =>\n"); gtDispTree(op2); } #endif // Report the return expression impInlineInfo->retExpr = op2; } else { // compRetNativeType is TYP_STRUCT. // This implies that struct return via RetBuf arg or multi-reg struct return GenTreeCall* iciCall = impInlineInfo->iciCall->AsCall(); // Assign the inlinee return into a spill temp. // spill temp only exists if there are multiple return points if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM) { // in this case we have to insert multiple struct copies to the temp // and the retexpr is just the temp. assert(info.compRetNativeType != TYP_VOID); assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()); impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); } #if defined(TARGET_ARM) || defined(UNIX_AMD64_ABI) #if defined(TARGET_ARM) // TODO-ARM64-NYI: HFA // TODO-AMD64-Unix and TODO-ARM once the ARM64 functionality is implemented the // next ifdefs could be refactored in a single method with the ifdef inside. if (IsHfa(retClsHnd)) { // Same as !IsHfa but just don't bother with impAssignStructPtr. #else // defined(UNIX_AMD64_ABI) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { // If single eightbyte, the return type would have been normalized and there won't be a temp var. // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes - // max allowed.) assert(retRegCount == MAX_RET_REG_COUNT); // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr. CLANG_FORMAT_COMMENT_ANCHOR; #endif // defined(UNIX_AMD64_ABI) if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { #if defined(TARGET_ARM) impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType); #else // defined(UNIX_AMD64_ABI) // The inlinee compiler has figured out the type of the temp already. Use it here. impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); #endif // defined(UNIX_AMD64_ABI) } } else { impInlineInfo->retExpr = op2; } } else #elif defined(TARGET_ARM64) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { assert(!iciCall->HasRetBufArg()); assert(retRegCount >= 2); if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { // The inlinee compiler has figured out the type of the temp already. Use it here. impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); } } else { impInlineInfo->retExpr = op2; } } else #elif defined(TARGET_X86) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { assert(!iciCall->HasRetBufArg()); assert(retRegCount == MAX_RET_REG_COUNT); if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { // The inlinee compiler has figured out the type of the temp already. Use it here. 
impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); } } else { impInlineInfo->retExpr = op2; } } else #endif // defined(TARGET_ARM64) { assert(iciCall->HasRetBufArg()); GenTree* dest = gtCloneExpr(iciCall->gtCallArgs->GetNode()); // spill temp only exists if there are multiple return points if (fgNeedReturnSpillTemp()) { // if this is the first return we have seen set the retExpr if (!impInlineInfo->retExpr) { impInlineInfo->retExpr = impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType), retClsHnd, (unsigned)CHECK_SPILL_ALL); } } else { impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL); } } } if (impInlineInfo->retExpr != nullptr) { impInlineInfo->retBB = compCurBB; } } } if (compIsForInlining()) { return true; } if (info.compRetType == TYP_VOID) { // return void op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID); } else if (info.compRetBuffArg != BAD_VAR_NUM) { // Assign value to return buff (first param) GenTree* retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF DEBUGARG(impCurStmtDI.GetLocation().GetOffset())); op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL); impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX). CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_AMD64) // x64 (System V and Win64) calling convention requires to // return the implicit return buffer explicitly (in RAX). // Change the return type to be BYREF. op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); #else // !defined(TARGET_AMD64) // In case of non-AMD64 targets the profiler hook requires to return the implicit RetBuf explicitly (in RAX). // In such case the return value of the function is changed to BYREF. // If profiler hook is not needed the return type of the function is TYP_VOID. if (compIsProfilerHookNeeded()) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #if defined(TARGET_ARM64) // On ARM64, the native instance calling convention variant // requires the implicit ByRef to be explicitly returned. else if (TargetOS::IsWindows && callConvIsInstanceMethodCallConv(info.compCallConv)) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #endif #if defined(TARGET_X86) else if (info.compCallConv != CorInfoCallConvExtension::Managed) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #endif else { // return void op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID); } #endif // !defined(TARGET_AMD64) } else if (varTypeIsStruct(info.compRetType)) { #if !FEATURE_MULTIREG_RET // For both ARM architectures the HFA native types are maintained as structs. // Also on System V AMD64 the multireg structs returns are also left as structs. 
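// Note (editorial): with no multireg struct returns and no return buffer (that case was
// handled above), the native return type here should already be a scalar, hence the assert.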
noway_assert(info.compRetNativeType != TYP_STRUCT); #endif op2 = impFixupStructReturnType(op2, retClsHnd, info.compCallConv); // return op2 var_types returnType = info.compRetType; op1 = gtNewOperNode(GT_RETURN, genActualType(returnType), op2); } else { // return op2 op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2); } // We must have imported a tailcall and jumped to RET if (isTailCall) { assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode)); opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES // impImportCall() would have already appended TYP_VOID calls if (info.compRetType == TYP_VOID) { return true; } } impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); #ifdef DEBUG // Remember at which BC offset the tree was finished impNoteLastILoffs(); #endif return true; } /***************************************************************************** * Mark the block as unimported. * Note that the caller is responsible for calling impImportBlockPending(), * with the appropriate stack-state */ inline void Compiler::impReimportMarkBlock(BasicBlock* block) { #ifdef DEBUG if (verbose && (block->bbFlags & BBF_IMPORTED)) { printf("\n" FMT_BB " will be reimported\n", block->bbNum); } #endif block->bbFlags &= ~BBF_IMPORTED; } /***************************************************************************** * Mark the successors of the given block as unimported. * Note that the caller is responsible for calling impImportBlockPending() * for all the successors, with the appropriate stack-state. */ void Compiler::impReimportMarkSuccessors(BasicBlock* block) { for (BasicBlock* const succBlock : block->Succs()) { impReimportMarkBlock(succBlock); } } /***************************************************************************** * * Filter wrapper to handle only passed in exception code * from it). */ LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam) { if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION) { return EXCEPTION_EXECUTE_HANDLER; } return EXCEPTION_CONTINUE_SEARCH; } void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart) { assert(block->hasTryIndex()); assert(!compIsForInlining()); unsigned tryIndex = block->getTryIndex(); EHblkDsc* HBtab = ehGetDsc(tryIndex); if (isTryStart) { assert(block->bbFlags & BBF_TRY_BEG); // The Stack must be empty // if (block->bbStkDepth != 0) { BADCODE("Evaluation stack must be empty on entry into a try block"); } } // Save the stack contents, we'll need to restore it later // SavedStack blockState; impSaveStackState(&blockState, false); while (HBtab != nullptr) { if (isTryStart) { // Are we verifying that an instance constructor properly initializes it's 'this' pointer once? // We do not allow the 'this' pointer to be uninitialized when entering most kinds try regions // if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init)) { // We trigger an invalid program exception here unless we have a try/fault region. // if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter()) { BADCODE( "The 'this' pointer of an instance constructor is not intialized upon entry to a try region"); } else { // Allow a try/fault region to proceed. assert(HBtab->HasFaultHandler()); } } } // Recursively process the handler block, if we haven't already done so. 
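// ("Process" here means: reset the entry stack state to empty, push the caught exception
// object if the handler receives one, and queue the handler block for importation.)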
BasicBlock* hndBegBB = HBtab->ebdHndBeg; if (((hndBegBB->bbFlags & BBF_IMPORTED) == 0) && (impGetPendingBlockMember(hndBegBB) == 0)) { // Construct the proper verification stack state // either empty or one that contains just // the Exception Object that we are dealing with // verCurrentState.esStackDepth = 0; if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp)) { CORINFO_CLASS_HANDLE clsHnd; if (HBtab->HasFilter()) { clsHnd = impGetObjectClass(); } else { CORINFO_RESOLVED_TOKEN resolvedToken; resolvedToken.tokenContext = impTokenLookupContextHandle; resolvedToken.tokenScope = info.compScopeHnd; resolvedToken.token = HBtab->ebdTyp; resolvedToken.tokenType = CORINFO_TOKENKIND_Class; info.compCompHnd->resolveToken(&resolvedToken); clsHnd = resolvedToken.hClass; } // push catch arg the stack, spill to a temp if necessary // Note: can update HBtab->ebdHndBeg! hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd, false); } // Queue up the handler for importing // impImportBlockPending(hndBegBB); } // Process the filter block, if we haven't already done so. if (HBtab->HasFilter()) { /* @VERIFICATION : Ideally the end of filter state should get propagated to the catch handler, this is an incompleteness, but is not a security/compliance issue, since the only interesting state is the 'thisInit' state. */ BasicBlock* filterBB = HBtab->ebdFilter; if (((filterBB->bbFlags & BBF_IMPORTED) == 0) && (impGetPendingBlockMember(filterBB) == 0)) { verCurrentState.esStackDepth = 0; // push catch arg the stack, spill to a temp if necessary // Note: can update HBtab->ebdFilter! const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB); filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter); impImportBlockPending(filterBB); } } // This seems redundant ....?? if (verTrackObjCtorInitState && HBtab->HasFaultHandler()) { /* Recursively process the handler block */ verCurrentState.esStackDepth = 0; // Queue up the fault handler for importing // impImportBlockPending(HBtab->ebdHndBeg); } // Now process our enclosing try index (if any) // tryIndex = HBtab->ebdEnclosingTryIndex; if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX) { HBtab = nullptr; } else { HBtab = ehGetDsc(tryIndex); } } // Restore the stack contents impRestoreStackState(&blockState); } //*************************************************************** // Import the instructions for the given basic block. Perform // verification, throwing an exception on failure. Push any successor blocks that are enabled for the first // time, or whose verification pre-state is changed. #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif void Compiler::impImportBlock(BasicBlock* block) { // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to // handle them specially. In particular, there is no IL to import for them, but we do need // to mark them as imported and put their successors on the pending import list. 
if (block->bbFlags & BBF_INTERNAL) { JITDUMP("Marking BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", block->bbNum); block->bbFlags |= BBF_IMPORTED; for (BasicBlock* const succBlock : block->Succs()) { impImportBlockPending(succBlock); } return; } bool markImport; assert(block); /* Make the block globaly available */ compCurBB = block; #ifdef DEBUG /* Initialize the debug variables */ impCurOpcName = "unknown"; impCurOpcOffs = block->bbCodeOffs; #endif /* Set the current stack state to the merged result */ verResetCurrentState(block, &verCurrentState); /* Now walk the code and import the IL into GenTrees */ struct FilterVerificationExceptionsParam { Compiler* pThis; BasicBlock* block; }; FilterVerificationExceptionsParam param; param.pThis = this; param.block = block; PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param) { /* @VERIFICATION : For now, the only state propagation from try to it's handler is "thisInit" state (stack is empty at start of try). In general, for state that we track in verification, we need to model the possibility that an exception might happen at any IL instruction, so we really need to merge all states that obtain between IL instructions in a try block into the start states of all handlers. However we do not allow the 'this' pointer to be uninitialized when entering most kinds try regions (only try/fault are allowed to have an uninitialized this pointer on entry to the try) Fortunately, the stack is thrown away when an exception leads to a handler, so we don't have to worry about that. We DO, however, have to worry about the "thisInit" state. But only for the try/fault case. The only allowed transition is from TIS_Uninit to TIS_Init. So for a try/fault region for the fault handler block we will merge the start state of the try begin and the post-state of each block that is part of this try region */ // merge the start state of the try begin // if (pParam->block->bbFlags & BBF_TRY_BEG) { pParam->pThis->impVerifyEHBlock(pParam->block, true); } pParam->pThis->impImportBlockCode(pParam->block); // As discussed above: // merge the post-state of each block that is part of this try region // if (pParam->block->hasTryIndex()) { pParam->pThis->impVerifyEHBlock(pParam->block, false); } } PAL_EXCEPT_FILTER(FilterVerificationExceptions) { verHandleVerificationFailure(block DEBUGARG(false)); } PAL_ENDTRY if (compDonotInline()) { return; } assert(!compDonotInline()); markImport = false; SPILLSTACK: unsigned baseTmp = NO_BASE_TMP; // input temps assigned to successor blocks bool reimportSpillClique = false; BasicBlock* tgtBlock = nullptr; /* If the stack is non-empty, we might have to spill its contents */ if (verCurrentState.esStackDepth != 0) { impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something // on the stack, its lifetime is hard to determine, simply // don't reuse such temps. Statement* addStmt = nullptr; /* Do the successors of 'block' have any other predecessors ? We do not want to do some of the optimizations related to multiRef if we can reimport blocks */ unsigned multRef = impCanReimport ? unsigned(~0) : 0; switch (block->bbJumpKind) { case BBJ_COND: addStmt = impExtractLastStmt(); assert(addStmt->GetRootNode()->gtOper == GT_JTRUE); /* Note if the next block has more than one ancestor */ multRef |= block->bbNext->bbRefs; /* Does the next block have temps assigned? 
*/ baseTmp = block->bbNext->bbStkTempsIn; tgtBlock = block->bbNext; if (baseTmp != NO_BASE_TMP) { break; } /* Try the target of the jump then */ multRef |= block->bbJumpDest->bbRefs; baseTmp = block->bbJumpDest->bbStkTempsIn; tgtBlock = block->bbJumpDest; break; case BBJ_ALWAYS: multRef |= block->bbJumpDest->bbRefs; baseTmp = block->bbJumpDest->bbStkTempsIn; tgtBlock = block->bbJumpDest; break; case BBJ_NONE: multRef |= block->bbNext->bbRefs; baseTmp = block->bbNext->bbStkTempsIn; tgtBlock = block->bbNext; break; case BBJ_SWITCH: addStmt = impExtractLastStmt(); assert(addStmt->GetRootNode()->gtOper == GT_SWITCH); for (BasicBlock* const tgtBlock : block->SwitchTargets()) { multRef |= tgtBlock->bbRefs; // Thanks to spill cliques, we should have assigned all or none assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn)); baseTmp = tgtBlock->bbStkTempsIn; if (multRef > 1) { break; } } break; case BBJ_CALLFINALLY: case BBJ_EHCATCHRET: case BBJ_RETURN: case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: case BBJ_THROW: NO_WAY("can't have 'unreached' end of BB with non-empty stack"); break; default: noway_assert(!"Unexpected bbJumpKind"); break; } assert(multRef >= 1); /* Do we have a base temp number? */ bool newTemps = (baseTmp == NO_BASE_TMP); if (newTemps) { /* Grab enough temps for the whole stack */ baseTmp = impGetSpillTmpBase(block); } /* Spill all stack entries into temps */ unsigned level, tempNum; JITDUMP("\nSpilling stack entries into temps\n"); for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++) { GenTree* tree = verCurrentState.esStack[level].val; /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from the other. This should merge to a byref in unverifiable code. However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the successor would be imported assuming there was a TYP_I_IMPL on the stack. Thus the value would not get GC-tracked. Hence, change the temp to TYP_BYREF and reimport the successors. Note: We should only allow this in unverifiable code. */ if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL) { lvaTable[tempNum].lvType = TYP_BYREF; impReimportMarkSuccessors(block); markImport = true; } #ifdef TARGET_64BIT if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT) { // Some other block in the spill clique set this to "int", but now we have "native int". // Change the type and go back to re-import any blocks that used the wrong type. lvaTable[tempNum].lvType = TYP_I_IMPL; reimportSpillClique = true; } else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL) { // Spill clique has decided this should be "native int", but this block only pushes an "int". // Insert a sign-extension to "native int" so we match the clique. verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); } // Consider the case where one branch left a 'byref' on the stack and the other leaves // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64 // behavior instead of asserting and then generating bad code (where we save/restore the // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been // imported already, we need to change the type of the local and reimport the spill clique. 
// If the 'byref' side has imported, we insert a cast from int to 'native int' to match // the 'byref' size. if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT) { // Some other block in the spill clique set this to "int", but now we have "byref". // Change the type and go back to re-import any blocks that used the wrong type. lvaTable[tempNum].lvType = TYP_BYREF; reimportSpillClique = true; } else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF) { // Spill clique has decided this should be "byref", but this block only pushes an "int". // Insert a sign-extension to "native int" so we match the clique size. verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); } #endif // TARGET_64BIT if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT) { // Some other block in the spill clique set this to "float", but now we have "double". // Change the type and go back to re-import any blocks that used the wrong type. lvaTable[tempNum].lvType = TYP_DOUBLE; reimportSpillClique = true; } else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE) { // Spill clique has decided this should be "double", but this block only pushes a "float". // Insert a cast to "double" so we match the clique. verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, false, TYP_DOUBLE); } /* If addStmt has a reference to tempNum (can only happen if we are spilling to the temps already used by a previous block), we need to spill addStmt */ if (addStmt != nullptr && !newTemps && gtHasRef(addStmt->GetRootNode(), tempNum)) { GenTree* addTree = addStmt->GetRootNode(); if (addTree->gtOper == GT_JTRUE) { GenTree* relOp = addTree->AsOp()->gtOp1; assert(relOp->OperIsCompare()); var_types type = genActualType(relOp->AsOp()->gtOp1->TypeGet()); if (gtHasRef(relOp->AsOp()->gtOp1, tempNum)) { unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1")); impAssignTempGen(temp, relOp->AsOp()->gtOp1, level); type = genActualType(lvaTable[temp].TypeGet()); relOp->AsOp()->gtOp1 = gtNewLclvNode(temp, type); } if (gtHasRef(relOp->AsOp()->gtOp2, tempNum)) { unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2")); impAssignTempGen(temp, relOp->AsOp()->gtOp2, level); type = genActualType(lvaTable[temp].TypeGet()); relOp->AsOp()->gtOp2 = gtNewLclvNode(temp, type); } } else { assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->AsOp()->gtOp1->TypeGet())); unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH")); impAssignTempGen(temp, addTree->AsOp()->gtOp1, level); addTree->AsOp()->gtOp1 = gtNewLclvNode(temp, genActualType(addTree->AsOp()->gtOp1->TypeGet())); } } /* Spill the stack entry, and replace with the temp */ if (!impSpillStackEntry(level, tempNum #ifdef DEBUG , true, "Spill Stack Entry" #endif )) { if (markImport) { BADCODE("bad stack state"); } // Oops. Something went wrong when spilling. Bad code. 
verHandleVerificationFailure(block DEBUGARG(true)); goto SPILLSTACK; } } /* Put back the 'jtrue'/'switch' if we removed it earlier */ if (addStmt != nullptr) { impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE); } } // Some of the append/spill logic works on compCurBB assert(compCurBB == block); /* Save the tree list in the block */ impEndTreeList(block); // impEndTreeList sets BBF_IMPORTED on the block // We do *NOT* want to set it later than this because // impReimportSpillClique might clear it if this block is both a // predecessor and successor in the current spill clique assert(block->bbFlags & BBF_IMPORTED); // If we had a int/native int, or float/double collision, we need to re-import if (reimportSpillClique) { // This will re-import all the successors of block (as well as each of their predecessors) impReimportSpillClique(block); // For blocks that haven't been imported yet, we still need to mark them as pending import. for (BasicBlock* const succ : block->Succs()) { if ((succ->bbFlags & BBF_IMPORTED) == 0) { impImportBlockPending(succ); } } } else // the normal case { // otherwise just import the successors of block /* Does this block jump to any other blocks? */ for (BasicBlock* const succ : block->Succs()) { impImportBlockPending(succ); } } } #ifdef _PREFAST_ #pragma warning(pop) #endif /*****************************************************************************/ // // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in // impPendingBlockMembers). Merges the current verification state into the verification state of "block" // (its "pre-state"). void Compiler::impImportBlockPending(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\nimpImportBlockPending for " FMT_BB "\n", block->bbNum); } #endif // We will add a block to the pending set if it has not already been imported (or needs to be re-imported), // or if it has, but merging in a predecessor's post-state changes the block's pre-state. // (When we're doing verification, we always attempt the merge to detect verification errors.) // If the block has not been imported, add to pending set. bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0); // Initialize bbEntryState just the first time we try to add this block to the pending list // Just because bbEntryState is NULL, doesn't mean the pre-state wasn't previously set // We use NULL to indicate the 'common' state to avoid memory allocation if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) && (impGetPendingBlockMember(block) == 0)) { verInitBBEntryState(block, &verCurrentState); assert(block->bbStkDepth == 0); block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth); assert(addToPending); assert(impGetPendingBlockMember(block) == 0); } else { // The stack should have the same height on entry to the block from all its predecessors. 
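        // Note: bbStkDepth was recorded the first time some predecessor reached this block,
        // so a different depth now means the IL joins control-flow paths with mismatched
        // stack heights; we treat that as bad (or at least unsupported) IL via NO_WAY below.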
if (block->bbStkDepth != verCurrentState.esStackDepth) { #ifdef DEBUG char buffer[400]; sprintf_s(buffer, sizeof(buffer), "Block at offset %4.4x to %4.4x in %0.200s entered with different stack depths.\n" "Previous depth was %d, current depth is %d", block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth, verCurrentState.esStackDepth); buffer[400 - 1] = 0; NO_WAY(buffer); #else NO_WAY("Block entered with different stack depths"); #endif } if (!addToPending) { return; } if (block->bbStkDepth > 0) { // We need to fix the types of any spill temps that might have changed: // int->native int, float->double, int->byref, etc. impRetypeEntryStateTemps(block); } // OK, we must add to the pending list, if it's not already in it. if (impGetPendingBlockMember(block) != 0) { return; } } // Get an entry to add to the pending list PendingDsc* dsc; if (impPendingFree) { // We can reuse one of the freed up dscs. dsc = impPendingFree; impPendingFree = dsc->pdNext; } else { // We have to create a new dsc dsc = new (this, CMK_Unknown) PendingDsc; } dsc->pdBB = block; dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth; dsc->pdThisPtrInit = verCurrentState.thisInitialized; // Save the stack trees for later if (verCurrentState.esStackDepth) { impSaveStackState(&dsc->pdSavedStack, false); } // Add the entry to the pending list dsc->pdNext = impPendingList; impPendingList = dsc; impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set. // Various assertions require us to now to consider the block as not imported (at least for // the final time...) block->bbFlags &= ~BBF_IMPORTED; #ifdef DEBUG if (verbose && 0) { printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum); } #endif } /*****************************************************************************/ // // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in // impPendingBlockMembers). Does *NOT* change the existing "pre-state" of the block. void Compiler::impReimportBlockPending(BasicBlock* block) { JITDUMP("\nimpReimportBlockPending for " FMT_BB, block->bbNum); assert(block->bbFlags & BBF_IMPORTED); // OK, we must add to the pending list, if it's not already in it. if (impGetPendingBlockMember(block) != 0) { return; } // Get an entry to add to the pending list PendingDsc* dsc; if (impPendingFree) { // We can reuse one of the freed up dscs. dsc = impPendingFree; impPendingFree = dsc->pdNext; } else { // We have to create a new dsc dsc = new (this, CMK_ImpStack) PendingDsc; } dsc->pdBB = block; if (block->bbEntryState) { dsc->pdThisPtrInit = block->bbEntryState->thisInitialized; dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth; dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack; } else { dsc->pdThisPtrInit = TIS_Bottom; dsc->pdSavedStack.ssDepth = 0; dsc->pdSavedStack.ssTrees = nullptr; } // Add the entry to the pending list dsc->pdNext = impPendingList; impPendingList = dsc; impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set. // Various assertions require us to now to consider the block as not imported (at least for // the final time...) 
block->bbFlags &= ~BBF_IMPORTED; #ifdef DEBUG if (verbose && 0) { printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum); } #endif } void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp) { if (comp->impBlockListNodeFreeList == nullptr) { return comp->getAllocator(CMK_BasicBlock).allocate<BlockListNode>(1); } else { BlockListNode* res = comp->impBlockListNodeFreeList; comp->impBlockListNodeFreeList = res->m_next; return res; } } void Compiler::FreeBlockListNode(Compiler::BlockListNode* node) { node->m_next = impBlockListNodeFreeList; impBlockListNodeFreeList = node; } void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback) { bool toDo = true; noway_assert(!fgComputePredsDone); if (!fgCheapPredsValid) { fgComputeCheapPreds(); } BlockListNode* succCliqueToDo = nullptr; BlockListNode* predCliqueToDo = new (this) BlockListNode(block); while (toDo) { toDo = false; // Look at the successors of every member of the predecessor to-do list. while (predCliqueToDo != nullptr) { BlockListNode* node = predCliqueToDo; predCliqueToDo = node->m_next; BasicBlock* blk = node->m_blk; FreeBlockListNode(node); for (BasicBlock* const succ : blk->Succs()) { // If it's not already in the clique, add it, and also add it // as a member of the successor "toDo" set. if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0) { callback->Visit(SpillCliqueSucc, succ); impSpillCliqueSetMember(SpillCliqueSucc, succ, 1); succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo); toDo = true; } } } // Look at the predecessors of every member of the successor to-do list. while (succCliqueToDo != nullptr) { BlockListNode* node = succCliqueToDo; succCliqueToDo = node->m_next; BasicBlock* blk = node->m_blk; FreeBlockListNode(node); for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next) { BasicBlock* predBlock = pred->block; // If it's not already in the clique, add it, and also add it // as a member of the predecessor "toDo" set. if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0) { callback->Visit(SpillCliquePred, predBlock); impSpillCliqueSetMember(SpillCliquePred, predBlock, 1); predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo); toDo = true; } } } } // If this fails, it means we didn't walk the spill clique properly and somehow managed // miss walking back to include the predecessor we started from. // This most likely cause: missing or out of date bbPreds assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0); } void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) { if (predOrSucc == SpillCliqueSucc) { assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor. blk->bbStkTempsIn = m_baseTmp; } else { assert(predOrSucc == SpillCliquePred); assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor. blk->bbStkTempsOut = m_baseTmp; } } void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) { // For Preds we could be a little smarter and just find the existing store // and re-type it/add a cast, but that is complicated and hopefully very rare, so // just re-import the whole block (just like we do for successors) if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0)) { // If we haven't imported this block and we're not going to (because it isn't on // the pending list) then just ignore it for now. 
// This block has either never been imported (EntryState == NULL) or it failed // verification. Neither state requires us to force it to be imported now. assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION)); return; } // For successors we have a valid verCurrentState, so just mark them for reimport // the 'normal' way // Unlike predecessors, we *DO* need to reimport the current block because the // initial import had the wrong entry state types. // Similarly, blocks that are currently on the pending list, still need to call // impImportBlockPending to fixup their entry state. if (predOrSucc == SpillCliqueSucc) { m_pComp->impReimportMarkBlock(blk); // Set the current stack state to that of the blk->bbEntryState m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState); assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry()); m_pComp->impImportBlockPending(blk); } else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0)) { // As described above, we are only visiting predecessors so they can // add the appropriate casts, since we have already done that for the current // block, it does not need to be reimported. // Nor do we need to reimport blocks that are still pending, but not yet // imported. // // For predecessors, we have no state to seed the EntryState, so we just have // to assume the existing one is correct. // If the block is also a successor, it will get the EntryState properly // updated when it is visited as a successor in the above "if" block. assert(predOrSucc == SpillCliquePred); m_pComp->impReimportBlockPending(blk); } } // Re-type the incoming lclVar nodes to match the varDsc. void Compiler::impRetypeEntryStateTemps(BasicBlock* blk) { if (blk->bbEntryState != nullptr) { EntryState* es = blk->bbEntryState; for (unsigned level = 0; level < es->esStackDepth; level++) { GenTree* tree = es->esStack[level].val; if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD)) { es->esStack[level].val->gtType = lvaGetDesc(tree->AsLclVarCommon())->TypeGet(); } } } } unsigned Compiler::impGetSpillTmpBase(BasicBlock* block) { if (block->bbStkTempsOut != NO_BASE_TMP) { return block->bbStkTempsOut; } #ifdef DEBUG if (verbose) { printf("\n*************** In impGetSpillTmpBase(" FMT_BB ")\n", block->bbNum); } #endif // DEBUG // Otherwise, choose one, and propagate to all members of the spill clique. // Grab enough temps for the whole stack. unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries")); SetSpillTempsBase callback(baseTmp); // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor // to one spill clique, and similarly can only be the successor to one spill clique impWalkSpillCliqueFromPred(block, &callback); return baseTmp; } void Compiler::impReimportSpillClique(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\n*************** In impReimportSpillClique(" FMT_BB ")\n", block->bbNum); } #endif // DEBUG // If we get here, it is because this block is already part of a spill clique // and one predecessor had an outgoing live stack slot of type int, and this // block has an outgoing live stack slot of type native int. // We need to reset these before traversal because they have already been set // by the previous walk to determine all the members of the spill clique. 
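    // Conceptually, the walk done by impWalkSpillCliqueFromPred is a small worklist fixpoint,
    // roughly:
    //
    //     preds = { block }, succs = { }
    //     repeat until nothing changes:
    //         for each newly added P in preds: add every successor of P to succs
    //         for each newly added S in succs: add every predecessor of S to preds
    //
    // so every block that shares incoming/outgoing spill temps with 'block' ends up in the
    // same clique and gets retyped/reimported consistently.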
impInlineRoot()->impSpillCliquePredMembers.Reset(); impInlineRoot()->impSpillCliqueSuccMembers.Reset(); ReimportSpillClique callback(this); impWalkSpillCliqueFromPred(block, &callback); } // Set the pre-state of "block" (which should not have a pre-state allocated) to // a copy of "srcState", cloning tree pointers as required. void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState) { if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom) { block->bbEntryState = nullptr; return; } block->bbEntryState = getAllocator(CMK_Unknown).allocate<EntryState>(1); // block->bbEntryState.esRefcount = 1; block->bbEntryState->esStackDepth = srcState->esStackDepth; block->bbEntryState->thisInitialized = TIS_Bottom; if (srcState->esStackDepth > 0) { block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]); unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry); memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize); for (unsigned level = 0; level < srcState->esStackDepth; level++) { GenTree* tree = srcState->esStack[level].val; block->bbEntryState->esStack[level].val = gtCloneExpr(tree); } } if (verTrackObjCtorInitState) { verSetThisInit(block, srcState->thisInitialized); } return; } void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis) { assert(tis != TIS_Bottom); // Precondition. if (block->bbEntryState == nullptr) { block->bbEntryState = new (this, CMK_Unknown) EntryState(); } block->bbEntryState->thisInitialized = tis; } /* * Resets the current state to the state at the start of the basic block */ void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState) { if (block->bbEntryState == nullptr) { destState->esStackDepth = 0; destState->thisInitialized = TIS_Bottom; return; } destState->esStackDepth = block->bbEntryState->esStackDepth; if (destState->esStackDepth > 0) { unsigned stackSize = destState->esStackDepth * sizeof(StackEntry); memcpy(destState->esStack, block->bbStackOnEntry(), stackSize); } destState->thisInitialized = block->bbThisOnEntry(); return; } ThisInitState BasicBlock::bbThisOnEntry() const { return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom; } unsigned BasicBlock::bbStackDepthOnEntry() const { return (bbEntryState ? 
bbEntryState->esStackDepth : 0); } void BasicBlock::bbSetStack(void* stackBuffer) { assert(bbEntryState); assert(stackBuffer); bbEntryState->esStack = (StackEntry*)stackBuffer; } StackEntry* BasicBlock::bbStackOnEntry() const { assert(bbEntryState); return bbEntryState->esStack; } void Compiler::verInitCurrentState() { verTrackObjCtorInitState = false; verCurrentState.thisInitialized = TIS_Bottom; // initialize stack info verCurrentState.esStackDepth = 0; assert(verCurrentState.esStack != nullptr); // copy current state to entry state of first BB verInitBBEntryState(fgFirstBB, &verCurrentState); } Compiler* Compiler::impInlineRoot() { if (impInlineInfo == nullptr) { return this; } else { return impInlineInfo->InlineRoot; } } BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk) { if (predOrSucc == SpillCliquePred) { return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd()); } else { assert(predOrSucc == SpillCliqueSucc); return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd()); } } void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val) { if (predOrSucc == SpillCliquePred) { impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val); } else { assert(predOrSucc == SpillCliqueSucc); impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val); } } /***************************************************************************** * * Convert the instrs ("import") into our internal format (trees). The * basic flowgraph has already been constructed and is passed in. */ void Compiler::impImport() { #ifdef DEBUG if (verbose) { printf("*************** In impImport() for %s\n", info.compFullName); } #endif Compiler* inlineRoot = impInlineRoot(); if (info.compMaxStack <= SMALL_STACK_SIZE) { impStkSize = SMALL_STACK_SIZE; } else { impStkSize = info.compMaxStack; } if (this == inlineRoot) { // Allocate the stack contents verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize]; } else { // This is the inlinee compiler, steal the stack from the inliner compiler // (after ensuring that it is large enough). if (inlineRoot->impStkSize < impStkSize) { inlineRoot->impStkSize = impStkSize; inlineRoot->verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize]; } verCurrentState.esStack = inlineRoot->verCurrentState.esStack; } // initialize the entry state at start of method verInitCurrentState(); // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase). if (this == inlineRoot) // These are only used on the root of the inlining tree. { // We have initialized these previously, but to size 0. Make them larger. impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2); impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2); impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2); } inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2); inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2); inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2); impBlockListNodeFreeList = nullptr; #ifdef DEBUG impLastILoffsStmt = nullptr; impNestedStackSpill = false; #endif impBoxTemp = BAD_VAR_NUM; impPendingList = impPendingFree = nullptr; // Skip leading internal blocks. // These can arise from needing a leading scratch BB, from EH normalization, and from OSR entry redirects. 
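    // Note: the loop below just follows BBJ_NONE fall-throughs (and, under OSR, BBJ_ALWAYS
    // redirects) until it reaches the first block with real IL; that block becomes the
    // entry point for importation.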
// BasicBlock* entryBlock = fgFirstBB; while (entryBlock->bbFlags & BBF_INTERNAL) { JITDUMP("Marking leading BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", entryBlock->bbNum); entryBlock->bbFlags |= BBF_IMPORTED; if (entryBlock->bbJumpKind == BBJ_NONE) { entryBlock = entryBlock->bbNext; } else if (opts.IsOSR() && (entryBlock->bbJumpKind == BBJ_ALWAYS)) { entryBlock = entryBlock->bbJumpDest; } else { assert(!"unexpected bbJumpKind in entry sequence"); } } // Note for OSR we'd like to be able to verify this block must be // stack empty, but won't know that until we've imported...so instead // we'll BADCODE out if we mess up. // // (the concern here is that the runtime asks us to OSR a // different IL version than the one that matched the method that // triggered OSR). This should not happen but I might have the // IL versioning stuff wrong. // // TODO: we also currently expect this block to be a join point, // which we should verify over when we find jump targets. impImportBlockPending(entryBlock); /* Import blocks in the worker-list until there are no more */ while (impPendingList) { /* Remove the entry at the front of the list */ PendingDsc* dsc = impPendingList; impPendingList = impPendingList->pdNext; impSetPendingBlockMember(dsc->pdBB, 0); /* Restore the stack state */ verCurrentState.thisInitialized = dsc->pdThisPtrInit; verCurrentState.esStackDepth = dsc->pdSavedStack.ssDepth; if (verCurrentState.esStackDepth) { impRestoreStackState(&dsc->pdSavedStack); } /* Add the entry to the free list for reuse */ dsc->pdNext = impPendingFree; impPendingFree = dsc; /* Now import the block */ if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION) { verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true)); impEndTreeList(dsc->pdBB); } else { impImportBlock(dsc->pdBB); if (compDonotInline()) { return; } if (compIsForImportOnly()) { return; } } } #ifdef DEBUG if (verbose && info.compXcptnsCount) { printf("\nAfter impImport() added block for try,catch,finally"); fgDispBasicBlocks(); printf("\n"); } // Used in impImportBlockPending() for STRESS_CHK_REIMPORT for (BasicBlock* const block : Blocks()) { block->bbFlags &= ~BBF_VISITED; } #endif } // Checks if a typeinfo (usually stored in the type stack) is a struct. // The invariant here is that if it's not a ref or a method and has a class handle // it's a valuetype bool Compiler::impIsValueType(typeInfo* pTypeInfo) { if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd()) { return true; } else { return false; } } /***************************************************************************** * Check to see if the tree is the address of a local or the address of a field in a local. *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns true. */ bool Compiler::impIsAddressInLocal(const GenTree* tree, GenTree** lclVarTreeOut) { if (tree->gtOper != GT_ADDR) { return false; } GenTree* op = tree->AsOp()->gtOp1; while (op->gtOper == GT_FIELD) { op = op->AsField()->GetFldObj(); if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL. 
{ op = op->AsOp()->gtOp1; } else { return false; } } if (op->gtOper == GT_LCL_VAR) { if (lclVarTreeOut != nullptr) { *lclVarTreeOut = op; } return true; } else { return false; } } //------------------------------------------------------------------------ // impMakeDiscretionaryInlineObservations: make observations that help // determine the profitability of a discretionary inline // // Arguments: // pInlineInfo -- InlineInfo for the inline, or null for the prejit root // inlineResult -- InlineResult accumulating information about this inline // // Notes: // If inlining or prejitting the root, this method also makes // various observations about the method that factor into inline // decisions. It sets `compNativeSizeEstimate` as a side effect. void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult) { assert((pInlineInfo != nullptr && compIsForInlining()) || // Perform the actual inlining. (pInlineInfo == nullptr && !compIsForInlining()) // Calculate the static inlining hint for ngen. ); // If we're really inlining, we should just have one result in play. assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult)); // If this is a "forceinline" method, the JIT probably shouldn't have gone // to the trouble of estimating the native code size. Even if it did, it // shouldn't be relying on the result of this method. assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE); // Note if the caller contains NEWOBJ or NEWARR. Compiler* rootCompiler = impInlineRoot(); if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0) { inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY); } if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0) { inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ); } bool calleeIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0; bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0; if (isSpecialMethod) { if (calleeIsStatic) { inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR); } else { inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR); } } else if (!calleeIsStatic) { // Callee is an instance method. // // Check if the callee has the same 'this' as the root. if (pInlineInfo != nullptr) { GenTree* thisArg = pInlineInfo->iciCall->AsCall()->gtCallThisArg->GetNode(); assert(thisArg); bool isSameThis = impIsThis(thisArg); inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis); } } bool callsiteIsGeneric = (rootCompiler->info.compMethodInfo->args.sigInst.methInstCount != 0) || (rootCompiler->info.compMethodInfo->args.sigInst.classInstCount != 0); bool calleeIsGeneric = (info.compMethodInfo->args.sigInst.methInstCount != 0) || (info.compMethodInfo->args.sigInst.classInstCount != 0); if (!callsiteIsGeneric && calleeIsGeneric) { inlineResult->Note(InlineObservation::CALLSITE_NONGENERIC_CALLS_GENERIC); } // Inspect callee's arguments (and the actual values at the callsite for them) CORINFO_SIG_INFO sig = info.compMethodInfo->args; CORINFO_ARG_LIST_HANDLE sigArg = sig.args; GenTreeCall::Use* argUse = pInlineInfo == nullptr ? nullptr : pInlineInfo->iciCall->AsCall()->gtCallArgs; for (unsigned i = 0; i < info.compMethodInfo->args.numArgs; i++) { CORINFO_CLASS_HANDLE sigClass; CorInfoType corType = strip(info.compCompHnd->getArgType(&sig, sigArg, &sigClass)); GenTree* argNode = argUse == nullptr ? 
nullptr : argUse->GetNode()->gtSkipPutArgType(); if (corType == CORINFO_TYPE_CLASS) { sigClass = info.compCompHnd->getArgClass(&sig, sigArg); } else if (corType == CORINFO_TYPE_VALUECLASS) { inlineResult->Note(InlineObservation::CALLEE_ARG_STRUCT); } else if (corType == CORINFO_TYPE_BYREF) { sigClass = info.compCompHnd->getArgClass(&sig, sigArg); corType = info.compCompHnd->getChildType(sigClass, &sigClass); } if (argNode != nullptr) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE argCls = gtGetClassHandle(argNode, &isExact, &isNonNull); if (argCls != nullptr) { const bool isArgValueType = eeIsValueClass(argCls); // Exact class of the arg is known if (isExact && !isArgValueType) { inlineResult->Note(InlineObservation::CALLSITE_ARG_EXACT_CLS); if ((argCls != sigClass) && (sigClass != nullptr)) { // .. but the signature accepts a less concrete type. inlineResult->Note(InlineObservation::CALLSITE_ARG_EXACT_CLS_SIG_IS_NOT); } } // Arg is a reference type in the signature and a boxed value type was passed. else if (isArgValueType && (corType == CORINFO_TYPE_CLASS)) { inlineResult->Note(InlineObservation::CALLSITE_ARG_BOXED); } } if (argNode->OperIsConst()) { inlineResult->Note(InlineObservation::CALLSITE_ARG_CONST); } argUse = argUse->GetNext(); } sigArg = info.compCompHnd->getArgNext(sigArg); } // Note if the callee's return type is a value type if (info.compMethodInfo->args.retType == CORINFO_TYPE_VALUECLASS) { inlineResult->Note(InlineObservation::CALLEE_RETURNS_STRUCT); } // Note if the callee's class is a promotable struct if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0) { assert(structPromotionHelper != nullptr); if (structPromotionHelper->CanPromoteStructType(info.compClassHnd)) { inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE); } inlineResult->Note(InlineObservation::CALLEE_CLASS_VALUETYPE); } #ifdef FEATURE_SIMD // Note if this method is has SIMD args or return value if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn) { inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD); } #endif // FEATURE_SIMD // Roughly classify callsite frequency. InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED; // If this is a prejit root, or a maximally hot block... if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->isMaxBBWeight())) { frequency = InlineCallsiteFrequency::HOT; } // No training data. Look for loop-like things. // We consider a recursive call loop-like. Do not give the inlining boost to the method itself. // However, give it to things nearby. else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) && (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle)) { frequency = InlineCallsiteFrequency::LOOP; } else if (pInlineInfo->iciBlock->hasProfileWeight() && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT)) { frequency = InlineCallsiteFrequency::WARM; } // Now modify the multiplier based on where we're called from. else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR)) { frequency = InlineCallsiteFrequency::RARE; } else { frequency = InlineCallsiteFrequency::BORING; } // Also capture the block weight of the call site. // // In the prejit root case, assume at runtime there might be a hot call site // for this method, so we won't prematurely conclude this method should never // be inlined. 
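    // Note: the frequency bucket chosen above is only a coarse classification; 'weight' below
    // is the raw bbWeight of the call site itself (or a large synthetic weight for a prejit
    // root, where there is no real call site), and both are reported to the inline policy.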
// weight_t weight = 0; if (pInlineInfo != nullptr) { weight = pInlineInfo->iciBlock->bbWeight; } else { const weight_t prejitHotCallerWeight = 1000000.0; weight = prejitHotCallerWeight; } inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency)); inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, (int)(weight)); bool hasProfile = false; double profileFreq = 0.0; // If the call site has profile data, report the relative frequency of the site. // if ((pInlineInfo != nullptr) && rootCompiler->fgHaveSufficientProfileData()) { const weight_t callSiteWeight = pInlineInfo->iciBlock->bbWeight; const weight_t entryWeight = rootCompiler->fgFirstBB->bbWeight; profileFreq = fgProfileWeightsEqual(entryWeight, 0.0) ? 0.0 : callSiteWeight / entryWeight; hasProfile = true; assert(callSiteWeight >= 0); assert(entryWeight >= 0); } else if (pInlineInfo == nullptr) { // Simulate a hot callsite for PrejitRoot mode. hasProfile = true; profileFreq = 1.0; } inlineResult->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, hasProfile); inlineResult->NoteDouble(InlineObservation::CALLSITE_PROFILE_FREQUENCY, profileFreq); } /***************************************************************************** This method makes STATIC inlining decision based on the IL code. It should not make any inlining decision based on the context. If forceInline is true, then the inlining decision should not depend on performance heuristics (code size, etc.). */ void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle, CORINFO_METHOD_INFO* methInfo, bool forceInline, InlineResult* inlineResult) { unsigned codeSize = methInfo->ILCodeSize; // We shouldn't have made up our minds yet... assert(!inlineResult->IsDecided()); if (methInfo->EHcount) { inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH); return; } if ((methInfo->ILCode == nullptr) || (codeSize == 0)) { inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY); return; } // For now we don't inline varargs (import code can't handle it) if (methInfo->args.isVarArg()) { inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS); return; } // Reject if it has too many locals. // This is currently an implementation limit due to fixed-size arrays in the // inline info, rather than a performance heuristic. inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs); if (methInfo->locals.numArgs > MAX_INL_LCLS) { inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS); return; } // Make sure there aren't too many arguments. // This is currently an implementation limit due to fixed-size arrays in the // inline info, rather than a performance heuristic. 
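    // Note: like the locals check above this is a hard cap; a fatal observation marks the
    // InlineResult as decided, so the remaining checks in this function are not evaluated
    // once either limit is exceeded.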
inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs); if (methInfo->args.numArgs > MAX_INL_ARGS) { inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS); return; } // Note force inline state inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline); // Note IL code size inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize); if (inlineResult->IsFailure()) { return; } // Make sure maxstack is not too big inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack); if (inlineResult->IsFailure()) { return; } } /***************************************************************************** */ void Compiler::impCheckCanInline(GenTreeCall* call, CORINFO_METHOD_HANDLE fncHandle, unsigned methAttr, CORINFO_CONTEXT_HANDLE exactContextHnd, InlineCandidateInfo** ppInlineCandidateInfo, InlineResult* inlineResult) { // Either EE or JIT might throw exceptions below. // If that happens, just don't inline the method. struct Param { Compiler* pThis; GenTreeCall* call; CORINFO_METHOD_HANDLE fncHandle; unsigned methAttr; CORINFO_CONTEXT_HANDLE exactContextHnd; InlineResult* result; InlineCandidateInfo** ppInlineCandidateInfo; } param; memset(&param, 0, sizeof(param)); param.pThis = this; param.call = call; param.fncHandle = fncHandle; param.methAttr = methAttr; param.exactContextHnd = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle); param.result = inlineResult; param.ppInlineCandidateInfo = ppInlineCandidateInfo; bool success = eeRunWithErrorTrap<Param>( [](Param* pParam) { CorInfoInitClassResult initClassResult; #ifdef DEBUG const char* methodName; const char* className; methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className); if (JitConfig.JitNoInline()) { pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE); goto _exit; } #endif /* Try to get the code address/size for the method */ CORINFO_METHOD_INFO methInfo; if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo)) { pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO); goto _exit; } // Profile data allows us to avoid early "too many IL bytes" outs. pParam->result->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, pParam->pThis->fgHaveSufficientProfileData()); bool forceInline; forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE); pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result); if (pParam->result->IsFailure()) { assert(pParam->result->IsNever()); goto _exit; } // Speculatively check if initClass() can be done. // If it can be done, we will try to inline the method. initClassResult = pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */, pParam->exactContextHnd /* context */); if (initClassResult & CORINFO_INITCLASS_DONT_INLINE) { pParam->result->NoteFatal(InlineObservation::CALLSITE_CANT_CLASS_INIT); goto _exit; } // Given the EE the final say in whether to inline or not. 
// This should be last since for verifiable code, this can be expensive /* VM Inline check also ensures that the method is verifiable if needed */ CorInfoInline vmResult; vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle); if (vmResult == INLINE_FAIL) { pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE); } else if (vmResult == INLINE_NEVER) { pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE); } if (pParam->result->IsFailure()) { // Make sure not to report this one. It was already reported by the VM. pParam->result->SetReported(); goto _exit; } /* Get the method properties */ CORINFO_CLASS_HANDLE clsHandle; clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle); unsigned clsAttr; clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle); /* Get the return type */ var_types fncRetType; fncRetType = pParam->call->TypeGet(); #ifdef DEBUG var_types fncRealRetType; fncRealRetType = JITtype2varType(methInfo.args.retType); assert((genActualType(fncRealRetType) == genActualType(fncRetType)) || // <BUGNUM> VSW 288602 </BUGNUM> // In case of IJW, we allow to assign a native pointer to a BYREF. (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) || (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT))); #endif // Allocate an InlineCandidateInfo structure, // // Or, reuse the existing GuardedDevirtualizationCandidateInfo, // which was pre-allocated to have extra room. // InlineCandidateInfo* pInfo; if (pParam->call->IsGuardedDevirtualizationCandidate()) { pInfo = pParam->call->gtInlineCandidateInfo; } else { pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo; // Null out bits we don't use when we're just inlining pInfo->guardedClassHandle = nullptr; pInfo->guardedMethodHandle = nullptr; pInfo->guardedMethodUnboxedEntryHandle = nullptr; pInfo->likelihood = 0; pInfo->requiresInstMethodTableArg = false; } pInfo->methInfo = methInfo; pInfo->ilCallerHandle = pParam->pThis->info.compMethodHnd; pInfo->clsHandle = clsHandle; pInfo->exactContextHnd = pParam->exactContextHnd; pInfo->retExpr = nullptr; pInfo->preexistingSpillTemp = BAD_VAR_NUM; pInfo->clsAttr = clsAttr; pInfo->methAttr = pParam->methAttr; pInfo->initClassResult = initClassResult; pInfo->fncRetType = fncRetType; pInfo->exactContextNeedsRuntimeLookup = false; pInfo->inlinersContext = pParam->pThis->compInlineContext; // Note exactContextNeedsRuntimeLookup is reset later on, // over in impMarkInlineCandidate. *(pParam->ppInlineCandidateInfo) = pInfo; _exit:; }, &param); if (!success) { param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR); } } //------------------------------------------------------------------------ // impInlineRecordArgInfo: record information about an inline candidate argument // // Arguments: // pInlineInfo - inline info for the inline candidate // curArgVal - tree for the caller actual argument value // argNum - logical index of this argument // inlineResult - result of ongoing inline evaluation // // Notes: // // Checks for various inline blocking conditions and makes notes in // the inline info arg table about the properties of the actual. These // properties are used later by impInlineFetchArg to determine how best to // pass the argument into the inlinee. 
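//
// Roughly, the per-argument flags recorded here include:
//
//     argIsLclVar              -- the actual is a plain local variable read
//     argIsInvariant           -- the actual is a constant / invariant expression
//     argHasGlobRef            -- evaluating the actual reads global state
//     argHasSideEff            -- evaluating the actual has other side effects
//     argIsByRefToStructLocal  -- the actual is the address of a local struct (or a field of one)
//
// A MKREFANY actual, or a provably null 'this', aborts the inline outright.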
void Compiler::impInlineRecordArgInfo(InlineInfo* pInlineInfo, GenTree* curArgVal, unsigned argNum, InlineResult* inlineResult) { InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum]; inlCurArgInfo->argNode = curArgVal; // Save the original tree, with PUT_ARG and RET_EXPR. curArgVal = curArgVal->gtSkipPutArgType(); curArgVal = curArgVal->gtRetExprVal(); if (curArgVal->gtOper == GT_MKREFANY) { inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY); return; } GenTree* lclVarTree; const bool isAddressInLocal = impIsAddressInLocal(curArgVal, &lclVarTree); if (isAddressInLocal && varTypeIsStruct(lclVarTree)) { inlCurArgInfo->argIsByRefToStructLocal = true; #ifdef FEATURE_SIMD if (lvaTable[lclVarTree->AsLclVarCommon()->GetLclNum()].lvSIMDType) { pInlineInfo->hasSIMDTypeArgLocalOrReturn = true; } #endif // FEATURE_SIMD } if (curArgVal->gtFlags & GTF_ALL_EFFECT) { inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0; inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0; } if (curArgVal->gtOper == GT_LCL_VAR) { inlCurArgInfo->argIsLclVar = true; /* Remember the "original" argument number */ INDEBUG(curArgVal->AsLclVar()->gtLclILoffs = argNum;) } if (curArgVal->IsInvariant()) { inlCurArgInfo->argIsInvariant = true; if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->AsIntCon()->gtIconVal == 0)) { // Abort inlining at this call site inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS); return; } } bool isExact = false; bool isNonNull = false; inlCurArgInfo->argIsExact = (gtGetClassHandle(curArgVal, &isExact, &isNonNull) != NO_CLASS_HANDLE) && isExact; // If the arg is a local that is address-taken, we can't safely // directly substitute it into the inlinee. // // Previously we'd accomplish this by setting "argHasLdargaOp" but // that has a stronger meaning: that the arg value can change in // the method body. Using that flag prevents type propagation, // which is safe in this case. // // Instead mark the arg as having a caller local ref. if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal)) { inlCurArgInfo->argHasCallerLocalRef = true; } #ifdef DEBUG if (verbose) { if (inlCurArgInfo->argIsThis) { printf("thisArg:"); } else { printf("\nArgument #%u:", argNum); } if (inlCurArgInfo->argIsLclVar) { printf(" is a local var"); } if (inlCurArgInfo->argIsInvariant) { printf(" is a constant"); } if (inlCurArgInfo->argHasGlobRef) { printf(" has global refs"); } if (inlCurArgInfo->argHasCallerLocalRef) { printf(" has caller local ref"); } if (inlCurArgInfo->argHasSideEff) { printf(" has side effects"); } if (inlCurArgInfo->argHasLdargaOp) { printf(" has ldarga effect"); } if (inlCurArgInfo->argHasStargOp) { printf(" has starg effect"); } if (inlCurArgInfo->argIsByRefToStructLocal) { printf(" is byref to a struct local"); } printf("\n"); gtDispTree(curArgVal); printf("\n"); } #endif } //------------------------------------------------------------------------ // impInlineInitVars: setup inline information for inlinee args and locals // // Arguments: // pInlineInfo - inline info for the inline candidate // // Notes: // This method primarily adds caller-supplied info to the inlArgInfo // and sets up the lclVarInfo table. // // For args, the inlArgInfo records properties of the actual argument // including the tree node that produces the arg value. 
This node is // usually the tree node present at the call, but may also differ in // various ways: // - when the call arg is a GT_RET_EXPR, we search back through the ret // expr chain for the actual node. Note this will either be the original // call (which will be a failed inline by this point), or the return // expression from some set of inlines. // - when argument type casting is needed the necessary casts are added // around the argument node. // - if an argument can be simplified by folding then the node here is the // folded value. // // The method may make observations that lead to marking this candidate as // a failed inline. If this happens the initialization is abandoned immediately // to try and reduce the jit time cost for a failed inline. void Compiler::impInlineInitVars(InlineInfo* pInlineInfo) { assert(!compIsForInlining()); GenTreeCall* call = pInlineInfo->iciCall; CORINFO_METHOD_INFO* methInfo = &pInlineInfo->inlineCandidateInfo->methInfo; unsigned clsAttr = pInlineInfo->inlineCandidateInfo->clsAttr; InlArgInfo* inlArgInfo = pInlineInfo->inlArgInfo; InlLclVarInfo* lclVarInfo = pInlineInfo->lclVarInfo; InlineResult* inlineResult = pInlineInfo->inlineResult; // Inlined methods always use the managed calling convention const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo, CorInfoCallConvExtension::Managed); /* init the argument stuct */ memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0])); GenTreeCall::Use* thisArg = call->gtCallThisArg; unsigned argCnt = 0; // Count of the arguments assert((methInfo->args.hasThis()) == (thisArg != nullptr)); if (thisArg != nullptr) { inlArgInfo[0].argIsThis = true; impInlineRecordArgInfo(pInlineInfo, thisArg->GetNode(), argCnt, inlineResult); if (inlineResult->IsFailure()) { return; } /* Increment the argument count */ argCnt++; } /* Record some information about each of the arguments */ bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0; #if USER_ARGS_COME_LAST unsigned typeCtxtArg = (thisArg != nullptr) ? 1 : 0; #else // USER_ARGS_COME_LAST unsigned typeCtxtArg = methInfo->args.totalILArgs(); #endif // USER_ARGS_COME_LAST for (GenTreeCall::Use& use : call->Args()) { if (hasRetBuffArg && (&use == call->gtCallArgs)) { continue; } // Ignore the type context argument if (hasTypeCtxtArg && (argCnt == typeCtxtArg)) { pInlineInfo->typeContextArg = typeCtxtArg; typeCtxtArg = 0xFFFFFFFF; continue; } GenTree* actualArg = gtFoldExpr(use.GetNode()); impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, inlineResult); if (inlineResult->IsFailure()) { return; } /* Increment the argument count */ argCnt++; } /* Make sure we got the arg number right */ assert(argCnt == methInfo->args.totalILArgs()); #ifdef FEATURE_SIMD bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn; #endif // FEATURE_SIMD /* We have typeless opcodes, get type information from the signature */ if (thisArg != nullptr) { lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle); lclVarInfo[0].lclHasLdlocaOp = false; #ifdef FEATURE_SIMD // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase // the inlining multiplier) for anything in that assembly. // But we only need to normalize it if it is a TYP_STRUCT // (which we need to do even if we have already set foundSIMDType). 
if (!foundSIMDType && isSIMDorHWSIMDClass(&(lclVarInfo[0].lclVerTypeInfo))) { foundSIMDType = true; } #endif // FEATURE_SIMD var_types sigType = ((clsAttr & CORINFO_FLG_VALUECLASS) != 0) ? TYP_BYREF : TYP_REF; lclVarInfo[0].lclTypeInfo = sigType; GenTree* thisArgNode = thisArg->GetNode(); assert(varTypeIsGC(thisArgNode->TypeGet()) || // "this" is managed ((thisArgNode->TypeGet() == TYP_I_IMPL) && // "this" is unmgd but the method's class doesnt care (clsAttr & CORINFO_FLG_VALUECLASS))); if (genActualType(thisArgNode->TypeGet()) != genActualType(sigType)) { if (sigType == TYP_REF) { /* The argument cannot be bashed into a ref (see bug 750871) */ inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF); return; } /* This can only happen with byrefs <-> ints/shorts */ assert(sigType == TYP_BYREF); assert((genActualType(thisArgNode->TypeGet()) == TYP_I_IMPL) || (thisArgNode->TypeGet() == TYP_BYREF)); lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL)); } } /* Init the types of the arguments and make sure the types * from the trees match the types in the signature */ CORINFO_ARG_LIST_HANDLE argLst; argLst = methInfo->args.args; unsigned i; for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst)) { var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args); lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst); #ifdef FEATURE_SIMD if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i].lclVerTypeInfo))) { // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've // found a SIMD type, even if this may not be a type we recognize (the assumption is that // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier). foundSIMDType = true; if (sigType == TYP_STRUCT) { var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle()); sigType = structType; } } #endif // FEATURE_SIMD lclVarInfo[i].lclTypeInfo = sigType; lclVarInfo[i].lclHasLdlocaOp = false; /* Does the tree type match the signature type? */ GenTree* inlArgNode = inlArgInfo[i].argNode; if ((sigType != inlArgNode->gtType) || inlArgNode->OperIs(GT_PUTARG_TYPE)) { assert(impCheckImplicitArgumentCoercion(sigType, inlArgNode->gtType)); assert(!varTypeIsStruct(inlArgNode->gtType) && !varTypeIsStruct(sigType)); /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints, but in bad IL cases with caller-callee signature mismatches we can see other types. Intentionally reject cases with mismatches so the jit is more flexible when encountering bad IL. */ bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) || (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) || (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType)); if (!isPlausibleTypeMatch) { inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE); return; } GenTree** pInlArgNode; if (inlArgNode->OperIs(GT_PUTARG_TYPE)) { // There was a widening or narrowing cast. GenTreeUnOp* putArgType = inlArgNode->AsUnOp(); pInlArgNode = &putArgType->gtOp1; inlArgNode = putArgType->gtOp1; } else { // The same size but different type of the arguments. pInlArgNode = &inlArgInfo[i].argNode; } /* Is it a narrowing or widening cast? 
* Widening casts are ok since the value computed is already * normalized to an int (on the IL stack) */ if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType)) { if (sigType == TYP_BYREF) { lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL)); } else if (inlArgNode->gtType == TYP_BYREF) { assert(varTypeIsIntOrI(sigType)); /* If possible bash the BYREF to an int */ if (inlArgNode->IsLocalAddrExpr() != nullptr) { inlArgNode->gtType = TYP_I_IMPL; lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL)); } else { /* Arguments 'int <- byref' cannot be changed */ inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT); return; } } else if (genTypeSize(sigType) < TARGET_POINTER_SIZE) { // Narrowing cast. if (inlArgNode->OperIs(GT_LCL_VAR)) { const unsigned lclNum = inlArgNode->AsLclVarCommon()->GetLclNum(); if (!lvaTable[lclNum].lvNormalizeOnLoad() && sigType == lvaGetRealType(lclNum)) { // We don't need to insert a cast here as the variable // was assigned a normalized value of the right type. continue; } } inlArgNode = gtNewCastNode(TYP_INT, inlArgNode, false, sigType); inlArgInfo[i].argIsLclVar = false; // Try to fold the node in case we have constant arguments. if (inlArgInfo[i].argIsInvariant) { inlArgNode = gtFoldExprConst(inlArgNode); assert(inlArgNode->OperIsConst()); } *pInlArgNode = inlArgNode; } #ifdef TARGET_64BIT else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType)) { // This should only happen for int -> native int widening inlArgNode = gtNewCastNode(genActualType(sigType), inlArgNode, false, sigType); inlArgInfo[i].argIsLclVar = false; /* Try to fold the node in case we have constant arguments */ if (inlArgInfo[i].argIsInvariant) { inlArgNode = gtFoldExprConst(inlArgNode); assert(inlArgNode->OperIsConst()); } *pInlArgNode = inlArgNode; } #endif // TARGET_64BIT } } } /* Init the types of the local variables */ CORINFO_ARG_LIST_HANDLE localsSig; localsSig = methInfo->locals.args; for (i = 0; i < methInfo->locals.numArgs; i++) { bool isPinned; var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned); lclVarInfo[i + argCnt].lclHasLdlocaOp = false; lclVarInfo[i + argCnt].lclTypeInfo = type; if (varTypeIsGC(type)) { if (isPinned) { JITDUMP("Inlinee local #%02u is pinned\n", i); lclVarInfo[i + argCnt].lclIsPinned = true; // Pinned locals may cause inlines to fail. inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS); if (inlineResult->IsFailure()) { return; } } pInlineInfo->numberOfGcRefLocals++; } else if (isPinned) { JITDUMP("Ignoring pin on inlinee local #%02u -- not a GC type\n", i); } lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig); // If this local is a struct type with GC fields, inform the inliner. It may choose to bail // out on the inline. if (type == TYP_STRUCT) { CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle(); DWORD typeFlags = info.compCompHnd->getClassAttribs(lclHandle); if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0) { inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT); if (inlineResult->IsFailure()) { return; } // Do further notification in the case where the call site is rare; some policies do // not track the relative hotness of call sites for "always" inline cases. 
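                // Illustrative example (hypothetical callee local): a local typed as a struct
                // with GC fields, say KeyValuePair<string, string>, has CORINFO_FLG_CONTAINS_GC_PTR
                // set, so CALLEE_HAS_GC_STRUCT was noted above; the extra CALLSITE_RARE_GC_STRUCT
                // note below is only made when the call site's block is marked run-rarely.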
if (pInlineInfo->iciBlock->isRunRarely()) { inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT); if (inlineResult->IsFailure()) { return; } } } } localsSig = info.compCompHnd->getArgNext(localsSig); #ifdef FEATURE_SIMD if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo))) { foundSIMDType = true; if (supportSIMDTypes() && type == TYP_STRUCT) { var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle()); lclVarInfo[i + argCnt].lclTypeInfo = structType; } } #endif // FEATURE_SIMD } #ifdef FEATURE_SIMD if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDorHWSIMDClass(call->AsCall()->gtRetClsHnd)) { foundSIMDType = true; } pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType; #endif // FEATURE_SIMD } //------------------------------------------------------------------------ // impInlineFetchLocal: get a local var that represents an inlinee local // // Arguments: // lclNum -- number of the inlinee local // reason -- debug string describing purpose of the local var // // Returns: // Number of the local to use // // Notes: // This method is invoked only for locals actually used in the // inlinee body. // // Allocates a new temp if necessary, and copies key properties // over from the inlinee local var info. unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason)) { assert(compIsForInlining()); unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum]; if (tmpNum == BAD_VAR_NUM) { const InlLclVarInfo& inlineeLocal = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt]; const var_types lclTyp = inlineeLocal.lclTypeInfo; // The lifetime of this local might span multiple BBs. // So it is a long lifetime local. impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason)); // Copy over key info lvaTable[tmpNum].lvType = lclTyp; lvaTable[tmpNum].lvHasLdAddrOp = inlineeLocal.lclHasLdlocaOp; lvaTable[tmpNum].lvPinned = inlineeLocal.lclIsPinned; lvaTable[tmpNum].lvHasILStoreOp = inlineeLocal.lclHasStlocOp; lvaTable[tmpNum].lvHasMultipleILStoreOp = inlineeLocal.lclHasMultipleStlocOp; // Copy over class handle for ref types. Note this may be a // shared type -- someday perhaps we can get the exact // signature and pass in a more precise type. if (lclTyp == TYP_REF) { assert(lvaTable[tmpNum].lvSingleDef == 0); lvaTable[tmpNum].lvSingleDef = !inlineeLocal.lclHasMultipleStlocOp && !inlineeLocal.lclHasLdlocaOp; if (lvaTable[tmpNum].lvSingleDef) { JITDUMP("Marked V%02u as a single def temp\n", tmpNum); } lvaSetClass(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandleForObjRef()); } if (inlineeLocal.lclVerTypeInfo.IsStruct()) { if (varTypeIsStruct(lclTyp)) { lvaSetStruct(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */); } else { // This is a wrapped primitive. Make sure the verstate knows that lvaTable[tmpNum].lvVerTypeInfo = inlineeLocal.lclVerTypeInfo; } } #ifdef DEBUG // Sanity check that we're properly prepared for gc ref locals. if (varTypeIsGC(lclTyp)) { // Since there are gc locals we should have seen them earlier // and if there was a return value, set up the spill temp. assert(impInlineInfo->HasGcRefLocals()); assert((info.compRetNativeType == TYP_VOID) || fgNeedReturnSpillTemp()); } else { // Make sure all pinned locals count as gc refs. 
assert(!inlineeLocal.lclIsPinned); } #endif // DEBUG } return tmpNum; } //------------------------------------------------------------------------ // impInlineFetchArg: return tree node for argument value in an inlinee // // Arguments: // lclNum -- argument number in inlinee IL // inlArgInfo -- argument info for inlinee // lclVarInfo -- var info for inlinee // // Returns: // Tree for the argument's value. Often an inlinee-scoped temp // GT_LCL_VAR but can be other tree kinds, if the argument // expression from the caller can be directly substituted into the // inlinee body. // // Notes: // Must be used only for arguments -- use impInlineFetchLocal for // inlinee locals. // // Direct substitution is performed when the formal argument cannot // change value in the inlinee body (no starg or ldarga), and the // actual argument expression's value cannot be changed if it is // substituted it into the inlinee body. // // Even if an inlinee-scoped temp is returned here, it may later be // "bashed" to a caller-supplied tree when arguments are actually // passed (see fgInlinePrependStatements). Bashing can happen if // the argument ends up being single use and other conditions are // met. So the contents of the tree returned here may not end up // being the ones ultimately used for the argument. // // This method will side effect inlArgInfo. It should only be called // for actual uses of the argument in the inlinee. GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo) { // Cache the relevant arg and lcl info for this argument. // We will modify argInfo but not lclVarInfo. InlArgInfo& argInfo = inlArgInfo[lclNum]; const InlLclVarInfo& lclInfo = lclVarInfo[lclNum]; const bool argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp; const var_types lclTyp = lclInfo.lclTypeInfo; GenTree* op1 = nullptr; GenTree* argNode = argInfo.argNode->gtSkipPutArgType()->gtRetExprVal(); if (argInfo.argIsInvariant && !argCanBeModified) { // Directly substitute constants or addresses of locals // // Clone the constant. Note that we cannot directly use // argNode in the trees even if !argInfo.argIsUsed as this // would introduce aliasing between inlArgInfo[].argNode and // impInlineExpr. Then gtFoldExpr() could change it, causing // further references to the argument working off of the // bashed copy. op1 = gtCloneExpr(argNode); PREFIX_ASSUME(op1 != nullptr); argInfo.argTmpNum = BAD_VAR_NUM; // We may need to retype to ensure we match the callee's view of the type. // Otherwise callee-pass throughs of arguments can create return type // mismatches that block inlining. // // Note argument type mismatches that prevent inlining should // have been caught in impInlineInitVars. if (op1->TypeGet() != lclTyp) { op1->gtType = genActualType(lclTyp); } } else if (argInfo.argIsLclVar && !argCanBeModified && !argInfo.argHasCallerLocalRef) { // Directly substitute unaliased caller locals for args that cannot be modified // // Use the caller-supplied node if this is the first use. op1 = argNode; unsigned argLclNum = op1->AsLclVarCommon()->GetLclNum(); argInfo.argTmpNum = argLclNum; // Use an equivalent copy if this is the second or subsequent // use. // // Note argument type mismatches that prevent inlining should // have been caught in impInlineInitVars. If inlining is not prevented // but a cast is necessary, we similarly expect it to have been inserted then. // So here we may have argument type mismatches that are benign, for instance // passing a TYP_SHORT local (eg. 
normalized-on-load) as a TYP_INT arg. // The exception is when the inlining means we should start tracking the argument. if (argInfo.argIsUsed || ((lclTyp == TYP_BYREF) && (op1->TypeGet() != TYP_BYREF))) { assert(op1->gtOper == GT_LCL_VAR); assert(lclNum == op1->AsLclVar()->gtLclILoffs); // Create a new lcl var node - remember the argument lclNum op1 = impCreateLocalNode(argLclNum DEBUGARG(op1->AsLclVar()->gtLclILoffs)); // Start tracking things as a byref if the parameter is a byref. if (lclTyp == TYP_BYREF) { op1->gtType = TYP_BYREF; } } } else if (argInfo.argIsByRefToStructLocal && !argInfo.argHasStargOp) { /* Argument is a by-ref address to a struct, a normed struct, or its field. In these cases, don't spill the byref to a local, simply clone the tree and use it. This way we will increase the chance for this byref to be optimized away by a subsequent "dereference" operation. From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal. For example, if the caller is: ldloca.s V_1 // V_1 is a local struct call void Test.ILPart::RunLdargaOnPointerArg(int32*) and the callee being inlined has: .method public static void RunLdargaOnPointerArg(int32* ptrToInts) cil managed ldarga.s ptrToInts call void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**) then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR. */ assert(argNode->TypeGet() == TYP_BYREF || argNode->TypeGet() == TYP_I_IMPL); op1 = gtCloneExpr(argNode); } else { /* Argument is a complex expression - it must be evaluated into a temp */ if (argInfo.argHasTmp) { assert(argInfo.argIsUsed); assert(argInfo.argTmpNum < lvaCount); /* Create a new lcl var node - remember the argument lclNum */ op1 = gtNewLclvNode(argInfo.argTmpNum, genActualType(lclTyp)); /* This is the second or later use of the this argument, so we have to use the temp (instead of the actual arg) */ argInfo.argBashTmpNode = nullptr; } else { /* First time use */ assert(!argInfo.argIsUsed); /* Reserve a temp for the expression. * Use a large size node as we may change it later */ const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg")); lvaTable[tmpNum].lvType = lclTyp; // For ref types, determine the type of the temp. if (lclTyp == TYP_REF) { if (!argCanBeModified) { // If the arg can't be modified in the method // body, use the type of the value, if // known. Otherwise, use the declared type. assert(lvaTable[tmpNum].lvSingleDef == 0); lvaTable[tmpNum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tmpNum); lvaSetClass(tmpNum, argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef()); } else { // Arg might be modified, use the declared type of // the argument. lvaSetClass(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef()); } } assert(!lvaTable[tmpNum].IsAddressExposed()); if (argInfo.argHasLdargaOp) { lvaTable[tmpNum].lvHasLdAddrOp = 1; } if (lclInfo.lclVerTypeInfo.IsStruct()) { if (varTypeIsStruct(lclTyp)) { lvaSetStruct(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */); if (info.compIsVarArgs) { lvaSetStructUsedAsVarArg(tmpNum); } } else { // This is a wrapped primitive. 
Make sure the verstate knows that lvaTable[tmpNum].lvVerTypeInfo = lclInfo.lclVerTypeInfo; } } argInfo.argHasTmp = true; argInfo.argTmpNum = tmpNum; // If we require strict exception order, then arguments must // be evaluated in sequence before the body of the inlined method. // So we need to evaluate them to a temp. // Also, if arguments have global or local references, we need to // evaluate them to a temp before the inlined body as the // inlined body may be modifying the global ref. // TODO-1stClassStructs: We currently do not reuse an existing lclVar // if it is a struct, because it requires some additional handling. if ((!varTypeIsStruct(lclTyp) && !argInfo.argHasSideEff && !argInfo.argHasGlobRef && !argInfo.argHasCallerLocalRef)) { /* Get a *LARGE* LCL_VAR node */ op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp) DEBUGARG(lclNum)); /* Record op1 as the very first use of this argument. If there are no further uses of the arg, we may be able to use the actual arg node instead of the temp. If we do see any further uses, we will clear this. */ argInfo.argBashTmpNode = op1; } else { /* Get a small LCL_VAR node */ op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp)); /* No bashing of this argument */ argInfo.argBashTmpNode = nullptr; } } } // Mark this argument as used. argInfo.argIsUsed = true; return op1; } /****************************************************************************** Is this the original "this" argument to the call being inlined? Note that we do not inline methods with "starg 0", and so we do not need to worry about it. */ bool Compiler::impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo) { assert(compIsForInlining()); return (tree->gtOper == GT_LCL_VAR && tree->AsLclVarCommon()->GetLclNum() == inlArgInfo[0].argTmpNum); } //----------------------------------------------------------------------------- // impInlineIsGuaranteedThisDerefBeforeAnySideEffects: Check if a dereference in // the inlinee can guarantee that the "this" pointer is non-NULL. // // Arguments: // additionalTree - a tree to check for side effects // additionalCallArgs - a list of call args to check for side effects // dereferencedAddress - address expression being dereferenced // inlArgInfo - inlinee argument information // // Notes: // If we haven't hit a branch or a side effect, and we are dereferencing // from 'this' to access a field or make GTF_CALL_NULLCHECK call, // then we can avoid a separate null pointer check. // // The importer stack and current statement list are searched for side effects. // Trees that have been popped of the stack but haven't been appended to the // statement list and have to be checked for side effects may be provided via // additionalTree and additionalCallArgs. 
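//    Illustrative sketch of the pattern this enables (hypothetical inlinee IL):
//        ldarg.0
//        ldfld int32 C::_x
//    When this is the first side-effecting operation in the first block, the field access
//    itself faults on a null 'this' before any other side effect becomes observable, so a
//    separate explicit null check of the inlined 'this' can be omitted.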
// bool Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTree, GenTreeCall::Use* additionalCallArgs, GenTree* dereferencedAddress, InlArgInfo* inlArgInfo) { assert(compIsForInlining()); assert(opts.OptEnabled(CLFLG_INLINING)); BasicBlock* block = compCurBB; if (block != fgFirstBB) { return false; } if (!impInlineIsThis(dereferencedAddress, inlArgInfo)) { return false; } if ((additionalTree != nullptr) && GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTree->gtFlags)) { return false; } for (GenTreeCall::Use& use : GenTreeCall::UseList(additionalCallArgs)) { if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(use.GetNode()->gtFlags)) { return false; } } for (Statement* stmt : StatementList(impStmtList)) { GenTree* expr = stmt->GetRootNode(); if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags)) { return false; } } for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTreeFlags stackTreeFlags = verCurrentState.esStack[level].val->gtFlags; if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags)) { return false; } } return true; } //------------------------------------------------------------------------ // impMarkInlineCandidate: determine if this call can be subsequently inlined // // Arguments: // callNode -- call under scrutiny // exactContextHnd -- context handle for inlining // exactContextNeedsRuntimeLookup -- true if context required runtime lookup // callInfo -- call info from VM // // Notes: // Mostly a wrapper for impMarkInlineCandidateHelper that also undoes // guarded devirtualization for virtual calls where the method we'd // devirtualize to cannot be inlined. void Compiler::impMarkInlineCandidate(GenTree* callNode, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo) { GenTreeCall* call = callNode->AsCall(); // Do the actual evaluation impMarkInlineCandidateHelper(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo); // If this call is an inline candidate or is not a guarded devirtualization // candidate, we're done. if (call->IsInlineCandidate() || !call->IsGuardedDevirtualizationCandidate()) { return; } // If we can't inline the call we'd guardedly devirtualize to, // we undo the guarded devirtualization, as the benefit from // just guarded devirtualization alone is likely not worth the // extra jit time and code size. // // TODO: it is possibly interesting to allow this, but requires // fixes elsewhere too... JITDUMP("Revoking guarded devirtualization candidacy for call [%06u]: target method can't be inlined\n", dspTreeID(call)); call->ClearGuardedDevirtualizationCandidate(); } //------------------------------------------------------------------------ // impMarkInlineCandidateHelper: determine if this call can be subsequently // inlined // // Arguments: // callNode -- call under scrutiny // exactContextHnd -- context handle for inlining // exactContextNeedsRuntimeLookup -- true if context required runtime lookup // callInfo -- call info from VM // // Notes: // If callNode is an inline candidate, this method sets the flag // GTF_CALL_INLINE_CANDIDATE, and ensures that helper methods have // filled in the associated InlineCandidateInfo. // // If callNode is not an inline candidate, and the reason is // something that is inherent to the method being called, the // method may be marked as "noinline" to short-circuit any // future assessments of calls to this method. 
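//    Illustrative example of the short-circuit (assumed typical flow): if the callee turns
//    out to be synchronized, CALLEE_IS_SYNCHRONIZED is reported below; for such
//    callee-inherent observations the method may end up flagged so that later call sites
//    see CORINFO_FLG_DONT_INLINE and bail out without re-evaluating the callee.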
void Compiler::impMarkInlineCandidateHelper(GenTreeCall* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo) { // Let the strategy know there's another call impInlineRoot()->m_inlineStrategy->NoteCall(); if (!opts.OptEnabled(CLFLG_INLINING)) { /* XXX Mon 8/18/2008 * This assert is misleading. The caller does not ensure that we have CLFLG_INLINING set before * calling impMarkInlineCandidate. However, if this assert trips it means that we're an inlinee and * CLFLG_MINOPT is set. That doesn't make a lot of sense. If you hit this assert, work back and * figure out why we did not set MAXOPT for this compile. */ assert(!compIsForInlining()); return; } if (compIsForImportOnly()) { // Don't bother creating the inline candidate during verification. // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification // that leads to the creation of multiple instances of Compiler. return; } InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate"); // Don't inline if not optimizing root method if (opts.compDbgCode) { inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN); return; } // Don't inline if inlining into this method is disabled. if (impInlineRoot()->m_inlineStrategy->IsInliningDisabled()) { inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE); return; } // Don't inline into callers that use the NextCallReturnAddress intrinsic. if (info.compHasNextCallRetAddr) { inlineResult.NoteFatal(InlineObservation::CALLER_USES_NEXT_CALL_RET_ADDR); return; } // Inlining candidate determination needs to honor only IL tail prefix. // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive). if (call->IsTailPrefixedCall()) { inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX); return; } // Delegate Invoke method doesn't have a body and gets special cased instead. // Don't even bother trying to inline it. if (call->IsDelegateInvoke()) { inlineResult.NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY); return; } // Tail recursion elimination takes precedence over inlining. // TODO: We may want to do some of the additional checks from fgMorphCall // here to reduce the chance we don't inline a call that won't be optimized // as a fast tail call or turned into a loop. if (gtIsRecursiveCall(call) && call->IsImplicitTailCall()) { inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL); return; } if (call->IsVirtual()) { // Allow guarded devirt calls to be treated as inline candidates, // but reject all other virtual calls. if (!call->IsGuardedDevirtualizationCandidate()) { inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT); return; } } /* Ignore helper calls */ if (call->gtCallType == CT_HELPER) { assert(!call->IsGuardedDevirtualizationCandidate()); inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER); return; } /* Ignore indirect calls */ if (call->gtCallType == CT_INDIRECT) { inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED); return; } /* I removed the check for BBJ_THROW. BBJ_THROW is usually marked as rarely run. This more or less * restricts the inliner to non-expanding inlines. I removed the check to allow for non-expanding * inlining in throw blocks. I should consider the same thing for catch and filter regions. 
*/ CORINFO_METHOD_HANDLE fncHandle; unsigned methAttr; if (call->IsGuardedDevirtualizationCandidate()) { if (call->gtGuardedDevirtualizationCandidateInfo->guardedMethodUnboxedEntryHandle != nullptr) { fncHandle = call->gtGuardedDevirtualizationCandidateInfo->guardedMethodUnboxedEntryHandle; } else { fncHandle = call->gtGuardedDevirtualizationCandidateInfo->guardedMethodHandle; } methAttr = info.compCompHnd->getMethodAttribs(fncHandle); } else { fncHandle = call->gtCallMethHnd; // Reuse method flags from the original callInfo if possible if (fncHandle == callInfo->hMethod) { methAttr = callInfo->methodFlags; } else { methAttr = info.compCompHnd->getMethodAttribs(fncHandle); } } #ifdef DEBUG if (compStressCompile(STRESS_FORCE_INLINE, 0)) { methAttr |= CORINFO_FLG_FORCEINLINE; } #endif // Check for COMPlus_AggressiveInlining if (compDoAggressiveInlining) { methAttr |= CORINFO_FLG_FORCEINLINE; } if (!(methAttr & CORINFO_FLG_FORCEINLINE)) { /* Don't bother inline blocks that are in the filter region */ if (bbInCatchHandlerILRange(compCurBB)) { #ifdef DEBUG if (verbose) { printf("\nWill not inline blocks that are in the catch handler region\n"); } #endif inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH); return; } if (bbInFilterILRange(compCurBB)) { #ifdef DEBUG if (verbose) { printf("\nWill not inline blocks that are in the filter region\n"); } #endif inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER); return; } } /* Check if we tried to inline this method before */ if (methAttr & CORINFO_FLG_DONT_INLINE) { inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE); return; } /* Cannot inline synchronized methods */ if (methAttr & CORINFO_FLG_SYNCH) { inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED); return; } /* Check legality of PInvoke callsite (for inlining of marshalling code) */ if (methAttr & CORINFO_FLG_PINVOKE) { // See comment in impCheckForPInvokeCall BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB; if (!impCanPInvokeInlineCallSite(block)) { inlineResult.NoteFatal(InlineObservation::CALLSITE_PINVOKE_EH); return; } } InlineCandidateInfo* inlineCandidateInfo = nullptr; impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult); if (inlineResult.IsFailure()) { return; } // The old value should be null OR this call should be a guarded devirtualization candidate. assert((call->gtInlineCandidateInfo == nullptr) || call->IsGuardedDevirtualizationCandidate()); // The new value should not be null. assert(inlineCandidateInfo != nullptr); inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup; call->gtInlineCandidateInfo = inlineCandidateInfo; // If we're in an inlinee compiler, and have a return spill temp, and this inline candidate // is also a tail call candidate, it can use the same return spill temp. // if (compIsForInlining() && call->CanTailCall() && (impInlineInfo->inlineCandidateInfo->preexistingSpillTemp != BAD_VAR_NUM)) { inlineCandidateInfo->preexistingSpillTemp = impInlineInfo->inlineCandidateInfo->preexistingSpillTemp; JITDUMP("Inline candidate [%06u] can share spill temp V%02u\n", dspTreeID(call), inlineCandidateInfo->preexistingSpillTemp); } // Mark the call node as inline candidate. call->gtFlags |= GTF_CALL_INLINE_CANDIDATE; // Let the strategy know there's another candidate. 
impInlineRoot()->m_inlineStrategy->NoteCandidate(); // Since we're not actually inlining yet, and this call site is // still just an inline candidate, there's nothing to report. inlineResult.SetReported(); } /******************************************************************************/ // Returns true if the given intrinsic will be implemented by target-specific // instructions bool Compiler::IsTargetIntrinsic(NamedIntrinsic intrinsicName) { #if defined(TARGET_XARCH) switch (intrinsicName) { // AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1 // instructions to directly compute round/ceiling/floor/truncate. case NI_System_Math_Abs: case NI_System_Math_Sqrt: return true; case NI_System_Math_Ceiling: case NI_System_Math_Floor: case NI_System_Math_Truncate: case NI_System_Math_Round: return compOpportunisticallyDependsOn(InstructionSet_SSE41); case NI_System_Math_FusedMultiplyAdd: return compOpportunisticallyDependsOn(InstructionSet_FMA); default: return false; } #elif defined(TARGET_ARM64) switch (intrinsicName) { case NI_System_Math_Abs: case NI_System_Math_Ceiling: case NI_System_Math_Floor: case NI_System_Math_Truncate: case NI_System_Math_Round: case NI_System_Math_Sqrt: case NI_System_Math_Max: case NI_System_Math_Min: return true; case NI_System_Math_FusedMultiplyAdd: return compOpportunisticallyDependsOn(InstructionSet_AdvSimd); default: return false; } #elif defined(TARGET_ARM) switch (intrinsicName) { case NI_System_Math_Abs: case NI_System_Math_Round: case NI_System_Math_Sqrt: return true; default: return false; } #else // TODO: This portion of logic is not implemented for other arch. // The reason for returning true is that on all other arch the only intrinsic // enabled are target intrinsics. return true; #endif } /******************************************************************************/ // Returns true if the given intrinsic will be implemented by calling System.Math // methods. bool Compiler::IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName) { // Currently, if a math intrinsic is not implemented by target-specific // instructions, it will be implemented by a System.Math call. In the // future, if we turn to implementing some of them with helper calls, // this predicate needs to be revisited. 
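    // Illustrative example (based on the target checks above): on x64 without SSE4.1,
    // IsTargetIntrinsic(NI_System_Math_Floor) returns false, so Math.Floor is emitted as an
    // ordinary call to the System.Math implementation rather than a single rounding
    // instruction.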
return !IsTargetIntrinsic(intrinsicName); } bool Compiler::IsMathIntrinsic(NamedIntrinsic intrinsicName) { switch (intrinsicName) { case NI_System_Math_Abs: case NI_System_Math_Acos: case NI_System_Math_Acosh: case NI_System_Math_Asin: case NI_System_Math_Asinh: case NI_System_Math_Atan: case NI_System_Math_Atanh: case NI_System_Math_Atan2: case NI_System_Math_Cbrt: case NI_System_Math_Ceiling: case NI_System_Math_Cos: case NI_System_Math_Cosh: case NI_System_Math_Exp: case NI_System_Math_Floor: case NI_System_Math_FMod: case NI_System_Math_FusedMultiplyAdd: case NI_System_Math_ILogB: case NI_System_Math_Log: case NI_System_Math_Log2: case NI_System_Math_Log10: case NI_System_Math_Max: case NI_System_Math_Min: case NI_System_Math_Pow: case NI_System_Math_Round: case NI_System_Math_Sin: case NI_System_Math_Sinh: case NI_System_Math_Sqrt: case NI_System_Math_Tan: case NI_System_Math_Tanh: case NI_System_Math_Truncate: { assert((intrinsicName > NI_SYSTEM_MATH_START) && (intrinsicName < NI_SYSTEM_MATH_END)); return true; } default: { assert((intrinsicName < NI_SYSTEM_MATH_START) || (intrinsicName > NI_SYSTEM_MATH_END)); return false; } } } bool Compiler::IsMathIntrinsic(GenTree* tree) { return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->AsIntrinsic()->gtIntrinsicName); } //------------------------------------------------------------------------ // impDevirtualizeCall: Attempt to change a virtual vtable call into a // normal call // // Arguments: // call -- the call node to examine/modify // pResolvedToken -- [IN] the resolved token used to create the call. Used for R2R. // method -- [IN/OUT] the method handle for call. Updated iff call devirtualized. // methodFlags -- [IN/OUT] flags for the method to call. Updated iff call devirtualized. // pContextHandle -- [IN/OUT] context handle for the call. Updated iff call devirtualized. // pExactContextHandle -- [OUT] updated context handle iff call devirtualized // isLateDevirtualization -- if devirtualization is happening after importation // isExplicitTailCalll -- [IN] true if we plan on using an explicit tail call // ilOffset -- IL offset of the call // // Notes: // Virtual calls in IL will always "invoke" the base class method. // // This transformation looks for evidence that the type of 'this' // in the call is exactly known, is a final class or would invoke // a final method, and if that and other safety checks pan out, // modifies the call and the call info to create a direct call. // // This transformation is initially done in the importer and not // in some subsequent optimization pass because we want it to be // upstream of inline candidate identification. // // However, later phases may supply improved type information that // can enable further devirtualization. We currently reinvoke this // code after inlining, if the return value of the inlined call is // the 'this obj' of a subsequent virtual call. // // If devirtualization succeeds and the call's this object is a // (boxed) value type, the jit will ask the EE for the unboxed entry // point. If this exists, the jit will invoke the unboxed entry // on the box payload. In addition if the boxing operation is // visible to the jit and the call is the only consmer of the box, // the jit will try analyze the box to see if the call can be instead // instead made on a local copy. If that is doable, the call is // updated to invoke the unboxed entry on the local copy and the // boxing operation is removed. 
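//    Illustrative example (hypothetical sealed class C):
//        callvirt instance int32 System.Object::GetHashCode()
//    where the jit can prove 'this' is exactly of type C becomes a direct call to the
//    GetHashCode implementation that C dispatches to (plus an explicit null check if
//    needed), which in turn makes the call eligible for inlining.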
// // When guarded devirtualization is enabled, this method will mark // calls as guarded devirtualization candidates, if the type of `this` // is not exactly known, and there is a plausible guess for the type. void Compiler::impDevirtualizeCall(GenTreeCall* call, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_METHOD_HANDLE* method, unsigned* methodFlags, CORINFO_CONTEXT_HANDLE* pContextHandle, CORINFO_CONTEXT_HANDLE* pExactContextHandle, bool isLateDevirtualization, bool isExplicitTailCall, IL_OFFSET ilOffset) { assert(call != nullptr); assert(method != nullptr); assert(methodFlags != nullptr); assert(pContextHandle != nullptr); // This should be a virtual vtable or virtual stub call. // assert(call->IsVirtual()); // Possibly instrument. Note for OSR+PGO we will instrument when // optimizing and (currently) won't devirtualize. We may want // to revisit -- if we can devirtualize we should be able to // suppress the probe. // // We strip BBINSTR from inlinees currently, so we'll only // do this for the root method calls. // if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { assert(opts.OptimizationDisabled() || opts.IsOSR()); assert(!compIsForInlining()); // During importation, optionally flag this block as one that // contains calls requiring class profiling. Ideally perhaps // we'd just keep track of the calls themselves, so we don't // have to search for them later. // if ((call->gtCallType != CT_INDIRECT) && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && (JitConfig.JitClassProfiling() > 0) && !isLateDevirtualization) { JITDUMP("\n ... marking [%06u] in " FMT_BB " for class profile instrumentation\n", dspTreeID(call), compCurBB->bbNum); ClassProfileCandidateInfo* pInfo = new (this, CMK_Inlining) ClassProfileCandidateInfo; // Record some info needed for the class profiling probe. // pInfo->ilOffset = ilOffset; pInfo->probeIndex = info.compClassProbeCount++; call->gtClassProfileCandidateInfo = pInfo; // Flag block as needing scrutiny // compCurBB->bbFlags |= BBF_HAS_CLASS_PROFILE; } return; } // Bail if optimizations are disabled. if (opts.OptimizationDisabled()) { return; } #if defined(DEBUG) // Bail if devirt is disabled. if (JitConfig.JitEnableDevirtualization() == 0) { return; } // Optionally, print info on devirtualization Compiler* const rootCompiler = impInlineRoot(); const bool doPrint = JitConfig.JitPrintDevirtualizedMethods().contains(rootCompiler->info.compMethodName, rootCompiler->info.compClassName, &rootCompiler->info.compMethodInfo->args); #endif // DEBUG // Fetch information about the virtual method we're calling. CORINFO_METHOD_HANDLE baseMethod = *method; unsigned baseMethodAttribs = *methodFlags; if (baseMethodAttribs == 0) { // For late devirt we may not have method attributes, so fetch them. baseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod); } else { #if defined(DEBUG) // Validate that callInfo has up to date method flags const DWORD freshBaseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod); // All the base method attributes should agree, save that // CORINFO_FLG_DONT_INLINE may have changed from 0 to 1 // because of concurrent jitting activity. // // Note we don't look at this particular flag bit below, and // later on (if we do try and inline) we will rediscover why // the method can't be inlined, so there's no danger here in // seeing this particular flag bit in different states between // the cached and fresh values. 
if ((freshBaseMethodAttribs & ~CORINFO_FLG_DONT_INLINE) != (baseMethodAttribs & ~CORINFO_FLG_DONT_INLINE)) { assert(!"mismatched method attributes"); } #endif // DEBUG } // In R2R mode, we might see virtual stub calls to // non-virtuals. For instance cases where the non-virtual method // is in a different assembly but is called via CALLVIRT. For // verison resilience we must allow for the fact that the method // might become virtual in some update. // // In non-R2R modes CALLVIRT <nonvirtual> will be turned into a // regular call+nullcheck upstream, so we won't reach this // point. if ((baseMethodAttribs & CORINFO_FLG_VIRTUAL) == 0) { assert(call->IsVirtualStub()); assert(opts.IsReadyToRun()); JITDUMP("\nimpDevirtualizeCall: [R2R] base method not virtual, sorry\n"); return; } // Fetch information about the class that introduced the virtual method. CORINFO_CLASS_HANDLE baseClass = info.compCompHnd->getMethodClass(baseMethod); const DWORD baseClassAttribs = info.compCompHnd->getClassAttribs(baseClass); // Is the call an interface call? const bool isInterface = (baseClassAttribs & CORINFO_FLG_INTERFACE) != 0; // See what we know about the type of 'this' in the call. GenTree* thisObj = call->gtCallThisArg->GetNode()->gtEffectiveVal(false); bool isExact = false; bool objIsNonNull = false; CORINFO_CLASS_HANDLE objClass = gtGetClassHandle(thisObj, &isExact, &objIsNonNull); // Bail if we know nothing. if (objClass == NO_CLASS_HANDLE) { JITDUMP("\nimpDevirtualizeCall: no type available (op=%s)\n", GenTree::OpName(thisObj->OperGet())); // Don't try guarded devirtualiztion when we're doing late devirtualization. // if (isLateDevirtualization) { JITDUMP("No guarded devirt during late devirtualization\n"); return; } considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass, pContextHandle DEBUGARG(objClass) DEBUGARG("unknown")); return; } // If the objClass is sealed (final), then we may be able to devirtualize. const DWORD objClassAttribs = info.compCompHnd->getClassAttribs(objClass); const bool objClassIsFinal = (objClassAttribs & CORINFO_FLG_FINAL) != 0; #if defined(DEBUG) const char* callKind = isInterface ? "interface" : "virtual"; const char* objClassNote = "[?]"; const char* objClassName = "?objClass"; const char* baseClassName = "?baseClass"; const char* baseMethodName = "?baseMethod"; if (verbose || doPrint) { objClassNote = isExact ? " [exact]" : objClassIsFinal ? " [final]" : ""; objClassName = info.compCompHnd->getClassName(objClass); baseClassName = info.compCompHnd->getClassName(baseClass); baseMethodName = eeGetMethodName(baseMethod, nullptr); if (verbose) { printf("\nimpDevirtualizeCall: Trying to devirtualize %s call:\n" " class for 'this' is %s%s (attrib %08x)\n" " base method is %s::%s\n", callKind, objClassName, objClassNote, objClassAttribs, baseClassName, baseMethodName); } } #endif // defined(DEBUG) // See if the jit's best type for `obj` is an interface. // See for instance System.ValueTuple`8::GetHashCode, where lcl 0 is System.IValueTupleInternal // IL_021d: ldloc.0 // IL_021e: callvirt instance int32 System.Object::GetHashCode() // // If so, we can't devirtualize, but we may be able to do guarded devirtualization. // if ((objClassAttribs & CORINFO_FLG_INTERFACE) != 0) { // Don't try guarded devirtualiztion when we're doing late devirtualization. 
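        // Note (illustrative): with dynamic PGO class-profile data, considerGuardedDevirtualization
        // (below) may still find a sufficiently likely concrete class and mark this call as a
        // guarded devirtualization candidate instead of giving up entirely.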
// if (isLateDevirtualization) { JITDUMP("No guarded devirt during late devirtualization\n"); return; } considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass, pContextHandle DEBUGARG(objClass) DEBUGARG(objClassName)); return; } // If we get this far, the jit has a lower bound class type for the `this` object being used for dispatch. // It may or may not know enough to devirtualize... if (isInterface) { assert(call->IsVirtualStub()); JITDUMP("--- base class is interface\n"); } // Fetch the method that would be called based on the declared type of 'this', // and prepare to fetch the method attributes. // CORINFO_DEVIRTUALIZATION_INFO dvInfo; dvInfo.virtualMethod = baseMethod; dvInfo.objClass = objClass; dvInfo.context = *pContextHandle; dvInfo.detail = CORINFO_DEVIRTUALIZATION_UNKNOWN; dvInfo.pResolvedTokenVirtualMethod = pResolvedToken; info.compCompHnd->resolveVirtualMethod(&dvInfo); CORINFO_METHOD_HANDLE derivedMethod = dvInfo.devirtualizedMethod; CORINFO_CONTEXT_HANDLE exactContext = dvInfo.exactContext; CORINFO_CLASS_HANDLE derivedClass = NO_CLASS_HANDLE; CORINFO_RESOLVED_TOKEN* pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedMethod; if (derivedMethod != nullptr) { assert(exactContext != nullptr); assert(((size_t)exactContext & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS); derivedClass = (CORINFO_CLASS_HANDLE)((size_t)exactContext & ~CORINFO_CONTEXTFLAGS_MASK); } DWORD derivedMethodAttribs = 0; bool derivedMethodIsFinal = false; bool canDevirtualize = false; #if defined(DEBUG) const char* derivedClassName = "?derivedClass"; const char* derivedMethodName = "?derivedMethod"; const char* note = "inexact or not final"; #endif // If we failed to get a method handle, we can't directly devirtualize. // // This can happen when prejitting, if the devirtualization crosses // servicing bubble boundaries, or if objClass is a shared class. // if (derivedMethod == nullptr) { JITDUMP("--- no derived method: %s\n", devirtualizationDetailToString(dvInfo.detail)); } else { // Fetch method attributes to see if method is marked final. derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod); derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0); #if defined(DEBUG) if (isExact) { note = "exact"; } else if (objClassIsFinal) { note = "final class"; } else if (derivedMethodIsFinal) { note = "final method"; } if (verbose || doPrint) { derivedMethodName = eeGetMethodName(derivedMethod, nullptr); derivedClassName = eeGetClassName(derivedClass); if (verbose) { printf(" devirt to %s::%s -- %s\n", derivedClassName, derivedMethodName, note); gtDispTree(call); } } #endif // defined(DEBUG) canDevirtualize = isExact || objClassIsFinal || (!isInterface && derivedMethodIsFinal); } // We still might be able to do a guarded devirtualization. // Note the call might be an interface call or a virtual call. // if (!canDevirtualize) { JITDUMP(" Class not final or exact%s\n", isInterface ? "" : ", and method not final"); #if defined(DEBUG) // If we know the object type exactly, we generally expect we can devirtualize. // (don't when doing late devirt as we won't have an owner type (yet)) // if (!isLateDevirtualization && (isExact || objClassIsFinal) && JitConfig.JitNoteFailedExactDevirtualization()) { printf("@@@ Exact/Final devirt failure in %s at [%06u] $ %s\n", info.compFullName, dspTreeID(call), devirtualizationDetailToString(dvInfo.detail)); } #endif // Don't try guarded devirtualiztion if we're doing late devirtualization. 
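        // Illustrative case that reaches this point (hypothetical types): 'this' is only known
        // to be some non-final class whose resolved override is itself virtual and not final;
        // a direct call could bypass overrides introduced by derived classes, so at most a
        // guarded guess (below) is attempted.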
// if (isLateDevirtualization) { JITDUMP("No guarded devirt during late devirtualization\n"); return; } considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass, pContextHandle DEBUGARG(objClass) DEBUGARG(objClassName)); return; } // All checks done. Time to transform the call. // // We should always have an exact class context. // // Note that wouldnt' be true if the runtime side supported array interface devirt, // the resulting method would be a generic method of the non-generic SZArrayHelper class. // assert(canDevirtualize); JITDUMP(" %s; can devirtualize\n", note); // Make the updates. call->gtFlags &= ~GTF_CALL_VIRT_VTABLE; call->gtFlags &= ~GTF_CALL_VIRT_STUB; call->gtCallMethHnd = derivedMethod; call->gtCallType = CT_USER_FUNC; call->gtCallMoreFlags |= GTF_CALL_M_DEVIRTUALIZED; // Virtual calls include an implicit null check, which we may // now need to make explicit. if (!objIsNonNull) { call->gtFlags |= GTF_CALL_NULLCHECK; } // Clear the inline candidate info (may be non-null since // it's a union field used for other things by virtual // stubs) call->gtInlineCandidateInfo = nullptr; #if defined(DEBUG) if (verbose) { printf("... after devirt...\n"); gtDispTree(call); } if (doPrint) { printf("Devirtualized %s call to %s:%s; now direct call to %s:%s [%s]\n", callKind, baseClassName, baseMethodName, derivedClassName, derivedMethodName, note); } // If we successfully devirtualized based on an exact or final class, // and we have dynamic PGO data describing the likely class, make sure they agree. // // If pgo source is not dynamic we may see likely classes from other versions of this code // where types had different properties. // // If method is an inlinee we may be specializing to a class that wasn't seen at runtime. // const bool canSensiblyCheck = (isExact || objClassIsFinal) && (fgPgoSource == ICorJitInfo::PgoSource::Dynamic) && !compIsForInlining(); if (JitConfig.JitCrossCheckDevirtualizationAndPGO() && canSensiblyCheck) { // We only can handle a single likely class for now const int maxLikelyClasses = 1; LikelyClassRecord likelyClasses[maxLikelyClasses]; UINT32 numberOfClasses = getLikelyClasses(likelyClasses, maxLikelyClasses, fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset); UINT32 likelihood = likelyClasses[0].likelihood; CORINFO_CLASS_HANDLE likelyClass = likelyClasses[0].clsHandle; if (numberOfClasses > 0) { // PGO had better agree the class we devirtualized to is plausible. // if (likelyClass != derivedClass) { // Managed type system may report different addresses for a class handle // at different times....? // // Also, AOT may have a more nuanced notion of class equality. // if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { bool mismatch = true; // derivedClass will be the introducer of derived method, so it's possible // likelyClass is a non-overriding subclass. Check up the hierarchy. 
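                    // Illustrative example (hypothetical types): if the profile's likelyClass is
                    // DerivedB but the devirtualized method was introduced on its base DerivedA
                    // (so derivedClass == DerivedA), walking getParentType from DerivedB reaches
                    // DerivedA and the two answers are treated as consistent.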
// CORINFO_CLASS_HANDLE parentClass = likelyClass; while (parentClass != NO_CLASS_HANDLE) { if (parentClass == derivedClass) { mismatch = false; break; } parentClass = info.compCompHnd->getParentType(parentClass); } if (mismatch || (numberOfClasses != 1) || (likelihood != 100)) { printf("@@@ Likely %p (%s) != Derived %p (%s) [n=%u, l=%u, il=%u] in %s \n", likelyClass, eeGetClassName(likelyClass), derivedClass, eeGetClassName(derivedClass), numberOfClasses, likelihood, ilOffset, info.compFullName); } assert(!(mismatch || (numberOfClasses != 1) || (likelihood != 100))); } } } } #endif // defined(DEBUG) // If the 'this' object is a value class, see if we can rework the call to invoke the // unboxed entry. This effectively inlines the normally un-inlineable wrapper stub // and exposes the potentially inlinable unboxed entry method. // // We won't optimize explicit tail calls, as ensuring we get the right tail call info // is tricky (we'd need to pass an updated sig and resolved token back to some callers). // // Note we may not have a derived class in some cases (eg interface call on an array) // if (info.compCompHnd->isValueClass(derivedClass)) { if (isExplicitTailCall) { JITDUMP("Have a direct explicit tail call to boxed entry point; can't optimize further\n"); } else { JITDUMP("Have a direct call to boxed entry point. Trying to optimize to call an unboxed entry point\n"); // Note for some shared methods the unboxed entry point requires an extra parameter. bool requiresInstMethodTableArg = false; CORINFO_METHOD_HANDLE unboxedEntryMethod = info.compCompHnd->getUnboxedEntry(derivedMethod, &requiresInstMethodTableArg); if (unboxedEntryMethod != nullptr) { bool optimizedTheBox = false; // If the 'this' object is a local box, see if we can revise things // to not require boxing. // if (thisObj->IsBoxedValue() && !isExplicitTailCall) { // Since the call is the only consumer of the box, we know the box can't escape // since it is being passed an interior pointer. // // So, revise the box to simply create a local copy, use the address of that copy // as the this pointer, and update the entry point to the unboxed entry. // // Ideally, we then inline the boxed method and and if it turns out not to modify // the copy, we can undo the copy too. if (requiresInstMethodTableArg) { // Perform a trial box removal and ask for the type handle tree that fed the box. // JITDUMP("Unboxed entry needs method table arg...\n"); GenTree* methodTableArg = gtTryRemoveBoxUpstreamEffects(thisObj, BR_DONT_REMOVE_WANT_TYPE_HANDLE); if (methodTableArg != nullptr) { // If that worked, turn the box into a copy to a local var // JITDUMP("Found suitable method table arg tree [%06u]\n", dspTreeID(methodTableArg)); GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY); if (localCopyThis != nullptr) { // Pass the local var as this and the type handle as a new arg // JITDUMP("Success! invoking unboxed entry point on local copy, and passing method table " "arg\n"); call->gtCallThisArg = gtNewCallArgs(localCopyThis); call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; // Prepend for R2L arg passing or empty L2R passing // Append for non-empty L2R // if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr)) { // If there's a ret buf, the method table is the second arg. 
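                            // Sketch of the resulting early arg order (assuming R2L passing and a
                            // ret buf): retBuf, methodTable, <remaining args>; with no ret buf the
                            // method table is prepended instead, and for non-empty L2R passing it
                            // is appended at the end of the arg list.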
// if (call->HasRetBufArg()) { gtInsertNewCallArgAfter(methodTableArg, call->gtCallArgs); } else { call->gtCallArgs = gtPrependNewCallArg(methodTableArg, call->gtCallArgs); } } else { GenTreeCall::Use* beforeArg = call->gtCallArgs; while (beforeArg->GetNext() != nullptr) { beforeArg = beforeArg->GetNext(); } beforeArg->SetNext(gtNewCallArgs(methodTableArg)); } call->gtCallMethHnd = unboxedEntryMethod; derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; // Method attributes will differ because unboxed entry point is shared // const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod); JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs, unboxedMethodAttribs); derivedMethodAttribs = unboxedMethodAttribs; optimizedTheBox = true; } else { JITDUMP("Sorry, failed to undo the box -- can't convert to local copy\n"); } } else { JITDUMP("Sorry, failed to undo the box -- can't find method table arg\n"); } } else { JITDUMP("Found unboxed entry point, trying to simplify box to a local copy\n"); GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY); if (localCopyThis != nullptr) { JITDUMP("Success! invoking unboxed entry point on local copy\n"); call->gtCallThisArg = gtNewCallArgs(localCopyThis); call->gtCallMethHnd = unboxedEntryMethod; call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; optimizedTheBox = true; } else { JITDUMP("Sorry, failed to undo the box\n"); } } if (optimizedTheBox) { #if FEATURE_TAILCALL_OPT if (call->IsImplicitTailCall()) { JITDUMP("Clearing the implicit tail call flag\n"); // If set, we clear the implicit tail call flag // as we just introduced a new address taken local variable // call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL; } #endif // FEATURE_TAILCALL_OPT } } if (!optimizedTheBox) { // If we get here, we have a boxed value class that either wasn't boxed // locally, or was boxed locally but we were unable to remove the box for // various reasons. // // We can still update the call to invoke the unboxed entry, if the // boxed value is simple. // if (requiresInstMethodTableArg) { // Get the method table from the boxed object. // GenTree* const thisArg = call->gtCallThisArg->GetNode(); GenTree* const clonedThisArg = gtClone(thisArg); if (clonedThisArg == nullptr) { JITDUMP( "unboxed entry needs MT arg, but `this` was too complex to clone. Deferring update.\n"); } else { JITDUMP("revising call to invoke unboxed entry with additional method table arg\n"); GenTree* const methodTableArg = gtNewMethodTableLookup(clonedThisArg); // Update the 'this' pointer to refer to the box payload // GenTree* const payloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* const boxPayload = gtNewOperNode(GT_ADD, TYP_BYREF, thisArg, payloadOffset); call->gtCallThisArg = gtNewCallArgs(boxPayload); call->gtCallMethHnd = unboxedEntryMethod; call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; // Method attributes will differ because unboxed entry point is shared // const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod); JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs, unboxedMethodAttribs); derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; derivedMethodAttribs = unboxedMethodAttribs; // Add the method table argument. 
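                        // Note on the layout relied on here (illustrative): a boxed value is laid
                        // out as [method table pointer][payload], so the unboxed 'this' is the box
                        // address plus TARGET_POINTER_SIZE, and the method table argument can be
                        // loaded from the box itself via gtNewMethodTableLookup on the cloned 'this'.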
// // Prepend for R2L arg passing or empty L2R passing // Append for non-empty L2R // if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr)) { // If there's a ret buf, the method table is the second arg. // if (call->HasRetBufArg()) { gtInsertNewCallArgAfter(methodTableArg, call->gtCallArgs); } else { call->gtCallArgs = gtPrependNewCallArg(methodTableArg, call->gtCallArgs); } } else { GenTreeCall::Use* beforeArg = call->gtCallArgs; while (beforeArg->GetNext() != nullptr) { beforeArg = beforeArg->GetNext(); } beforeArg->SetNext(gtNewCallArgs(methodTableArg)); } } } else { JITDUMP("revising call to invoke unboxed entry\n"); GenTree* const thisArg = call->gtCallThisArg->GetNode(); GenTree* const payloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* const boxPayload = gtNewOperNode(GT_ADD, TYP_BYREF, thisArg, payloadOffset); call->gtCallThisArg = gtNewCallArgs(boxPayload); call->gtCallMethHnd = unboxedEntryMethod; call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; } } } else { // Many of the low-level methods on value classes won't have unboxed entries, // as they need access to the type of the object. // // Note this may be a cue for us to stack allocate the boxed object, since // we probably know that these objects don't escape. JITDUMP("Sorry, failed to find unboxed entry point\n"); } } } // Need to update call info too. // *method = derivedMethod; *methodFlags = derivedMethodAttribs; // Update context handle // *pContextHandle = MAKE_METHODCONTEXT(derivedMethod); // Update exact context handle. // if (pExactContextHandle != nullptr) { *pExactContextHandle = MAKE_CLASSCONTEXT(derivedClass); } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { // For R2R, getCallInfo triggers bookkeeping on the zap // side and acquires the actual symbol to call so we need to call it here. // Look up the new call info. CORINFO_CALL_INFO derivedCallInfo; eeGetCallInfo(pDerivedResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, &derivedCallInfo); // Update the call. call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT; call->gtCallMoreFlags &= ~GTF_CALL_M_R2R_REL_INDIRECT; call->setEntryPoint(derivedCallInfo.codePointerLookup.constLookup); } #endif // FEATURE_READYTORUN } //------------------------------------------------------------------------ // impGetSpecialIntrinsicExactReturnType: Look for special cases where a call // to an intrinsic returns an exact type // // Arguments: // methodHnd -- handle for the special intrinsic method // // Returns: // Exact class handle returned by the intrinsic call, if known. // Nullptr if not known, or not likely to lead to beneficial optimization. CORINFO_CLASS_HANDLE Compiler::impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE methodHnd) { JITDUMP("Special intrinsic: looking for exact type returned by %s\n", eeGetMethodFullName(methodHnd)); CORINFO_CLASS_HANDLE result = nullptr; // See what intrinisc we have... const NamedIntrinsic ni = lookupNamedIntrinsic(methodHnd); switch (ni) { case NI_System_Collections_Generic_Comparer_get_Default: case NI_System_Collections_Generic_EqualityComparer_get_Default: { // Expect one class generic parameter; figure out which it is. 
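            // Illustrative example: for EqualityComparer<string>.Default, typeHnd is
            // System.String; since String is sealed the CORINFO_FLG_FINAL check below passes
            // and the VM can report the exact comparer class it will return, enabling later
            // calls on that comparer to be devirtualized.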
CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(methodHnd, &sig); assert(sig.sigInst.classInstCount == 1); CORINFO_CLASS_HANDLE typeHnd = sig.sigInst.classInst[0]; assert(typeHnd != nullptr); // Lookup can incorrect when we have __Canon as it won't appear // to implement any interface types. // // And if we do not have a final type, devirt & inlining is // unlikely to result in much simplification. // // We can use CORINFO_FLG_FINAL to screen out both of these cases. const DWORD typeAttribs = info.compCompHnd->getClassAttribs(typeHnd); const bool isFinalType = ((typeAttribs & CORINFO_FLG_FINAL) != 0); if (isFinalType) { if (ni == NI_System_Collections_Generic_EqualityComparer_get_Default) { result = info.compCompHnd->getDefaultEqualityComparerClass(typeHnd); } else { assert(ni == NI_System_Collections_Generic_Comparer_get_Default); result = info.compCompHnd->getDefaultComparerClass(typeHnd); } JITDUMP("Special intrinsic for type %s: return type is %s\n", eeGetClassName(typeHnd), result != nullptr ? eeGetClassName(result) : "unknown"); } else { JITDUMP("Special intrinsic for type %s: type not final, so deferring opt\n", eeGetClassName(typeHnd)); } break; } default: { JITDUMP("This special intrinsic not handled, sorry...\n"); break; } } return result; } //------------------------------------------------------------------------ // impAllocateToken: create CORINFO_RESOLVED_TOKEN into jit-allocated memory and init it. // // Arguments: // token - init value for the allocated token. // // Return Value: // pointer to token into jit-allocated memory. CORINFO_RESOLVED_TOKEN* Compiler::impAllocateToken(const CORINFO_RESOLVED_TOKEN& token) { CORINFO_RESOLVED_TOKEN* memory = getAllocator(CMK_Unknown).allocate<CORINFO_RESOLVED_TOKEN>(1); *memory = token; return memory; } //------------------------------------------------------------------------ // SpillRetExprHelper: iterate through arguments tree and spill ret_expr to local variables. // class SpillRetExprHelper { public: SpillRetExprHelper(Compiler* comp) : comp(comp) { } void StoreRetExprResultsInArgs(GenTreeCall* call) { for (GenTreeCall::Use& use : call->Args()) { comp->fgWalkTreePre(&use.NodeRef(), SpillRetExprVisitor, this); } if (call->gtCallThisArg != nullptr) { comp->fgWalkTreePre(&call->gtCallThisArg->NodeRef(), SpillRetExprVisitor, this); } } private: static Compiler::fgWalkResult SpillRetExprVisitor(GenTree** pTree, Compiler::fgWalkData* fgWalkPre) { assert((pTree != nullptr) && (*pTree != nullptr)); GenTree* tree = *pTree; if ((tree->gtFlags & GTF_CALL) == 0) { // Trees with ret_expr are marked as GTF_CALL. 
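            // Rationale (summarized): GTF_CALL propagates to the ancestors of any GT_RET_EXPR,
            // so a subtree without GTF_CALL cannot contain one and can safely be pruned here.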
return Compiler::WALK_SKIP_SUBTREES; } if (tree->OperGet() == GT_RET_EXPR) { SpillRetExprHelper* walker = static_cast<SpillRetExprHelper*>(fgWalkPre->pCallbackData); walker->StoreRetExprAsLocalVar(pTree); } return Compiler::WALK_CONTINUE; } void StoreRetExprAsLocalVar(GenTree** pRetExpr) { GenTree* retExpr = *pRetExpr; assert(retExpr->OperGet() == GT_RET_EXPR); const unsigned tmp = comp->lvaGrabTemp(true DEBUGARG("spilling ret_expr")); JITDUMP("Storing return expression [%06u] to a local var V%02u.\n", comp->dspTreeID(retExpr), tmp); comp->impAssignTempGen(tmp, retExpr, (unsigned)Compiler::CHECK_SPILL_NONE); *pRetExpr = comp->gtNewLclvNode(tmp, retExpr->TypeGet()); if (retExpr->TypeGet() == TYP_REF) { assert(comp->lvaTable[tmp].lvSingleDef == 0); comp->lvaTable[tmp].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tmp); bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE retClsHnd = comp->gtGetClassHandle(retExpr, &isExact, &isNonNull); if (retClsHnd != nullptr) { comp->lvaSetClass(tmp, retClsHnd, isExact); } } } private: Compiler* comp; }; //------------------------------------------------------------------------ // addFatPointerCandidate: mark the call and the method, that they have a fat pointer candidate. // Spill ret_expr in the call node, because they can't be cloned. // // Arguments: // call - fat calli candidate // void Compiler::addFatPointerCandidate(GenTreeCall* call) { JITDUMP("Marking call [%06u] as fat pointer candidate\n", dspTreeID(call)); setMethodHasFatPointer(); call->SetFatPointerCandidate(); SpillRetExprHelper helper(this); helper.StoreRetExprResultsInArgs(call); } //------------------------------------------------------------------------ // considerGuardedDevirtualization: see if we can profitably guess at the // class involved in an interface or virtual call. // // Arguments: // // call - potential guarded devirtualization candidate // ilOffset - IL ofset of the call instruction // isInterface - true if this is an interface call // baseMethod - target method of the call // baseClass - class that introduced the target method // pContextHandle - context handle for the call // objClass - class of 'this' in the call // objClassName - name of the obj Class // // Notes: // Consults with VM to see if there's a likely class at runtime, // if so, adds a candidate for guarded devirtualization. // void Compiler::considerGuardedDevirtualization( GenTreeCall* call, IL_OFFSET ilOffset, bool isInterface, CORINFO_METHOD_HANDLE baseMethod, CORINFO_CLASS_HANDLE baseClass, CORINFO_CONTEXT_HANDLE* pContextHandle DEBUGARG(CORINFO_CLASS_HANDLE objClass) DEBUGARG(const char* objClassName)) { #if defined(DEBUG) const char* callKind = isInterface ? "interface" : "virtual"; #endif JITDUMP("Considering guarded devirtualization at IL offset %u (0x%x)\n", ilOffset, ilOffset); // We currently only get likely class guesses when there is PGO data // with class profiles. // if (fgPgoClassProfiles == 0) { JITDUMP("Not guessing for class: no class profile pgo data, or pgo disabled\n"); return; } // See if there's a likely guess for the class. // const unsigned likelihoodThreshold = isInterface ? 25 : 30; unsigned likelihood = 0; unsigned numberOfClasses = 0; CORINFO_CLASS_HANDLE likelyClass = NO_CLASS_HANDLE; bool doRandomDevirt = false; const int maxLikelyClasses = 32; LikelyClassRecord likelyClasses[maxLikelyClasses]; #ifdef DEBUG // Optional stress mode to pick a random known class, rather than // the most likely known class. 
// doRandomDevirt = JitConfig.JitRandomGuardedDevirtualization() != 0; if (doRandomDevirt) { // Reuse the random inliner's random state. // CLRRandom* const random = impInlineRoot()->m_inlineStrategy->GetRandom(JitConfig.JitRandomGuardedDevirtualization()); likelyClasses[0].clsHandle = getRandomClass(fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset, random); likelyClasses[0].likelihood = 100; if (likelyClasses[0].clsHandle != NO_CLASS_HANDLE) { numberOfClasses = 1; } } else #endif { numberOfClasses = getLikelyClasses(likelyClasses, maxLikelyClasses, fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset); } // For now we only use the most popular type likelihood = likelyClasses[0].likelihood; likelyClass = likelyClasses[0].clsHandle; if (numberOfClasses < 1) { JITDUMP("No likely class, sorry\n"); return; } assert(likelyClass != NO_CLASS_HANDLE); // Print all likely classes JITDUMP("%s classes for %p (%s):\n", doRandomDevirt ? "Random" : "Likely", dspPtr(objClass), objClassName) for (UINT32 i = 0; i < numberOfClasses; i++) { JITDUMP(" %u) %p (%s) [likelihood:%u%%]\n", i + 1, likelyClasses[i].clsHandle, eeGetClassName(likelyClasses[i].clsHandle), likelyClasses[i].likelihood); } // Todo: a more advanced heuristic using likelihood, number of // classes, and the profile count for this block. // // For now we will guess if the likelihood is at least 25%/30% (intfc/virt), as studies // have shown this transformation should pay off even if we guess wrong sometimes. // if (likelihood < likelihoodThreshold) { JITDUMP("Not guessing for class; likelihood is below %s call threshold %u\n", callKind, likelihoodThreshold); return; } uint32_t const likelyClassAttribs = info.compCompHnd->getClassAttribs(likelyClass); if ((likelyClassAttribs & CORINFO_FLG_ABSTRACT) != 0) { // We may see an abstract likely class, if we have a stale profile. // No point guessing for this. // JITDUMP("Not guessing for class; abstract (stale profile)\n"); return; } // Figure out which method will be called. // CORINFO_DEVIRTUALIZATION_INFO dvInfo; dvInfo.virtualMethod = baseMethod; dvInfo.objClass = likelyClass; dvInfo.context = *pContextHandle; dvInfo.exactContext = *pContextHandle; dvInfo.pResolvedTokenVirtualMethod = nullptr; const bool canResolve = info.compCompHnd->resolveVirtualMethod(&dvInfo); if (!canResolve) { JITDUMP("Can't figure out which method would be invoked, sorry\n"); return; } CORINFO_METHOD_HANDLE likelyMethod = dvInfo.devirtualizedMethod; JITDUMP("%s call would invoke method %s\n", callKind, eeGetMethodName(likelyMethod, nullptr)); // Add this as a potential candidate. // uint32_t const likelyMethodAttribs = info.compCompHnd->getMethodAttribs(likelyMethod); addGuardedDevirtualizationCandidate(call, likelyMethod, likelyClass, likelyMethodAttribs, likelyClassAttribs, likelihood); } //------------------------------------------------------------------------ // addGuardedDevirtualizationCandidate: potentially mark the call as a guarded // devirtualization candidate // // Notes: // // Call sites in rare or unoptimized code, and calls that require cookies are // not marked as candidates. // // As part of marking the candidate, the code spills GT_RET_EXPRs anywhere in any // child tree, because and we need to clone all these trees when we clone the call // as part of guarded devirtualization, and these IR nodes can't be cloned. 
// // Arguments: // call - potential guarded devirtualization candidate // methodHandle - method that will be invoked if the class test succeeds // classHandle - class that will be tested for at runtime // methodAttr - attributes of the method // classAttr - attributes of the class // likelihood - odds that this class is the class seen at runtime // void Compiler::addGuardedDevirtualizationCandidate(GenTreeCall* call, CORINFO_METHOD_HANDLE methodHandle, CORINFO_CLASS_HANDLE classHandle, unsigned methodAttr, unsigned classAttr, unsigned likelihood) { // This transformation only makes sense for virtual calls assert(call->IsVirtual()); // Only mark calls if the feature is enabled. const bool isEnabled = JitConfig.JitEnableGuardedDevirtualization() > 0; if (!isEnabled) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- disabled by jit config\n", dspTreeID(call)); return; } // Bail if not optimizing or the call site is very likely cold if (compCurBB->isRunRarely() || opts.OptimizationDisabled()) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- rare / dbg / minopts\n", dspTreeID(call)); return; } // CT_INDIRECT calls may use the cookie, bail if so... // // If transforming these provides a benefit, we could save this off in the same way // we save the stub address below. if ((call->gtCallType == CT_INDIRECT) && (call->AsCall()->gtCallCookie != nullptr)) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- CT_INDIRECT with cookie\n", dspTreeID(call)); return; } #ifdef DEBUG // See if disabled by range // static ConfigMethodRange JitGuardedDevirtualizationRange; JitGuardedDevirtualizationRange.EnsureInit(JitConfig.JitGuardedDevirtualizationRange()); assert(!JitGuardedDevirtualizationRange.Error()); if (!JitGuardedDevirtualizationRange.Contains(impInlineRoot()->info.compMethodHash())) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- excluded by " "JitGuardedDevirtualizationRange", dspTreeID(call)); return; } #endif // We're all set, proceed with candidate creation. // JITDUMP("Marking call [%06u] as guarded devirtualization candidate; will guess for class %s\n", dspTreeID(call), eeGetClassName(classHandle)); setMethodHasGuardedDevirtualization(); call->SetGuardedDevirtualizationCandidate(); // Spill off any GT_RET_EXPR subtrees so we can clone the call. // SpillRetExprHelper helper(this); helper.StoreRetExprResultsInArgs(call); // Gather some information for later. Note we actually allocate InlineCandidateInfo // here, as the devirtualized half of this call will likely become an inline candidate. // GuardedDevirtualizationCandidateInfo* pInfo = new (this, CMK_Inlining) InlineCandidateInfo; pInfo->guardedMethodHandle = methodHandle; pInfo->guardedMethodUnboxedEntryHandle = nullptr; pInfo->guardedClassHandle = classHandle; pInfo->likelihood = likelihood; pInfo->requiresInstMethodTableArg = false; // If the guarded class is a value class, look for an unboxed entry point. // if ((classAttr & CORINFO_FLG_VALUECLASS) != 0) { JITDUMP(" ... class is a value class, looking for unboxed entry\n"); bool requiresInstMethodTableArg = false; CORINFO_METHOD_HANDLE unboxedEntryMethodHandle = info.compCompHnd->getUnboxedEntry(methodHandle, &requiresInstMethodTableArg); if (unboxedEntryMethodHandle != nullptr) { JITDUMP(" ... 
updating GDV candidate with unboxed entry info\n"); pInfo->guardedMethodUnboxedEntryHandle = unboxedEntryMethodHandle; pInfo->requiresInstMethodTableArg = requiresInstMethodTableArg; } } call->gtGuardedDevirtualizationCandidateInfo = pInfo; } void Compiler::addExpRuntimeLookupCandidate(GenTreeCall* call) { setMethodHasExpRuntimeLookup(); call->SetExpRuntimeLookup(); } //------------------------------------------------------------------------ // impIsClassExact: check if a class handle can only describe values // of exactly one class. // // Arguments: // classHnd - handle for class in question // // Returns: // true if class is final and not subject to special casting from // variance or similar. // // Note: // We are conservative on arrays of primitive types here. bool Compiler::impIsClassExact(CORINFO_CLASS_HANDLE classHnd) { DWORD flags = info.compCompHnd->getClassAttribs(classHnd); DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_VARIANCE | CORINFO_FLG_ARRAY; if ((flags & flagsMask) == CORINFO_FLG_FINAL) { return true; } if ((flags & flagsMask) == (CORINFO_FLG_FINAL | CORINFO_FLG_ARRAY)) { CORINFO_CLASS_HANDLE arrayElementHandle = nullptr; CorInfoType type = info.compCompHnd->getChildType(classHnd, &arrayElementHandle); if ((type == CORINFO_TYPE_CLASS) || (type == CORINFO_TYPE_VALUECLASS)) { return impIsClassExact(arrayElementHandle); } } return false; } //------------------------------------------------------------------------ // impCanSkipCovariantStoreCheck: see if storing a ref type value to an array // can skip the array store covariance check. // // Arguments: // value -- tree producing the value to store // array -- tree representing the array to store to // // Returns: // true if the store does not require a covariance check. // bool Compiler::impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array) { // We should only call this when optimizing. assert(opts.OptimizationEnabled()); // Check for assignment to same array, ie. arrLcl[i] = arrLcl[j] if (value->OperIs(GT_INDEX) && array->OperIs(GT_LCL_VAR)) { GenTree* valueIndex = value->AsIndex()->Arr(); if (valueIndex->OperIs(GT_LCL_VAR)) { unsigned valueLcl = valueIndex->AsLclVar()->GetLclNum(); unsigned arrayLcl = array->AsLclVar()->GetLclNum(); if ((valueLcl == arrayLcl) && !lvaGetDesc(arrayLcl)->IsAddressExposed()) { JITDUMP("\nstelem of ref from same array: skipping covariant store check\n"); return true; } } } // Check for assignment of NULL. if (value->OperIs(GT_CNS_INT)) { assert(value->gtType == TYP_REF); if (value->AsIntCon()->gtIconVal == 0) { JITDUMP("\nstelem of null: skipping covariant store check\n"); return true; } // Non-0 const refs can only occur with frozen objects assert(value->IsIconHandle(GTF_ICON_STR_HDL)); assert(doesMethodHaveFrozenString() || (compIsForInlining() && impInlineInfo->InlinerCompiler->doesMethodHaveFrozenString())); } // Try and get a class handle for the array if (value->gtType != TYP_REF) { return false; } bool arrayIsExact = false; bool arrayIsNonNull = false; CORINFO_CLASS_HANDLE arrayHandle = gtGetClassHandle(array, &arrayIsExact, &arrayIsNonNull); if (arrayHandle == NO_CLASS_HANDLE) { return false; } // There are some methods in corelib where we're storing to an array but the IL // doesn't reflect this (see SZArrayHelper). Avoid. 
DWORD attribs = info.compCompHnd->getClassAttribs(arrayHandle); if ((attribs & CORINFO_FLG_ARRAY) == 0) { return false; } CORINFO_CLASS_HANDLE arrayElementHandle = nullptr; CorInfoType arrayElemType = info.compCompHnd->getChildType(arrayHandle, &arrayElementHandle); // Verify array type handle is really an array of ref type assert(arrayElemType == CORINFO_TYPE_CLASS); // Check for exactly object[] if (arrayIsExact && (arrayElementHandle == impGetObjectClass())) { JITDUMP("\nstelem to (exact) object[]: skipping covariant store check\n"); return true; } const bool arrayTypeIsSealed = impIsClassExact(arrayElementHandle); if ((!arrayIsExact && !arrayTypeIsSealed) || (arrayElementHandle == NO_CLASS_HANDLE)) { // Bail out if we don't know array's exact type return false; } bool valueIsExact = false; bool valueIsNonNull = false; CORINFO_CLASS_HANDLE valueHandle = gtGetClassHandle(value, &valueIsExact, &valueIsNonNull); // Array's type is sealed and equals to value's type if (arrayTypeIsSealed && (valueHandle == arrayElementHandle)) { JITDUMP("\nstelem to T[] with T exact: skipping covariant store check\n"); return true; } // Array's type is not sealed but we know its exact type if (arrayIsExact && (valueHandle != NO_CLASS_HANDLE) && (info.compCompHnd->compareTypesForCast(valueHandle, arrayElementHandle) == TypeCompareState::Must)) { JITDUMP("\nstelem to T[] with T exact: skipping covariant store check\n"); return true; } return false; }
1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, SIMD types are always supported, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work for Arm64. On Arm64 we always need SIMD features to support ABI handling, so all such usages were modified to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
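For illustration, a minimal hypothetical sketch of the kind of substitution the description refers to; the surrounding call site is a placeholder and is not taken from the actual diff, only `featureSIMD` and `supportSIMDTypes()` come from the description above:

#ifdef TARGET_ARM64
// Before: guarded by the config-controlled flag, which COMPlus_FeatureSIMD=0 can turn off.
if (featureSIMD)
{
    // ... SIMD/ABI-dependent handling (placeholder) ...
}

// After: guarded by the capability query, which always holds on Arm64.
if (supportSIMDTypes())
{
    // ... SIMD/ABI-dependent handling (placeholder) ...
}
#endif // TARGET_ARM64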
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features. On Arm64, SIMD types are always supported, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work for Arm64. On Arm64 we always need SIMD features to support ABI handling, so all such usages were modified to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/jit/morph.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Morph XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "allocacheck.h" // for alloca // Convert the given node into a call to the specified helper passing // the given argument list. // // Tries to fold constants and also adds an edge for overflow exception // returns the morphed tree GenTree* Compiler::fgMorphCastIntoHelper(GenTree* tree, int helper, GenTree* oper) { GenTree* result; /* If the operand is a constant, we'll try to fold it */ if (oper->OperIsConst()) { GenTree* oldTree = tree; tree = gtFoldExprConst(tree); // This may not fold the constant (NaN ...) if (tree != oldTree) { return fgMorphTree(tree); } else if (tree->OperIsConst()) { return fgMorphConst(tree); } // assert that oper is unchanged and that it is still a GT_CAST node noway_assert(tree->AsCast()->CastOp() == oper); noway_assert(tree->gtOper == GT_CAST); } result = fgMorphIntoHelperCall(tree, helper, gtNewCallArgs(oper)); assert(result == tree); return result; } /***************************************************************************** * * Convert the given node into a call to the specified helper passing * the given argument list. */ GenTree* Compiler::fgMorphIntoHelperCall(GenTree* tree, int helper, GenTreeCall::Use* args, bool morphArgs) { // The helper call ought to be semantically equivalent to the original node, so preserve its VN. tree->ChangeOper(GT_CALL, GenTree::PRESERVE_VN); GenTreeCall* call = tree->AsCall(); call->gtCallType = CT_HELPER; call->gtReturnType = tree->TypeGet(); call->gtCallMethHnd = eeFindHelper(helper); call->gtCallThisArg = nullptr; call->gtCallArgs = args; call->gtCallLateArgs = nullptr; call->fgArgInfo = nullptr; call->gtRetClsHnd = nullptr; call->gtCallMoreFlags = GTF_CALL_M_EMPTY; call->gtInlineCandidateInfo = nullptr; call->gtControlExpr = nullptr; #ifdef UNIX_X86_ABI call->gtFlags |= GTF_CALL_POP_ARGS; #endif // UNIX_X86_ABI #if DEBUG // Helper calls are never candidates. call->gtInlineObservation = InlineObservation::CALLSITE_IS_CALL_TO_HELPER; call->callSig = nullptr; #endif // DEBUG #ifdef FEATURE_READYTORUN call->gtEntryPoint.addr = nullptr; call->gtEntryPoint.accessType = IAT_VALUE; #endif #if FEATURE_MULTIREG_RET call->ResetReturnType(); call->ClearOtherRegs(); call->ClearOtherRegFlags(); #ifndef TARGET_64BIT if (varTypeIsLong(tree)) { call->InitializeLongReturnType(); } #endif // !TARGET_64BIT #endif // FEATURE_MULTIREG_RET if (tree->OperMayThrow(this)) { tree->gtFlags |= GTF_EXCEPT; } else { tree->gtFlags &= ~GTF_EXCEPT; } tree->gtFlags |= GTF_CALL; for (GenTreeCall::Use& use : GenTreeCall::UseList(args)) { tree->gtFlags |= (use.GetNode()->gtFlags & GTF_ALL_EFFECT); } /* Perform the morphing */ if (morphArgs) { tree = fgMorphArgs(call); } return tree; } //------------------------------------------------------------------------ // fgMorphExpandCast: Performs the pre-order (required) morphing for a cast. // // Performs a rich variety of pre-order transformations (and some optimizations). // // Notably: // 1. 
Splits long -> small type casts into long -> int -> small type // for 32 bit targets. Does the same for float/double -> small type // casts for all targets. // 2. Morphs casts not supported by the target directly into helpers. // These mostly have to do with casts from and to floating point // types, especially checked ones. Refer to the implementation for // what specific casts need to be handled - it is a complex matrix. // 3. "Casts away" the GC-ness of a tree (for CAST(nint <- byref)) via // assigning the GC tree to an inline - COMMA(ASG, LCL_VAR) - non-GC // temporary. // 3. "Pushes down" truncating long -> int casts for some operations: // CAST(int <- MUL(long, long)) => MUL(CAST(int <- long), CAST(int <- long)). // The purpose of this is to allow "optNarrowTree" in the post-order // traversal to fold the tree into a TYP_INT one, which helps 32 bit // targets (and AMD64 too since 32 bit instructions are more compact). // TODO-Arm64-CQ: Re-evaluate the value of this optimization for ARM64. // // Arguments: // tree - the cast tree to morph // // Return Value: // The fully morphed tree, or "nullptr" if it needs further morphing, // in which case the cast may be transformed into an unchecked one // and its operand changed (the cast "expanded" into two). // GenTree* Compiler::fgMorphExpandCast(GenTreeCast* tree) { GenTree* oper = tree->CastOp(); if (fgGlobalMorph && (oper->gtOper == GT_ADDR)) { // Make sure we've checked if 'oper' is an address of an implicit-byref parameter. // If it is, fgMorphImplicitByRefArgs will change its type, and we want the cast // morphing code to see that type. fgMorphImplicitByRefArgs(oper); } var_types srcType = genActualType(oper); var_types dstType = tree->CastToType(); unsigned dstSize = genTypeSize(dstType); // See if the cast has to be done in two steps. R -> I if (varTypeIsFloating(srcType) && varTypeIsIntegral(dstType)) { if (srcType == TYP_FLOAT #if defined(TARGET_ARM64) // Arm64: src = float, dst is overflow conversion. // This goes through helper and hence src needs to be converted to double. && tree->gtOverflow() #elif defined(TARGET_AMD64) // Amd64: src = float, dst = uint64 or overflow conversion. // This goes through helper and hence src needs to be converted to double. && (tree->gtOverflow() || (dstType == TYP_ULONG)) #elif defined(TARGET_ARM) // Arm: src = float, dst = int64/uint64 or overflow conversion. && (tree->gtOverflow() || varTypeIsLong(dstType)) #else // x86: src = float, dst = uint32/int64/uint64 or overflow conversion. && (tree->gtOverflow() || varTypeIsLong(dstType) || (dstType == TYP_UINT)) #endif ) { oper = gtNewCastNode(TYP_DOUBLE, oper, false, TYP_DOUBLE); } // Do we need to do it in two steps R -> I -> smallType? if (dstSize < genTypeSize(TYP_INT)) { oper = gtNewCastNodeL(TYP_INT, oper, /* fromUnsigned */ false, TYP_INT); oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); tree->AsCast()->CastOp() = oper; // We must not mistreat the original cast, which was from a floating point type, // as from an unsigned type, since we now have a TYP_INT node for the source and // CAST_OVF(BYTE <- INT) != CAST_OVF(BYTE <- UINT). assert(!tree->IsUnsigned()); } else { if (!tree->gtOverflow()) { #ifdef TARGET_ARM64 // ARM64 supports all non-overflow checking conversions directly. 
return nullptr; #else switch (dstType) { case TYP_INT: return nullptr; case TYP_UINT: #if defined(TARGET_ARM) || defined(TARGET_AMD64) return nullptr; #else // TARGET_X86 return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT, oper); #endif // TARGET_X86 case TYP_LONG: #ifdef TARGET_AMD64 // SSE2 has instructions to convert a float/double directly to a long return nullptr; #else // !TARGET_AMD64 return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG, oper); #endif // !TARGET_AMD64 case TYP_ULONG: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG, oper); default: unreached(); } #endif // TARGET_ARM64 } else { switch (dstType) { case TYP_INT: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2INT_OVF, oper); case TYP_UINT: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT_OVF, oper); case TYP_LONG: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG_OVF, oper); case TYP_ULONG: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG_OVF, oper); default: unreached(); } } } } #ifndef TARGET_64BIT // The code generation phase (for x86 & ARM32) does not handle casts // directly from [u]long to anything other than [u]int. Insert an // intermediate cast to native int. else if (varTypeIsLong(srcType) && varTypeIsSmall(dstType)) { oper = gtNewCastNode(TYP_I_IMPL, oper, tree->IsUnsigned(), TYP_I_IMPL); oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); tree->ClearUnsigned(); tree->AsCast()->CastOp() = oper; } #endif //! TARGET_64BIT #ifdef TARGET_ARMARCH // AArch, unlike x86/amd64, has instructions that can cast directly from // all integers (except for longs on AArch32 of course) to floats. // Because there is no IL instruction conv.r4.un, uint/ulong -> float // casts are always imported as CAST(float <- CAST(double <- uint/ulong)). // We can eliminate the redundant intermediate cast as an optimization. else if ((dstType == TYP_FLOAT) && (srcType == TYP_DOUBLE) && oper->OperIs(GT_CAST) #ifdef TARGET_ARM && !varTypeIsLong(oper->AsCast()->CastOp()) #endif ) { oper->gtType = TYP_FLOAT; oper->CastToType() = TYP_FLOAT; return fgMorphTree(oper); } #endif // TARGET_ARMARCH #ifdef TARGET_ARM // converts long/ulong --> float/double casts into helper calls. else if (varTypeIsFloating(dstType) && varTypeIsLong(srcType)) { if (dstType == TYP_FLOAT) { // there is only a double helper, so we // - change the dsttype to double // - insert a cast from double to float // - recurse into the resulting tree tree->CastToType() = TYP_DOUBLE; tree->gtType = TYP_DOUBLE; tree = gtNewCastNode(TYP_FLOAT, tree, false, TYP_FLOAT); return fgMorphTree(tree); } if (tree->gtFlags & GTF_UNSIGNED) return fgMorphCastIntoHelper(tree, CORINFO_HELP_ULNG2DBL, oper); return fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper); } #endif // TARGET_ARM #ifdef TARGET_AMD64 // Do we have to do two step U4/8 -> R4/8 ? // Codegen supports the following conversion as one-step operation // a) Long -> R4/R8 // b) U8 -> R8 // // The following conversions are performed as two-step operations using above. // U4 -> R4/8 = U4-> Long -> R4/8 // U8 -> R4 = U8 -> R8 -> R4 else if (tree->IsUnsigned() && varTypeIsFloating(dstType)) { srcType = varTypeToUnsigned(srcType); if (srcType == TYP_ULONG) { if (dstType == TYP_FLOAT) { // Codegen can handle U8 -> R8 conversion. 
// U8 -> R4 = U8 -> R8 -> R4 // - change the dsttype to double // - insert a cast from double to float // - recurse into the resulting tree tree->CastToType() = TYP_DOUBLE; tree->gtType = TYP_DOUBLE; tree = gtNewCastNode(TYP_FLOAT, tree, false, TYP_FLOAT); return fgMorphTree(tree); } } else if (srcType == TYP_UINT) { oper = gtNewCastNode(TYP_LONG, oper, true, TYP_LONG); oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); tree->ClearUnsigned(); tree->CastOp() = oper; } } #endif // TARGET_AMD64 #ifdef TARGET_X86 // Do we have to do two step U4/8 -> R4/8 ? else if (tree->IsUnsigned() && varTypeIsFloating(dstType)) { srcType = varTypeToUnsigned(srcType); if (srcType == TYP_ULONG) { return fgMorphCastIntoHelper(tree, CORINFO_HELP_ULNG2DBL, oper); } else if (srcType == TYP_UINT) { oper = gtNewCastNode(TYP_LONG, oper, true, TYP_LONG); oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); tree->gtFlags &= ~GTF_UNSIGNED; return fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper); } } else if (((tree->gtFlags & GTF_UNSIGNED) == 0) && (srcType == TYP_LONG) && varTypeIsFloating(dstType)) { oper = fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper); // Since we don't have a Jit Helper that converts to a TYP_FLOAT // we just use the one that converts to a TYP_DOUBLE // and then add a cast to TYP_FLOAT // if ((dstType == TYP_FLOAT) && (oper->OperGet() == GT_CALL)) { // Fix the return type to be TYP_DOUBLE // oper->gtType = TYP_DOUBLE; // Add a Cast to TYP_FLOAT // tree = gtNewCastNode(TYP_FLOAT, oper, false, TYP_FLOAT); INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return tree; } else { return oper; } } #endif // TARGET_X86 else if (varTypeIsGC(srcType) != varTypeIsGC(dstType)) { // We are casting away GC information. we would like to just // change the type to int, however this gives the emitter fits because // it believes the variable is a GC variable at the beginning of the // instruction group, but is not turned non-gc by the code generator // we fix this by copying the GC pointer to a non-gc pointer temp. noway_assert(!varTypeIsGC(dstType) && "How can we have a cast to a GCRef here?"); // We generate an assignment to an int and then do the cast from an int. With this we avoid // the gc problem and we allow casts to bytes, longs, etc... unsigned lclNum = lvaGrabTemp(true DEBUGARG("Cast away GC")); oper->gtType = TYP_I_IMPL; GenTree* asg = gtNewTempAssign(lclNum, oper); oper->gtType = srcType; // do the real cast GenTree* cast = gtNewCastNode(tree->TypeGet(), gtNewLclvNode(lclNum, TYP_I_IMPL), false, dstType); // Generate the comma tree oper = gtNewOperNode(GT_COMMA, tree->TypeGet(), asg, cast); return fgMorphTree(oper); } // Look for narrowing casts ([u]long -> [u]int) and try to push them // down into the operand before morphing it. // // It doesn't matter if this is cast is from ulong or long (i.e. if // GTF_UNSIGNED is set) because the transformation is only applied to // overflow-insensitive narrowing casts, which always silently truncate. // // Note that casts from [u]long to small integer types are handled above. if ((srcType == TYP_LONG) && ((dstType == TYP_INT) || (dstType == TYP_UINT))) { // As a special case, look for overflow-sensitive casts of an AND // expression, and see if the second operand is a small constant. Since // the result of an AND is bound by its smaller operand, it may be // possible to prove that the cast won't overflow, which will in turn // allow the cast's operand to be transformed. 
if (tree->gtOverflow() && (oper->OperGet() == GT_AND)) { GenTree* andOp2 = oper->AsOp()->gtOp2; // Look for a constant less than 2^{32} for a cast to uint, or less // than 2^{31} for a cast to int. int maxWidth = (dstType == TYP_UINT) ? 32 : 31; if ((andOp2->OperGet() == GT_CNS_NATIVELONG) && ((andOp2->AsIntConCommon()->LngValue() >> maxWidth) == 0)) { tree->ClearOverflow(); tree->SetAllEffectsFlags(oper); } } // Only apply this transformation during global morph, // when neither the cast node nor the oper node may throw an exception // based on the upper 32 bits. // if (fgGlobalMorph && !tree->gtOverflow() && !oper->gtOverflowEx()) { // For these operations the lower 32 bits of the result only depends // upon the lower 32 bits of the operands. // bool canPushCast = oper->OperIs(GT_ADD, GT_SUB, GT_MUL, GT_AND, GT_OR, GT_XOR, GT_NOT, GT_NEG); // For long LSH cast to int, there is a discontinuity in behavior // when the shift amount is 32 or larger. // // CAST(INT, LSH(1LL, 31)) == LSH(1, 31) // LSH(CAST(INT, 1LL), CAST(INT, 31)) == LSH(1, 31) // // CAST(INT, LSH(1LL, 32)) == 0 // LSH(CAST(INT, 1LL), CAST(INT, 32)) == LSH(1, 32) == LSH(1, 0) == 1 // // So some extra validation is needed. // if (oper->OperIs(GT_LSH)) { GenTree* shiftAmount = oper->AsOp()->gtOp2; // Expose constant value for shift, if possible, to maximize the number // of cases we can handle. shiftAmount = gtFoldExpr(shiftAmount); oper->AsOp()->gtOp2 = shiftAmount; #if DEBUG // We may remorph the shift amount tree again later, so clear any morphed flag. shiftAmount->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; #endif // DEBUG if (shiftAmount->IsIntegralConst()) { const ssize_t shiftAmountValue = shiftAmount->AsIntCon()->IconValue(); if ((shiftAmountValue >= 64) || (shiftAmountValue < 0)) { // Shift amount is large enough or negative so result is undefined. // Don't try to optimize. assert(!canPushCast); } else if (shiftAmountValue >= 32) { // We know that we have a narrowing cast ([u]long -> [u]int) // and that we are casting to a 32-bit value, which will result in zero. // // Check to see if we have any side-effects that we must keep // if ((tree->gtFlags & GTF_ALL_EFFECT) == 0) { // Result of the shift is zero. DEBUG_DESTROY_NODE(tree); GenTree* zero = gtNewZeroConNode(TYP_INT); return fgMorphTree(zero); } else // We do have a side-effect { // We could create a GT_COMMA node here to keep the side-effect and return a zero // Instead we just don't try to optimize this case. canPushCast = false; } } else { // Shift amount is positive and small enough that we can push the cast through. canPushCast = true; } } else { // Shift amount is unknown. We can't optimize this case. assert(!canPushCast); } } if (canPushCast) { DEBUG_DESTROY_NODE(tree); // Insert narrowing casts for op1 and op2. oper->AsOp()->gtOp1 = gtNewCastNode(TYP_INT, oper->AsOp()->gtOp1, false, dstType); if (oper->AsOp()->gtOp2 != nullptr) { oper->AsOp()->gtOp2 = gtNewCastNode(TYP_INT, oper->AsOp()->gtOp2, false, dstType); } // Clear the GT_MUL_64RSLT if it is set. if (oper->gtOper == GT_MUL && (oper->gtFlags & GTF_MUL_64RSLT)) { oper->gtFlags &= ~GTF_MUL_64RSLT; } // The operation now produces a 32-bit result. oper->gtType = TYP_INT; // Remorph the new tree as the casts that we added may be folded away. 
return fgMorphTree(oper); } } } return nullptr; } #ifdef DEBUG const char* getNonStandardArgKindName(NonStandardArgKind kind) { switch (kind) { case NonStandardArgKind::None: return "None"; case NonStandardArgKind::PInvokeFrame: return "PInvokeFrame"; case NonStandardArgKind::PInvokeTarget: return "PInvokeTarget"; case NonStandardArgKind::PInvokeCookie: return "PInvokeCookie"; case NonStandardArgKind::WrapperDelegateCell: return "WrapperDelegateCell"; case NonStandardArgKind::ShiftLow: return "ShiftLow"; case NonStandardArgKind::ShiftHigh: return "ShiftHigh"; case NonStandardArgKind::FixedRetBuffer: return "FixedRetBuffer"; case NonStandardArgKind::VirtualStubCell: return "VirtualStubCell"; case NonStandardArgKind::R2RIndirectionCell: return "R2RIndirectionCell"; case NonStandardArgKind::ValidateIndirectCallTarget: return "ValidateIndirectCallTarget"; default: unreached(); } } void fgArgTabEntry::Dump() const { printf("fgArgTabEntry[arg %u", argNum); printf(" %d.%s", GetNode()->gtTreeID, GenTree::OpName(GetNode()->OperGet())); printf(" %s", varTypeName(argType)); printf(" (%s)", passedByRef ? "By ref" : "By value"); if (GetRegNum() != REG_STK) { printf(", %u reg%s:", numRegs, numRegs == 1 ? "" : "s"); for (unsigned i = 0; i < numRegs; i++) { printf(" %s", getRegName(regNums[i])); } } if (GetStackByteSize() > 0) { #if defined(DEBUG_ARG_SLOTS) printf(", numSlots=%u, slotNum=%u, byteSize=%u, byteOffset=%u", numSlots, slotNum, m_byteSize, m_byteOffset); #else printf(", byteSize=%u, byteOffset=%u", m_byteSize, m_byteOffset); #endif } printf(", byteAlignment=%u", m_byteAlignment); if (isLateArg()) { printf(", lateArgInx=%u", GetLateArgInx()); } if (IsSplit()) { printf(", isSplit"); } if (needTmp) { printf(", tmpNum=V%02u", tmpNum); } if (needPlace) { printf(", needPlace"); } if (isTmp) { printf(", isTmp"); } if (processed) { printf(", processed"); } if (IsHfaRegArg()) { printf(", isHfa(%s)", varTypeName(GetHfaType())); } if (isBackFilled) { printf(", isBackFilled"); } if (nonStandardArgKind != NonStandardArgKind::None) { printf(", nonStandard[%s]", getNonStandardArgKindName(nonStandardArgKind)); } if (isStruct) { printf(", isStruct"); } printf("]\n"); } #endif fgArgInfo::fgArgInfo(Compiler* comp, GenTreeCall* call, unsigned numArgs) { compiler = comp; callTree = call; argCount = 0; // filled in arg count, starts at zero DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;) nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE; stkLevel = 0; #if defined(UNIX_X86_ABI) alignmentDone = false; stkSizeBytes = 0; padStkAlign = 0; #endif #if FEATURE_FIXED_OUT_ARGS outArgSize = 0; #endif argTableSize = numArgs; // the allocated table size hasRegArgs = false; hasStackArgs = false; argsComplete = false; argsSorted = false; needsTemps = false; if (argTableSize == 0) { argTable = nullptr; } else { argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argTableSize]; } } /***************************************************************************** * * fgArgInfo Copy Constructor * * This method needs to act like a copy constructor for fgArgInfo. * The newCall needs to have its fgArgInfo initialized such that * we have newCall that is an exact copy of the oldCall. * We have to take care since the argument information * in the argTable contains pointers that must point to the * new arguments and not the old arguments. 
*/ fgArgInfo::fgArgInfo(GenTreeCall* newCall, GenTreeCall* oldCall) { fgArgInfo* oldArgInfo = oldCall->AsCall()->fgArgInfo; compiler = oldArgInfo->compiler; callTree = newCall; argCount = 0; // filled in arg count, starts at zero DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;) nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE; stkLevel = oldArgInfo->stkLevel; #if defined(UNIX_X86_ABI) alignmentDone = oldArgInfo->alignmentDone; stkSizeBytes = oldArgInfo->stkSizeBytes; padStkAlign = oldArgInfo->padStkAlign; #endif #if FEATURE_FIXED_OUT_ARGS outArgSize = oldArgInfo->outArgSize; #endif argTableSize = oldArgInfo->argTableSize; argsComplete = false; argTable = nullptr; assert(oldArgInfo->argsComplete); if (argTableSize > 0) { argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argTableSize]; // Copy the old arg entries for (unsigned i = 0; i < argTableSize; i++) { argTable[i] = new (compiler, CMK_fgArgInfo) fgArgTabEntry(*oldArgInfo->argTable[i]); } // The copied arg entries contain pointers to old uses, they need // to be updated to point to new uses. if (newCall->gtCallThisArg != nullptr) { for (unsigned i = 0; i < argTableSize; i++) { if (argTable[i]->use == oldCall->gtCallThisArg) { argTable[i]->use = newCall->gtCallThisArg; break; } } } GenTreeCall::UseIterator newUse = newCall->Args().begin(); GenTreeCall::UseIterator newUseEnd = newCall->Args().end(); GenTreeCall::UseIterator oldUse = oldCall->Args().begin(); GenTreeCall::UseIterator oldUseEnd = newCall->Args().end(); for (; newUse != newUseEnd; ++newUse, ++oldUse) { for (unsigned i = 0; i < argTableSize; i++) { if (argTable[i]->use == oldUse.GetUse()) { argTable[i]->use = newUse.GetUse(); break; } } } newUse = newCall->LateArgs().begin(); newUseEnd = newCall->LateArgs().end(); oldUse = oldCall->LateArgs().begin(); oldUseEnd = newCall->LateArgs().end(); for (; newUse != newUseEnd; ++newUse, ++oldUse) { for (unsigned i = 0; i < argTableSize; i++) { if (argTable[i]->lateUse == oldUse.GetUse()) { argTable[i]->lateUse = newUse.GetUse(); break; } } } } argCount = oldArgInfo->argCount; DEBUG_ARG_SLOTS_ONLY(nextSlotNum = oldArgInfo->nextSlotNum;) nextStackByteOffset = oldArgInfo->nextStackByteOffset; hasRegArgs = oldArgInfo->hasRegArgs; hasStackArgs = oldArgInfo->hasStackArgs; argsComplete = true; argsSorted = true; } void fgArgInfo::AddArg(fgArgTabEntry* curArgTabEntry) { assert(argCount < argTableSize); argTable[argCount] = curArgTabEntry; argCount++; } fgArgTabEntry* fgArgInfo::AddRegArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, regNumber regNum, unsigned numRegs, unsigned byteSize, unsigned byteAlignment, bool isStruct, bool isFloatHfa, bool isVararg /*=false*/) { fgArgTabEntry* curArgTabEntry = new (compiler, CMK_fgArgInfo) fgArgTabEntry; // Any additional register numbers are set by the caller. // This is primarily because on ARM we don't yet know if it // will be split or if it is a double HFA, so the number of registers // may actually be less. 
curArgTabEntry->setRegNum(0, regNum); curArgTabEntry->argNum = argNum; curArgTabEntry->argType = node->TypeGet(); curArgTabEntry->use = use; curArgTabEntry->lateUse = nullptr; curArgTabEntry->numRegs = numRegs; #if defined(DEBUG_ARG_SLOTS) curArgTabEntry->slotNum = 0; curArgTabEntry->numSlots = 0; #endif curArgTabEntry->SetLateArgInx(UINT_MAX); curArgTabEntry->tmpNum = BAD_VAR_NUM; curArgTabEntry->SetSplit(false); curArgTabEntry->isTmp = false; curArgTabEntry->needTmp = false; curArgTabEntry->needPlace = false; curArgTabEntry->processed = false; if (GlobalJitOptions::compFeatureHfa) { curArgTabEntry->SetHfaElemKind(CORINFO_HFA_ELEM_NONE); } curArgTabEntry->isBackFilled = false; curArgTabEntry->nonStandardArgKind = NonStandardArgKind::None; curArgTabEntry->isStruct = isStruct; curArgTabEntry->SetIsVararg(isVararg); curArgTabEntry->SetByteAlignment(byteAlignment); curArgTabEntry->SetByteSize(byteSize, isStruct, isFloatHfa); curArgTabEntry->SetByteOffset(0); hasRegArgs = true; if (argCount >= argTableSize) { fgArgTabEntry** oldTable = argTable; argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argCount + 1]; memcpy(argTable, oldTable, argCount * sizeof(fgArgTabEntry*)); argTableSize++; } AddArg(curArgTabEntry); return curArgTabEntry; } #if defined(UNIX_AMD64_ABI) fgArgTabEntry* fgArgInfo::AddRegArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, regNumber regNum, unsigned numRegs, unsigned byteSize, unsigned byteAlignment, const bool isStruct, const bool isFloatHfa, const bool isVararg, const regNumber otherRegNum, const unsigned structIntRegs, const unsigned structFloatRegs, const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr) { fgArgTabEntry* curArgTabEntry = AddRegArg(argNum, node, use, regNum, numRegs, byteSize, byteAlignment, isStruct, isFloatHfa, isVararg); assert(curArgTabEntry != nullptr); curArgTabEntry->isStruct = isStruct; // is this a struct arg curArgTabEntry->structIntRegs = structIntRegs; curArgTabEntry->structFloatRegs = structFloatRegs; INDEBUG(curArgTabEntry->checkIsStruct();) assert(numRegs <= 2); if (numRegs == 2) { curArgTabEntry->setRegNum(1, otherRegNum); } if (isStruct && structDescPtr != nullptr) { curArgTabEntry->structDesc.CopyFrom(*structDescPtr); } return curArgTabEntry; } #endif // defined(UNIX_AMD64_ABI) fgArgTabEntry* fgArgInfo::AddStkArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, unsigned numSlots, unsigned byteSize, unsigned byteAlignment, bool isStruct, bool isFloatHfa, bool isVararg /*=false*/) { fgArgTabEntry* curArgTabEntry = new (compiler, CMK_fgArgInfo) fgArgTabEntry; #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi()) { nextSlotNum = roundUp(nextSlotNum, byteAlignment / TARGET_POINTER_SIZE); } #endif nextStackByteOffset = roundUp(nextStackByteOffset, byteAlignment); DEBUG_ARG_SLOTS_ASSERT(nextStackByteOffset / TARGET_POINTER_SIZE == nextSlotNum); curArgTabEntry->setRegNum(0, REG_STK); curArgTabEntry->argNum = argNum; curArgTabEntry->argType = node->TypeGet(); curArgTabEntry->use = use; curArgTabEntry->lateUse = nullptr; #if defined(DEBUG_ARG_SLOTS) curArgTabEntry->numSlots = numSlots; curArgTabEntry->slotNum = nextSlotNum; #endif curArgTabEntry->numRegs = 0; #if defined(UNIX_AMD64_ABI) curArgTabEntry->structIntRegs = 0; curArgTabEntry->structFloatRegs = 0; #endif // defined(UNIX_AMD64_ABI) curArgTabEntry->SetLateArgInx(UINT_MAX); curArgTabEntry->tmpNum = BAD_VAR_NUM; curArgTabEntry->SetSplit(false); curArgTabEntry->isTmp = false; curArgTabEntry->needTmp = false; curArgTabEntry->needPlace = 
false; curArgTabEntry->processed = false; if (GlobalJitOptions::compFeatureHfa) { curArgTabEntry->SetHfaElemKind(CORINFO_HFA_ELEM_NONE); } curArgTabEntry->isBackFilled = false; curArgTabEntry->nonStandardArgKind = NonStandardArgKind::None; curArgTabEntry->isStruct = isStruct; curArgTabEntry->SetIsVararg(isVararg); curArgTabEntry->SetByteAlignment(byteAlignment); curArgTabEntry->SetByteSize(byteSize, isStruct, isFloatHfa); curArgTabEntry->SetByteOffset(nextStackByteOffset); hasStackArgs = true; AddArg(curArgTabEntry); DEBUG_ARG_SLOTS_ONLY(nextSlotNum += numSlots;) nextStackByteOffset += curArgTabEntry->GetByteSize(); return curArgTabEntry; } void fgArgInfo::RemorphReset() { DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;) nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE; } //------------------------------------------------------------------------ // UpdateRegArg: Update the given fgArgTabEntry while morphing. // // Arguments: // curArgTabEntry - the fgArgTabEntry to update. // node - the tree node that defines the argument // reMorphing - a boolean value indicate whether we are remorphing the call // // Assumptions: // This must have already been determined to be at least partially passed in registers. // void fgArgInfo::UpdateRegArg(fgArgTabEntry* curArgTabEntry, GenTree* node, bool reMorphing) { bool isLateArg = curArgTabEntry->isLateArg(); // If this is a late arg, we'd better be updating it with a correctly marked node, and vice-versa. assert((isLateArg && ((node->gtFlags & GTF_LATE_ARG) != 0)) || (!isLateArg && ((node->gtFlags & GTF_LATE_ARG) == 0))); assert(curArgTabEntry->numRegs != 0); assert(curArgTabEntry->use->GetNode() == node); } //------------------------------------------------------------------------ // UpdateStkArg: Update the given fgArgTabEntry while morphing. // // Arguments: // curArgTabEntry - the fgArgTabEntry to update. // node - the tree node that defines the argument // reMorphing - a boolean value indicate whether we are remorphing the call // // Assumptions: // This must have already been determined to be passed on the stack. // void fgArgInfo::UpdateStkArg(fgArgTabEntry* curArgTabEntry, GenTree* node, bool reMorphing) { bool isLateArg = curArgTabEntry->isLateArg(); // If this is a late arg, we'd better be updating it with a correctly marked node, and vice-versa. 
assert((isLateArg && ((node->gtFlags & GTF_LATE_ARG) != 0)) || (!isLateArg && ((node->gtFlags & GTF_LATE_ARG) == 0))); noway_assert(curArgTabEntry->use != callTree->gtCallThisArg); assert((curArgTabEntry->GetRegNum() == REG_STK) || curArgTabEntry->IsSplit()); assert(curArgTabEntry->use->GetNode() == node); #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi()) { nextSlotNum = roundUp(nextSlotNum, curArgTabEntry->GetByteAlignment() / TARGET_POINTER_SIZE); assert(curArgTabEntry->slotNum == nextSlotNum); nextSlotNum += curArgTabEntry->numSlots; } #endif nextStackByteOffset = roundUp(nextStackByteOffset, curArgTabEntry->GetByteAlignment()); assert(curArgTabEntry->GetByteOffset() == nextStackByteOffset); nextStackByteOffset += curArgTabEntry->GetStackByteSize(); } void fgArgInfo::SplitArg(unsigned argNum, unsigned numRegs, unsigned numSlots) { fgArgTabEntry* curArgTabEntry = nullptr; assert(argNum < argCount); for (unsigned inx = 0; inx < argCount; inx++) { curArgTabEntry = argTable[inx]; if (curArgTabEntry->argNum == argNum) { break; } } assert(numRegs > 0); assert(numSlots > 0); if (argsComplete) { assert(curArgTabEntry->IsSplit() == true); assert(curArgTabEntry->numRegs == numRegs); DEBUG_ARG_SLOTS_ONLY(assert(curArgTabEntry->numSlots == numSlots);) assert(hasStackArgs == true); } else { curArgTabEntry->SetSplit(true); curArgTabEntry->numRegs = numRegs; DEBUG_ARG_SLOTS_ONLY(curArgTabEntry->numSlots = numSlots;) curArgTabEntry->SetByteOffset(0); hasStackArgs = true; } DEBUG_ARG_SLOTS_ONLY(nextSlotNum += numSlots;) // TODO-Cleanup: structs are aligned to 8 bytes on arm64 apple, so it would work, but pass the precise size. nextStackByteOffset += numSlots * TARGET_POINTER_SIZE; } //------------------------------------------------------------------------ // EvalToTmp: Replace the node in the given fgArgTabEntry with a temp // // Arguments: // curArgTabEntry - the fgArgTabEntry for the argument // tmpNum - the varNum for the temp // newNode - the assignment of the argument value to the temp // // Notes: // Although the name of this method is EvalToTmp, it doesn't actually create // the temp or the copy. // void fgArgInfo::EvalToTmp(fgArgTabEntry* curArgTabEntry, unsigned tmpNum, GenTree* newNode) { assert(curArgTabEntry->use != callTree->gtCallThisArg); assert(curArgTabEntry->use->GetNode() == newNode); assert(curArgTabEntry->GetNode() == newNode); curArgTabEntry->tmpNum = tmpNum; curArgTabEntry->isTmp = true; } void fgArgInfo::ArgsComplete() { bool hasStructRegArg = false; for (unsigned curInx = 0; curInx < argCount; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; assert(curArgTabEntry != nullptr); GenTree* argx = curArgTabEntry->GetNode(); if (curArgTabEntry->GetRegNum() == REG_STK) { assert(hasStackArgs == true); #if !FEATURE_FIXED_OUT_ARGS // On x86 we use push instructions to pass arguments: // The non-register arguments are evaluated and pushed in order // and they are never evaluated into temps // continue; #endif } #if FEATURE_ARG_SPLIT else if (curArgTabEntry->IsSplit()) { hasStructRegArg = true; assert(hasStackArgs == true); } #endif // FEATURE_ARG_SPLIT else // we have a register argument, next we look for a struct type. { if (varTypeIsStruct(argx) UNIX_AMD64_ABI_ONLY(|| curArgTabEntry->isStruct)) { hasStructRegArg = true; } } /* If the argument tree contains an assignment (GTF_ASG) then the argument and and every earlier argument (except constants) must be evaluated into temps since there may be other arguments that follow and they may use the value being assigned. 
EXAMPLE: ArgTab is "a, a=5, a" -> when we see the second arg "a=5" we know the first two arguments "a, a=5" have to be evaluated into temps For the case of an assignment, we only know that there exist some assignment someplace in the tree. We don't know what is being assigned so we are very conservative here and assume that any local variable could have been assigned. */ if (argx->gtFlags & GTF_ASG) { // If this is not the only argument, or it's a copyblk, or it already evaluates the expression to // a tmp, then we need a temp in the late arg list. if ((argCount > 1) || argx->OperIsCopyBlkOp() #ifdef FEATURE_FIXED_OUT_ARGS || curArgTabEntry->isTmp // I protect this by "FEATURE_FIXED_OUT_ARGS" to preserve the property // that we only have late non-register args when that feature is on. #endif // FEATURE_FIXED_OUT_ARGS ) { curArgTabEntry->needTmp = true; needsTemps = true; } // For all previous arguments, unless they are a simple constant // we require that they be evaluated into temps for (unsigned prevInx = 0; prevInx < curInx; prevInx++) { fgArgTabEntry* prevArgTabEntry = argTable[prevInx]; assert(prevArgTabEntry->argNum < curArgTabEntry->argNum); if (!prevArgTabEntry->GetNode()->IsInvariant()) { prevArgTabEntry->needTmp = true; needsTemps = true; } } } bool treatLikeCall = ((argx->gtFlags & GTF_CALL) != 0); #if FEATURE_FIXED_OUT_ARGS // Like calls, if this argument has a tree that will do an inline throw, // a call to a jit helper, then we need to treat it like a call (but only // if there are/were any stack args). // This means unnesting, sorting, etc. Technically this is overly // conservative, but I want to avoid as much special-case debug-only code // as possible, so leveraging the GTF_CALL flag is the easiest. // if (!treatLikeCall && (argx->gtFlags & GTF_EXCEPT) && (argCount > 1) && compiler->opts.compDbgCode && (compiler->fgWalkTreePre(&argx, Compiler::fgChkThrowCB) == Compiler::WALK_ABORT)) { for (unsigned otherInx = 0; otherInx < argCount; otherInx++) { if (otherInx == curInx) { continue; } if (argTable[otherInx]->GetRegNum() == REG_STK) { treatLikeCall = true; break; } } } #endif // FEATURE_FIXED_OUT_ARGS /* If it contains a call (GTF_CALL) then itself and everything before the call with a GLOB_EFFECT must eval to temp (this is because everything with SIDE_EFFECT has to be kept in the right order since we will move the call to the first position) For calls we don't have to be quite as conservative as we are with an assignment since the call won't be modifying any non-address taken LclVars. 
*/ if (treatLikeCall) { if (argCount > 1) // If this is not the only argument { curArgTabEntry->needTmp = true; needsTemps = true; } else if (varTypeIsFloating(argx->TypeGet()) && (argx->OperGet() == GT_CALL)) { // Spill all arguments that are floating point calls curArgTabEntry->needTmp = true; needsTemps = true; } // All previous arguments may need to be evaluated into temps for (unsigned prevInx = 0; prevInx < curInx; prevInx++) { fgArgTabEntry* prevArgTabEntry = argTable[prevInx]; assert(prevArgTabEntry->argNum < curArgTabEntry->argNum); // For all previous arguments, if they have any GTF_ALL_EFFECT // we require that they be evaluated into a temp if ((prevArgTabEntry->GetNode()->gtFlags & GTF_ALL_EFFECT) != 0) { prevArgTabEntry->needTmp = true; needsTemps = true; } #if FEATURE_FIXED_OUT_ARGS // Or, if they are stored into the FIXED_OUT_ARG area // we require that they be moved to the gtCallLateArgs // and replaced with a placeholder node else if (prevArgTabEntry->GetRegNum() == REG_STK) { prevArgTabEntry->needPlace = true; } #if FEATURE_ARG_SPLIT else if (prevArgTabEntry->IsSplit()) { prevArgTabEntry->needPlace = true; } #endif // FEATURE_ARG_SPLIT #endif } } #if FEATURE_MULTIREG_ARGS // For RyuJIT backend we will expand a Multireg arg into a GT_FIELD_LIST // with multiple indirections, so here we consider spilling it into a tmp LclVar. // CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_ARM bool isMultiRegArg = (curArgTabEntry->numRegs > 0) && (curArgTabEntry->numRegs + curArgTabEntry->GetStackSlotsNumber() > 1); #else bool isMultiRegArg = (curArgTabEntry->numRegs > 1); #endif if ((varTypeIsStruct(argx->TypeGet())) && (curArgTabEntry->needTmp == false)) { if (isMultiRegArg && ((argx->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) != 0)) { // Spill multireg struct arguments that have Assignments or Calls embedded in them curArgTabEntry->needTmp = true; needsTemps = true; } else { // We call gtPrepareCost to measure the cost of evaluating this tree compiler->gtPrepareCost(argx); if (isMultiRegArg && (argx->GetCostEx() > (6 * IND_COST_EX))) { // Spill multireg struct arguments that are expensive to evaluate twice curArgTabEntry->needTmp = true; needsTemps = true; } #if defined(FEATURE_SIMD) && defined(TARGET_ARM64) else if (isMultiRegArg && varTypeIsSIMD(argx->TypeGet())) { // SIMD types do not need the optimization below due to their sizes if (argx->OperIsSimdOrHWintrinsic() || (argx->OperIs(GT_OBJ) && argx->AsObj()->gtOp1->OperIs(GT_ADDR) && argx->AsObj()->gtOp1->AsOp()->gtOp1->OperIsSimdOrHWintrinsic())) { curArgTabEntry->needTmp = true; needsTemps = true; } } #endif #ifndef TARGET_ARM // TODO-Arm: This optimization is not implemented for ARM32 // so we skip this for ARM32 until it is ported to use RyuJIT backend // else if (argx->OperGet() == GT_OBJ) { GenTreeObj* argObj = argx->AsObj(); unsigned structSize = argObj->GetLayout()->GetSize(); switch (structSize) { case 3: case 5: case 6: case 7: // If we have a stack based LclVar we can perform a wider read of 4 or 8 bytes // if (argObj->AsObj()->gtOp1->IsLocalAddrExpr() == nullptr) // Is the source not a LclVar? { // If we don't have a LclVar we need to read exactly 3,5,6 or 7 bytes // For now we use a a GT_CPBLK to copy the exact size into a GT_LCL_VAR temp. 
// curArgTabEntry->needTmp = true; needsTemps = true; } break; case 11: case 13: case 14: case 15: // Spill any GT_OBJ multireg structs that are difficult to extract // // When we have a GT_OBJ of a struct with the above sizes we would need // to use 3 or 4 load instructions to load the exact size of this struct. // Instead we spill the GT_OBJ into a new GT_LCL_VAR temp and this sequence // will use a GT_CPBLK to copy the exact size into the GT_LCL_VAR temp. // Then we can just load all 16 bytes of the GT_LCL_VAR temp when passing // the argument. // curArgTabEntry->needTmp = true; needsTemps = true; break; default: break; } } #endif // !TARGET_ARM } } #endif // FEATURE_MULTIREG_ARGS } // We only care because we can't spill structs and qmarks involve a lot of spilling, but // if we don't have qmarks, then it doesn't matter. // So check for Qmark's globally once here, instead of inside the loop. // const bool hasStructRegArgWeCareAbout = (hasStructRegArg && compiler->compQmarkUsed); #if FEATURE_FIXED_OUT_ARGS // For Arm/x64 we only care because we can't reorder a register // argument that uses GT_LCLHEAP. This is an optimization to // save a check inside the below loop. // const bool hasStackArgsWeCareAbout = (hasStackArgs && compiler->compLocallocUsed); #else const bool hasStackArgsWeCareAbout = hasStackArgs; #endif // FEATURE_FIXED_OUT_ARGS // If we have any stack args we have to force the evaluation // of any arguments passed in registers that might throw an exception // // Technically we only a required to handle the following two cases: // a GT_IND with GTF_IND_RNGCHK (only on x86) or // a GT_LCLHEAP node that allocates stuff on the stack // if (hasStackArgsWeCareAbout || hasStructRegArgWeCareAbout) { for (unsigned curInx = 0; curInx < argCount; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; assert(curArgTabEntry != nullptr); GenTree* argx = curArgTabEntry->GetNode(); // Examine the register args that are currently not marked needTmp // if (!curArgTabEntry->needTmp && (curArgTabEntry->GetRegNum() != REG_STK)) { if (hasStackArgsWeCareAbout) { #if !FEATURE_FIXED_OUT_ARGS // On x86 we previously recorded a stack depth of zero when // morphing the register arguments of any GT_IND with a GTF_IND_RNGCHK flag // Thus we can not reorder the argument after any stack based argument // (Note that GT_LCLHEAP sets the GTF_EXCEPT flag so we don't need to // check for it explicitly.) // if (argx->gtFlags & GTF_EXCEPT) { curArgTabEntry->needTmp = true; needsTemps = true; continue; } #else // For Arm/X64 we can't reorder a register argument that uses a GT_LCLHEAP // if (argx->gtFlags & GTF_EXCEPT) { assert(compiler->compLocallocUsed); // Returns WALK_ABORT if a GT_LCLHEAP node is encountered in the argx tree // if (compiler->fgWalkTreePre(&argx, Compiler::fgChkLocAllocCB) == Compiler::WALK_ABORT) { curArgTabEntry->needTmp = true; needsTemps = true; continue; } } #endif } if (hasStructRegArgWeCareAbout) { // Returns true if a GT_QMARK node is encountered in the argx tree // if (compiler->fgWalkTreePre(&argx, Compiler::fgChkQmarkCB) == Compiler::WALK_ABORT) { curArgTabEntry->needTmp = true; needsTemps = true; continue; } } } } } // When CFG is enabled and this is a delegate call or vtable call we must // compute the call target before all late args. However this will // effectively null-check 'this', which should happen only after all // arguments are evaluated. Thus we must evaluate all args with side // effects to a temp. 
if (compiler->opts.IsCFGEnabled() && (callTree->IsVirtualVtable() || callTree->IsDelegateInvoke())) { // Always evaluate 'this' to temp. argTable[0]->needTmp = true; needsTemps = true; for (unsigned curInx = 1; curInx < argCount; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; GenTree* arg = curArgTabEntry->GetNode(); if ((arg->gtFlags & GTF_ALL_EFFECT) != 0) { curArgTabEntry->needTmp = true; needsTemps = true; } } } argsComplete = true; } void fgArgInfo::SortArgs() { assert(argsComplete == true); #ifdef DEBUG if (compiler->verbose) { printf("\nSorting the arguments:\n"); } #endif /* Shuffle the arguments around before we build the gtCallLateArgs list. The idea is to move all "simple" arguments like constants and local vars to the end of the table, and move the complex arguments towards the beginning of the table. This will help prevent registers from being spilled by allowing us to evaluate the more complex arguments before the simpler arguments. The argTable ends up looking like: +------------------------------------+ <--- argTable[argCount - 1] | constants | +------------------------------------+ | local var / local field | +------------------------------------+ | remaining arguments sorted by cost | +------------------------------------+ | temps (argTable[].needTmp = true) | +------------------------------------+ | args with calls (GTF_CALL) | +------------------------------------+ <--- argTable[0] */ /* Set the beginning and end for the new argument table */ unsigned curInx; int regCount = 0; unsigned begTab = 0; unsigned endTab = argCount - 1; unsigned argsRemaining = argCount; // First take care of arguments that are constants. // [We use a backward iterator pattern] // curInx = argCount; do { curInx--; fgArgTabEntry* curArgTabEntry = argTable[curInx]; if (curArgTabEntry->GetRegNum() != REG_STK) { regCount++; } assert(curArgTabEntry->lateUse == nullptr); // Skip any already processed args // if (!curArgTabEntry->processed) { GenTree* argx = curArgTabEntry->GetNode(); // put constants at the end of the table // if (argx->gtOper == GT_CNS_INT) { noway_assert(curInx <= endTab); curArgTabEntry->processed = true; // place curArgTabEntry at the endTab position by performing a swap // if (curInx != endTab) { argTable[curInx] = argTable[endTab]; argTable[endTab] = curArgTabEntry; } endTab--; argsRemaining--; } } } while (curInx > 0); if (argsRemaining > 0) { // Next take care of arguments that are calls. // [We use a forward iterator pattern] // for (curInx = begTab; curInx <= endTab; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; // Skip any already processed args // if (!curArgTabEntry->processed) { GenTree* argx = curArgTabEntry->GetNode(); // put calls at the beginning of the table // if (argx->gtFlags & GTF_CALL) { curArgTabEntry->processed = true; // place curArgTabEntry at the begTab position by performing a swap // if (curInx != begTab) { argTable[curInx] = argTable[begTab]; argTable[begTab] = curArgTabEntry; } begTab++; argsRemaining--; } } } } if (argsRemaining > 0) { // Next take care arguments that are temps. // These temps come before the arguments that are // ordinary local vars or local fields // since this will give them a better chance to become // enregistered into their actual argument register. 
// [We use a forward iterator pattern] // for (curInx = begTab; curInx <= endTab; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; // Skip any already processed args // if (!curArgTabEntry->processed) { if (curArgTabEntry->needTmp) { curArgTabEntry->processed = true; // place curArgTabEntry at the begTab position by performing a swap // if (curInx != begTab) { argTable[curInx] = argTable[begTab]; argTable[begTab] = curArgTabEntry; } begTab++; argsRemaining--; } } } } if (argsRemaining > 0) { // Next take care of local var and local field arguments. // These are moved towards the end of the argument evaluation. // [We use a backward iterator pattern] // curInx = endTab + 1; do { curInx--; fgArgTabEntry* curArgTabEntry = argTable[curInx]; // Skip any already processed args // if (!curArgTabEntry->processed) { GenTree* argx = curArgTabEntry->GetNode(); if ((argx->gtOper == GT_LCL_VAR) || (argx->gtOper == GT_LCL_FLD)) { noway_assert(curInx <= endTab); curArgTabEntry->processed = true; // place curArgTabEntry at the endTab position by performing a swap // if (curInx != endTab) { argTable[curInx] = argTable[endTab]; argTable[endTab] = curArgTabEntry; } endTab--; argsRemaining--; } } } while (curInx > begTab); } // Finally, take care of all the remaining arguments. // Note that we fill in one arg at a time using a while loop. bool costsPrepared = false; // Only prepare tree costs once, the first time through this loop while (argsRemaining > 0) { /* Find the most expensive arg remaining and evaluate it next */ fgArgTabEntry* expensiveArgTabEntry = nullptr; unsigned expensiveArg = UINT_MAX; unsigned expensiveArgCost = 0; // [We use a forward iterator pattern] // for (curInx = begTab; curInx <= endTab; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; // Skip any already processed args // if (!curArgTabEntry->processed) { GenTree* argx = curArgTabEntry->GetNode(); // We should have already handled these kinds of args assert(argx->gtOper != GT_LCL_VAR); assert(argx->gtOper != GT_LCL_FLD); assert(argx->gtOper != GT_CNS_INT); // This arg should either have no persistent side effects or be the last one in our table // assert(((argx->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) == 0) || (curInx == (argCount-1))); if (argsRemaining == 1) { // This is the last arg to place expensiveArg = curInx; expensiveArgTabEntry = curArgTabEntry; assert(begTab == endTab); break; } else { if (!costsPrepared) { /* We call gtPrepareCost to measure the cost of evaluating this tree */ compiler->gtPrepareCost(argx); } if (argx->GetCostEx() > expensiveArgCost) { // Remember this arg as the most expensive one that we have yet seen expensiveArgCost = argx->GetCostEx(); expensiveArg = curInx; expensiveArgTabEntry = curArgTabEntry; } } } } noway_assert(expensiveArg != UINT_MAX); // put the most expensive arg towards the beginning of the table expensiveArgTabEntry->processed = true; // place expensiveArgTabEntry at the begTab position by performing a swap // if (expensiveArg != begTab) { argTable[expensiveArg] = argTable[begTab]; argTable[begTab] = expensiveArgTabEntry; } begTab++; argsRemaining--; costsPrepared = true; // If we have more expensive arguments, don't re-evaluate the tree cost on the next loop } // The table should now be completely filled and thus begTab should now be adjacent to endTab // and regArgsRemaining should be zero assert(begTab == (endTab + 1)); assert(argsRemaining == 0); argsSorted = true; } #ifdef DEBUG void fgArgInfo::Dump(Compiler* compiler) const { for (unsigned curInx = 0; curInx 
< ArgCount(); curInx++) { fgArgTabEntry* curArgEntry = ArgTable()[curInx]; curArgEntry->Dump(); } } #endif //------------------------------------------------------------------------------ // fgMakeTmpArgNode : This function creates a tmp var only if needed. // We need this to be done in order to enforce ordering // of the evaluation of arguments. // // Arguments: // curArgTabEntry // // Return Value: // the newly created temp var tree. GenTree* Compiler::fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry) { unsigned tmpVarNum = curArgTabEntry->tmpNum; LclVarDsc* varDsc = lvaGetDesc(tmpVarNum); assert(varDsc->lvIsTemp); var_types type = varDsc->TypeGet(); // Create a copy of the temp to go into the late argument list GenTree* arg = gtNewLclvNode(tmpVarNum, type); GenTree* addrNode = nullptr; if (varTypeIsStruct(type)) { #if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM) // Can this type be passed as a primitive type? // If so, the following call will return the corresponding primitive type. // Otherwise, it will return TYP_UNKNOWN and we will pass it as a struct type. bool passedAsPrimitive = false; if (curArgTabEntry->TryPassAsPrimitive()) { CORINFO_CLASS_HANDLE clsHnd = varDsc->GetStructHnd(); var_types structBaseType = getPrimitiveTypeForStruct(lvaLclExactSize(tmpVarNum), clsHnd, curArgTabEntry->IsVararg()); if (structBaseType != TYP_UNKNOWN) { passedAsPrimitive = true; #if defined(UNIX_AMD64_ABI) // TODO-Cleanup: This is inelegant, but eventually we'll track this in the fgArgTabEntry, // and otherwise we'd have to either modify getPrimitiveTypeForStruct() to take // a structDesc or call eeGetSystemVAmd64PassStructInRegisterDescriptor yet again. // if (genIsValidFloatReg(curArgTabEntry->GetRegNum())) { if (structBaseType == TYP_INT) { structBaseType = TYP_FLOAT; } else { assert(structBaseType == TYP_LONG); structBaseType = TYP_DOUBLE; } } #endif type = structBaseType; } } // If it is passed in registers, don't get the address of the var. Make it a // field instead. It will be loaded in registers with putarg_reg tree in lower. if (passedAsPrimitive) { arg->ChangeOper(GT_LCL_FLD); arg->gtType = type; lvaSetVarDoNotEnregister(tmpVarNum DEBUGARG(DoNotEnregisterReason::SwizzleArg)); } else { var_types addrType = TYP_BYREF; arg = gtNewOperNode(GT_ADDR, addrType, arg); lvaSetVarAddrExposed(tmpVarNum DEBUGARG(AddressExposedReason::ESCAPE_ADDRESS)); addrNode = arg; #if FEATURE_MULTIREG_ARGS #ifdef TARGET_ARM64 assert(varTypeIsStruct(type)); if (lvaIsMultiregStruct(varDsc, curArgTabEntry->IsVararg())) { // We will create a GT_OBJ for the argument below. // This will be passed by value in two registers. assert(addrNode != nullptr); // Create an Obj of the temp to use it as a call argument. arg = gtNewObjNode(lvaGetStruct(tmpVarNum), arg); } #else // Always create an Obj of the temp to use it as a call argument. arg = gtNewObjNode(lvaGetStruct(tmpVarNum), arg); #endif // !TARGET_ARM64 #endif // FEATURE_MULTIREG_ARGS } #else // not (TARGET_AMD64 or TARGET_ARM64 or TARGET_ARM) // other targets, we pass the struct by value assert(varTypeIsStruct(type)); addrNode = gtNewOperNode(GT_ADDR, TYP_BYREF, arg); // Get a new Obj node temp to use it as a call argument. // gtNewObjNode will set the GTF_EXCEPT flag if this is not a local stack object. 
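// (In this path the source is the stack-allocated temp itself, so the resulting OBJ
// will not be treated as a faulting indirection.)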
arg = gtNewObjNode(lvaGetStruct(tmpVarNum), addrNode); #endif // not (TARGET_AMD64 or TARGET_ARM64 or TARGET_ARM) } // (varTypeIsStruct(type)) if (addrNode != nullptr) { assert(addrNode->gtOper == GT_ADDR); // the child of a GT_ADDR is required to have this flag set addrNode->AsOp()->gtOp1->gtFlags |= GTF_DONT_CSE; } return arg; } //------------------------------------------------------------------------------ // EvalArgsToTemps : Create temp assignments and populate the LateArgs list. void fgArgInfo::EvalArgsToTemps() { assert(argsSorted); unsigned regArgInx = 0; // Now go through the argument table and perform the necessary evaluation into temps GenTreeCall::Use* tmpRegArgNext = nullptr; for (unsigned curInx = 0; curInx < argCount; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; assert(curArgTabEntry->lateUse == nullptr); GenTree* argx = curArgTabEntry->GetNode(); GenTree* setupArg = nullptr; GenTree* defArg; #if !FEATURE_FIXED_OUT_ARGS // Only ever set for FEATURE_FIXED_OUT_ARGS assert(curArgTabEntry->needPlace == false); // On x86 and other archs that use push instructions to pass arguments: // Only the register arguments need to be replaced with placeholder nodes. // Stacked arguments are evaluated and pushed (or stored into the stack) in order. // if (curArgTabEntry->GetRegNum() == REG_STK) continue; #endif if (curArgTabEntry->needTmp) { if (curArgTabEntry->isTmp) { // Create a copy of the temp to go into the late argument list defArg = compiler->fgMakeTmpArgNode(curArgTabEntry); // mark the original node as a late argument argx->gtFlags |= GTF_LATE_ARG; } else { // Create a temp assignment for the argument // Put the temp in the gtCallLateArgs list CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (compiler->verbose) { printf("Argument with 'side effect'...\n"); compiler->gtDispTree(argx); } #endif #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) noway_assert(argx->gtType != TYP_STRUCT); #endif unsigned tmpVarNum = compiler->lvaGrabTemp(true DEBUGARG("argument with side effect")); if (argx->gtOper == GT_MKREFANY) { // For GT_MKREFANY, typically the actual struct copying does // not have any side-effects and can be delayed. So instead // of using a temp for the whole struct, we can just use a temp // for operand that that has a side-effect GenTree* operand; if ((argx->AsOp()->gtOp2->gtFlags & GTF_ALL_EFFECT) == 0) { operand = argx->AsOp()->gtOp1; // In the early argument evaluation, place an assignment to the temp // from the source operand of the mkrefany setupArg = compiler->gtNewTempAssign(tmpVarNum, operand); // Replace the operand for the mkrefany with the new temp. argx->AsOp()->gtOp1 = compiler->gtNewLclvNode(tmpVarNum, operand->TypeGet()); } else if ((argx->AsOp()->gtOp1->gtFlags & GTF_ALL_EFFECT) == 0) { operand = argx->AsOp()->gtOp2; // In the early argument evaluation, place an assignment to the temp // from the source operand of the mkrefany setupArg = compiler->gtNewTempAssign(tmpVarNum, operand); // Replace the operand for the mkrefany with the new temp. 
argx->AsOp()->gtOp2 = compiler->gtNewLclvNode(tmpVarNum, operand->TypeGet()); } } if (setupArg != nullptr) { // Now keep the mkrefany for the late argument list defArg = argx; // Clear the side-effect flags because now both op1 and op2 have no side-effects defArg->gtFlags &= ~GTF_ALL_EFFECT; } else { setupArg = compiler->gtNewTempAssign(tmpVarNum, argx); LclVarDsc* varDsc = compiler->lvaGetDesc(tmpVarNum); var_types lclVarType = genActualType(argx->gtType); var_types scalarType = TYP_UNKNOWN; if (setupArg->OperIsCopyBlkOp()) { setupArg = compiler->fgMorphCopyBlock(setupArg); #if defined(TARGET_ARMARCH) || defined(UNIX_AMD64_ABI) if (lclVarType == TYP_STRUCT) { // This scalar LclVar widening step is only performed for ARM architectures. // CORINFO_CLASS_HANDLE clsHnd = compiler->lvaGetStruct(tmpVarNum); unsigned structSize = varDsc->lvExactSize; scalarType = compiler->getPrimitiveTypeForStruct(structSize, clsHnd, curArgTabEntry->IsVararg()); } #endif // TARGET_ARMARCH || defined (UNIX_AMD64_ABI) } // scalarType can be set to a wider type for ARM or unix amd64 architectures: (3 => 4) or (5,6,7 => // 8) if ((scalarType != TYP_UNKNOWN) && (scalarType != lclVarType)) { // Create a GT_LCL_FLD using the wider type to go to the late argument list defArg = compiler->gtNewLclFldNode(tmpVarNum, scalarType, 0); } else { // Create a copy of the temp to go to the late argument list defArg = compiler->gtNewLclvNode(tmpVarNum, lclVarType); } curArgTabEntry->isTmp = true; curArgTabEntry->tmpNum = tmpVarNum; #ifdef TARGET_ARM // Previously we might have thought the local was promoted, and thus the 'COPYBLK' // might have left holes in the used registers (see // fgAddSkippedRegsInPromotedStructArg). // Too bad we're not that smart for these intermediate temps... if (isValidIntArgReg(curArgTabEntry->GetRegNum()) && (curArgTabEntry->numRegs > 1)) { regNumber argReg = curArgTabEntry->GetRegNum(); regMaskTP allUsedRegs = genRegMask(curArgTabEntry->GetRegNum()); for (unsigned i = 1; i < curArgTabEntry->numRegs; i++) { argReg = genRegArgNext(argReg); allUsedRegs |= genRegMask(argReg); } } #endif // TARGET_ARM } /* mark the assignment as a late argument */ setupArg->gtFlags |= GTF_LATE_ARG; #ifdef DEBUG if (compiler->verbose) { printf("\n Evaluate to a temp:\n"); compiler->gtDispTree(setupArg); } #endif } } else // curArgTabEntry->needTmp == false { // On x86 - // Only register args are replaced with placeholder nodes // and the stack based arguments are evaluated and pushed in order. // // On Arm/x64 - When needTmp is false and needPlace is false, // the non-register arguments are evaluated and stored in order. // When needPlace is true we have a nested call that comes after // this argument so we have to replace it in the gtCallArgs list // (the initial argument evaluation list) with a placeholder. // if ((curArgTabEntry->GetRegNum() == REG_STK) && (curArgTabEntry->needPlace == false)) { continue; } /* No temp needed - move the whole node to the gtCallLateArgs list */ /* The argument is deferred and put in the late argument list */ defArg = argx; // Create a placeholder node to put in its place in gtCallLateArgs. // For a struct type we also need to record the class handle of the arg. CORINFO_CLASS_HANDLE clsHnd = NO_CLASS_HANDLE; #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // All structs are either passed (and retyped) as integral types, OR they // are passed by reference. 
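// For example, an 8-byte struct is retyped and passed as TYP_LONG, while a 16-byte struct
// is passed by reference to a caller-allocated copy, so no TYP_STRUCT argument node
// should survive to this point on Windows x64.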
noway_assert(argx->gtType != TYP_STRUCT); #else // !defined(TARGET_AMD64) || defined(UNIX_AMD64_ABI) if (defArg->TypeGet() == TYP_STRUCT) { clsHnd = compiler->gtGetStructHandleIfPresent(defArg); noway_assert(clsHnd != NO_CLASS_HANDLE); } #endif // !(defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) setupArg = compiler->gtNewArgPlaceHolderNode(defArg->gtType, clsHnd); /* mark the placeholder node as a late argument */ setupArg->gtFlags |= GTF_LATE_ARG; #ifdef DEBUG if (compiler->verbose) { if (curArgTabEntry->GetRegNum() == REG_STK) { printf("Deferred stack argument :\n"); } else { printf("Deferred argument ('%s'):\n", getRegName(curArgTabEntry->GetRegNum())); } compiler->gtDispTree(argx); printf("Replaced with placeholder node:\n"); compiler->gtDispTree(setupArg); } #endif } if (setupArg != nullptr) { noway_assert(curArgTabEntry->use->GetNode() == argx); curArgTabEntry->use->SetNode(setupArg); } /* deferred arg goes into the late argument list */ if (tmpRegArgNext == nullptr) { tmpRegArgNext = compiler->gtNewCallArgs(defArg); callTree->AsCall()->gtCallLateArgs = tmpRegArgNext; } else { noway_assert(tmpRegArgNext->GetNode() != nullptr); tmpRegArgNext->SetNext(compiler->gtNewCallArgs(defArg)); tmpRegArgNext = tmpRegArgNext->GetNext(); } curArgTabEntry->lateUse = tmpRegArgNext; curArgTabEntry->SetLateArgInx(regArgInx++); } #ifdef DEBUG if (compiler->verbose) { printf("\nShuffled argument table: "); for (unsigned curInx = 0; curInx < argCount; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; if (curArgTabEntry->GetRegNum() != REG_STK) { printf("%s ", getRegName(curArgTabEntry->GetRegNum())); } } printf("\n"); } #endif } //------------------------------------------------------------------------------ // fgMakeMultiUse : If the node is an unaliased local or constant clone it, // otherwise insert a comma form temp // // Arguments: // ppTree - a pointer to the child node we will be replacing with the comma expression that // evaluates ppTree to a temp and returns the result // // Return Value: // A fresh GT_LCL_VAR node referencing the temp which has not been used // // Notes: // Caller must ensure that if the node is an unaliased local, the second use this // creates will be evaluated before the local can be reassigned. // // Can be safely called in morph preorder, before GTF_GLOB_REF is reliable. // GenTree* Compiler::fgMakeMultiUse(GenTree** pOp) { GenTree* const tree = *pOp; if (tree->IsInvariant()) { return gtClone(tree); } else if (tree->IsLocal()) { // Can't rely on GTF_GLOB_REF here. // if (!lvaGetDesc(tree->AsLclVarCommon())->IsAddressExposed()) { return gtClone(tree); } } return fgInsertCommaFormTemp(pOp); } //------------------------------------------------------------------------------ // fgInsertCommaFormTemp: Create a new temporary variable to hold the result of *ppTree, // and replace *ppTree with comma(asg(newLcl, *ppTree), newLcl) // // Arguments: // ppTree - a pointer to the child node we will be replacing with the comma expression that // evaluates ppTree to a temp and returns the result // // structType - value type handle if the temp created is of TYP_STRUCT. 
// // Return Value: // A fresh GT_LCL_VAR node referencing the temp which has not been used // GenTree* Compiler::fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType /*= nullptr*/) { GenTree* subTree = *ppTree; unsigned lclNum = lvaGrabTemp(true DEBUGARG("fgInsertCommaFormTemp is creating a new local variable")); if (varTypeIsStruct(subTree)) { assert(structType != nullptr); lvaSetStruct(lclNum, structType, false); } // If subTree->TypeGet() == TYP_STRUCT, gtNewTempAssign() will create a GT_COPYBLK tree. // The type of GT_COPYBLK is TYP_VOID. Therefore, we should use subTree->TypeGet() for // setting type of lcl vars created. GenTree* asg = gtNewTempAssign(lclNum, subTree); GenTree* load = new (this, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, subTree->TypeGet(), lclNum); GenTree* comma = gtNewOperNode(GT_COMMA, subTree->TypeGet(), asg, load); *ppTree = comma; return new (this, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, subTree->TypeGet(), lclNum); } //------------------------------------------------------------------------ // fgInitArgInfo: Construct the fgArgInfo for the call with the fgArgEntry for each arg // // Arguments: // callNode - the call for which we are generating the fgArgInfo // // Return Value: // None // // Notes: // This method is idempotent in that it checks whether the fgArgInfo has already been // constructed, and just returns. // This method only computes the arg table and arg entries for the call (the fgArgInfo), // and makes no modification of the args themselves. // // The IR for the call args can change for calls with non-standard arguments: some non-standard // arguments add new call argument IR nodes. // void Compiler::fgInitArgInfo(GenTreeCall* call) { GenTreeCall::Use* args; GenTree* argx; unsigned argIndex = 0; unsigned intArgRegNum = 0; unsigned fltArgRegNum = 0; DEBUG_ARG_SLOTS_ONLY(unsigned argSlots = 0;) bool callHasRetBuffArg = call->HasRetBufArg(); bool callIsVararg = call->IsVarargs(); #ifdef TARGET_ARM regMaskTP argSkippedRegMask = RBM_NONE; regMaskTP fltArgSkippedRegMask = RBM_NONE; #endif // TARGET_ARM #if defined(TARGET_X86) unsigned maxRegArgs = MAX_REG_ARG; // X86: non-const, must be calculated #else const unsigned maxRegArgs = MAX_REG_ARG; // other arch: fixed constant number #endif if (call->fgArgInfo != nullptr) { // We've already initialized and set the fgArgInfo. return; } JITDUMP("Initializing arg info for %d.%s:\n", call->gtTreeID, GenTree::OpName(call->gtOper)); // At this point, we should never have gtCallLateArgs, as this needs to be done before those are determined. assert(call->gtCallLateArgs == nullptr); if (TargetOS::IsUnix && callIsVararg) { // Currently native varargs is not implemented on non windows targets. // // Note that some targets like Arm64 Unix should not need much work as // the ABI is the same. While other targets may only need small changes // such as amd64 Unix, which just expects RAX to pass numFPArguments. NYI("Morphing Vararg call not yet implemented on non Windows targets."); } // Data structure for keeping track of non-standard args. Non-standard args are those that are not passed // following the normal calling convention or in the normal argument registers. We either mark existing // arguments as non-standard (such as the x8 return buffer register on ARM64), or we manually insert the // non-standard arguments into the argument list, below. class NonStandardArgs { struct NonStandardArg { GenTree* node; // The tree node representing this non-standard argument. 
// Note that this must be updated if the tree node changes due to morphing! regNumber reg; // The register to be assigned to this non-standard argument. NonStandardArgKind kind; // The kind of the non-standard arg }; ArrayStack<NonStandardArg> args; public: NonStandardArgs(CompAllocator alloc) : args(alloc, 3) // We will have at most 3 non-standard arguments { } //----------------------------------------------------------------------------- // Add: add a non-standard argument to the table of non-standard arguments // // Arguments: // node - a GenTree node that has a non-standard argument. // reg - the register to assign to this node. // // Return Value: // None. // void Add(GenTree* node, regNumber reg, NonStandardArgKind kind) { NonStandardArg nsa = {node, reg, kind}; args.Push(nsa); } //----------------------------------------------------------------------------- // Find: Look for a GenTree* in the set of non-standard args. // // Arguments: // node - a GenTree node to look for // // Return Value: // The index of the non-standard argument (a non-negative, unique, stable number). // If the node is not a non-standard argument, return -1. // int Find(GenTree* node) { for (int i = 0; i < args.Height(); i++) { if (node == args.Top(i).node) { return i; } } return -1; } //----------------------------------------------------------------------------- // Find: Look for a GenTree node in the non-standard arguments set. If found, // set the register to use for the node. // // Arguments: // node - a GenTree node to look for // pReg - an OUT argument. *pReg is set to the non-standard register to use if // 'node' is found in the non-standard argument set. // pKind - an OUT argument. *pKind is set to the kind of the non-standard arg. // // Return Value: // 'true' if 'node' is a non-standard argument. In this case, *pReg and *pKing are set. // 'false' otherwise (in this case, *pReg and *pKind are unmodified). // bool Find(GenTree* node, regNumber* pReg, NonStandardArgKind* pKind) { for (int i = 0; i < args.Height(); i++) { NonStandardArg& nsa = args.TopRef(i); if (node == nsa.node) { *pReg = nsa.reg; *pKind = nsa.kind; return true; } } return false; } //----------------------------------------------------------------------------- // Replace: Replace the non-standard argument node at a given index. This is done when // the original node was replaced via morphing, but we need to continue to assign a // particular non-standard arg to it. // // Arguments: // index - the index of the non-standard arg. It must exist. // node - the new GenTree node. // // Return Value: // None. // void Replace(int index, GenTree* node) { args.TopRef(index).node = node; } } nonStandardArgs(getAllocator(CMK_ArrayStack)); // Count of args. On first morph, this is counted before we've filled in the arg table. // On remorph, we grab it from the arg table. unsigned numArgs = 0; // First we need to count the args if (call->gtCallThisArg != nullptr) { numArgs++; } for (GenTreeCall::Use& use : call->Args()) { numArgs++; } // Insert or mark non-standard args. These are either outside the normal calling convention, or // arguments registers that don't follow the normal progression of argument registers in the calling // convention (such as for the ARM64 fixed return buffer argument x8). // // *********** NOTE ************* // The logic here must remain in sync with GetNonStandardAddedArgCount(), which is used to map arguments // in the implementation of fast tail call. 
// *********** END NOTE ********* CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) || defined(TARGET_ARM) // The x86 and arm32 CORINFO_HELP_INIT_PINVOKE_FRAME helpers has a custom calling convention. // Set the argument registers correctly here. if (call->IsHelperCall(this, CORINFO_HELP_INIT_PINVOKE_FRAME)) { GenTreeCall::Use* args = call->gtCallArgs; GenTree* arg1 = args->GetNode(); assert(arg1 != nullptr); nonStandardArgs.Add(arg1, REG_PINVOKE_FRAME, NonStandardArgKind::PInvokeFrame); } #endif // defined(TARGET_X86) || defined(TARGET_ARM) #if defined(TARGET_ARM) // A non-standard calling convention using wrapper delegate invoke is used on ARM, only, for wrapper // delegates. It is used for VSD delegate calls where the VSD custom calling convention ABI requires passing // R4, a callee-saved register, with a special value. Since R4 is a callee-saved register, its value needs // to be preserved. Thus, the VM uses a wrapper delegate IL stub, which preserves R4 and also sets up R4 // correctly for the VSD call. The VM is simply reusing an existing mechanism (wrapper delegate IL stub) // to achieve its goal for delegate VSD call. See COMDelegate::NeedsWrapperDelegate() in the VM for details. else if (call->gtCallMoreFlags & GTF_CALL_M_WRAPPER_DELEGATE_INV) { GenTree* arg = call->gtCallThisArg->GetNode(); if (arg->OperIsLocal()) { arg = gtClone(arg, true); } else { GenTree* tmp = fgInsertCommaFormTemp(&arg); call->gtCallThisArg->SetNode(arg); call->gtFlags |= GTF_ASG; arg = tmp; } noway_assert(arg != nullptr); GenTree* newArg = new (this, GT_ADDR) GenTreeAddrMode(TYP_BYREF, arg, nullptr, 0, eeGetEEInfo()->offsetOfWrapperDelegateIndirectCell); // Append newArg as the last arg GenTreeCall::Use** insertionPoint = &call->gtCallArgs; for (; *insertionPoint != nullptr; insertionPoint = &((*insertionPoint)->NextRef())) { } *insertionPoint = gtNewCallArgs(newArg); numArgs++; nonStandardArgs.Add(newArg, virtualStubParamInfo->GetReg(), NonStandardArgKind::WrapperDelegateCell); } #endif // defined(TARGET_ARM) #if defined(TARGET_X86) // The x86 shift helpers have custom calling conventions and expect the lo part of the long to be in EAX and the // hi part to be in EDX. This sets the argument registers up correctly. else if (call->IsHelperCall(this, CORINFO_HELP_LLSH) || call->IsHelperCall(this, CORINFO_HELP_LRSH) || call->IsHelperCall(this, CORINFO_HELP_LRSZ)) { GenTreeCall::Use* args = call->gtCallArgs; GenTree* arg1 = args->GetNode(); assert(arg1 != nullptr); nonStandardArgs.Add(arg1, REG_LNGARG_LO, NonStandardArgKind::ShiftLow); args = args->GetNext(); GenTree* arg2 = args->GetNode(); assert(arg2 != nullptr); nonStandardArgs.Add(arg2, REG_LNGARG_HI, NonStandardArgKind::ShiftHigh); } #else // !TARGET_X86 // TODO-X86-CQ: Currently RyuJIT/x86 passes args on the stack, so this is not needed. // If/when we change that, the following code needs to be changed to correctly support the (TBD) managed calling // convention for x86/SSE. // If we have a Fixed Return Buffer argument register then we setup a non-standard argument for it. // // We don't use the fixed return buffer argument if we have the special unmanaged instance call convention. // That convention doesn't use the fixed return buffer register. // CLANG_FORMAT_COMMENT_ANCHOR; if (call->HasFixedRetBufArg()) { args = call->gtCallArgs; assert(args != nullptr); argx = call->gtCallArgs->GetNode(); // We don't increment numArgs here, since we already counted this argument above. 
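// (On ARM64 this register is x8, which lies outside the normal x0-x7 argument sequence,
// which is why the return buffer is recorded here as a non-standard argument.)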
nonStandardArgs.Add(argx, theFixedRetBuffReg(), NonStandardArgKind::FixedRetBuffer); } // We are allowed to have a Fixed Return Buffer argument combined // with any of the remaining non-standard arguments // CLANG_FORMAT_COMMENT_ANCHOR; if (call->IsVirtualStub()) { if (!call->IsTailCallViaJitHelper()) { GenTree* stubAddrArg = fgGetStubAddrArg(call); // And push the stub address onto the list of arguments call->gtCallArgs = gtPrependNewCallArg(stubAddrArg, call->gtCallArgs); numArgs++; nonStandardArgs.Add(stubAddrArg, stubAddrArg->GetRegNum(), NonStandardArgKind::VirtualStubCell); } else { // If it is a VSD call getting dispatched via tail call helper, // fgMorphTailCallViaJitHelper() would materialize stub addr as an additional // parameter added to the original arg list and hence no need to // add as a non-standard arg. } } else #endif // !TARGET_X86 if (call->gtCallType == CT_INDIRECT && (call->gtCallCookie != nullptr)) { assert(!call->IsUnmanaged()); GenTree* arg = call->gtCallCookie; noway_assert(arg != nullptr); call->gtCallCookie = nullptr; // All architectures pass the cookie in a register. call->gtCallArgs = gtPrependNewCallArg(arg, call->gtCallArgs); nonStandardArgs.Add(arg, REG_PINVOKE_COOKIE_PARAM, NonStandardArgKind::PInvokeCookie); numArgs++; // put destination into R10/EAX arg = gtClone(call->gtCallAddr, true); call->gtCallArgs = gtPrependNewCallArg(arg, call->gtCallArgs); numArgs++; nonStandardArgs.Add(arg, REG_PINVOKE_TARGET_PARAM, NonStandardArgKind::PInvokeTarget); // finally change this call to a helper call call->gtCallType = CT_HELPER; call->gtCallMethHnd = eeFindHelper(CORINFO_HELP_PINVOKE_CALLI); } #if defined(FEATURE_READYTORUN) // For arm/arm64, we dispatch code same as VSD using virtualStubParamInfo->GetReg() // for indirection cell address, which ZapIndirectHelperThunk expects. // For x64/x86 we use return address to get the indirection cell by disassembling the call site. // That is not possible for fast tailcalls, so we only need this logic for fast tailcalls on xarch. // Note that we call this before we know if something will be a fast tailcall or not. // That's ok; after making something a tailcall, we will invalidate this information // and reconstruct it if necessary. The tailcalling decision does not change since // this is a non-standard arg in a register. bool needsIndirectionCell = call->IsR2RRelativeIndir() && !call->IsDelegateInvoke(); #if defined(TARGET_XARCH) needsIndirectionCell &= call->IsFastTailCall(); #endif if (needsIndirectionCell) { assert(call->gtEntryPoint.addr != nullptr); size_t addrValue = (size_t)call->gtEntryPoint.addr; GenTree* indirectCellAddress = gtNewIconHandleNode(addrValue, GTF_ICON_FTN_ADDR); #ifdef DEBUG indirectCellAddress->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd; #endif indirectCellAddress->SetRegNum(REG_R2R_INDIRECT_PARAM); #ifdef TARGET_ARM // Issue #xxxx : Don't attempt to CSE this constant on ARM32 // // This constant has specific register requirements, and LSRA doesn't currently correctly // handle them when the value is in a CSE'd local. indirectCellAddress->SetDoNotCSE(); #endif // TARGET_ARM // Push the stub address onto the list of arguments. 
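// (For R2R calls the "stub address" pushed here is the indirection cell address,
// passed in REG_R2R_INDIRECT_PARAM.)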
call->gtCallArgs = gtPrependNewCallArg(indirectCellAddress, call->gtCallArgs); numArgs++; nonStandardArgs.Add(indirectCellAddress, indirectCellAddress->GetRegNum(), NonStandardArgKind::R2RIndirectionCell); } #endif if ((REG_VALIDATE_INDIRECT_CALL_ADDR != REG_ARG_0) && call->IsHelperCall(this, CORINFO_HELP_VALIDATE_INDIRECT_CALL)) { assert(call->gtCallArgs != nullptr); GenTreeCall::Use* args = call->gtCallArgs; GenTree* tar = args->GetNode(); nonStandardArgs.Add(tar, REG_VALIDATE_INDIRECT_CALL_ADDR, NonStandardArgKind::ValidateIndirectCallTarget); } // Allocate the fgArgInfo for the call node; // call->fgArgInfo = new (this, CMK_Unknown) fgArgInfo(this, call, numArgs); // Add the 'this' argument value, if present. if (call->gtCallThisArg != nullptr) { argx = call->gtCallThisArg->GetNode(); assert(argIndex == 0); assert(call->gtCallType == CT_USER_FUNC || call->gtCallType == CT_INDIRECT); assert(varTypeIsGC(argx) || (argx->gtType == TYP_I_IMPL)); const regNumber regNum = genMapIntRegArgNumToRegNum(intArgRegNum); const unsigned numRegs = 1; const unsigned byteSize = TARGET_POINTER_SIZE; const unsigned byteAlignment = TARGET_POINTER_SIZE; const bool isStruct = false; const bool isFloatHfa = false; // This is a register argument - put it in the table. call->fgArgInfo->AddRegArg(argIndex, argx, call->gtCallThisArg, regNum, numRegs, byteSize, byteAlignment, isStruct, isFloatHfa, callIsVararg UNIX_AMD64_ABI_ONLY_ARG(REG_STK) UNIX_AMD64_ABI_ONLY_ARG(0) UNIX_AMD64_ABI_ONLY_ARG(0) UNIX_AMD64_ABI_ONLY_ARG(nullptr)); intArgRegNum++; #ifdef WINDOWS_AMD64_ABI // Whenever we pass an integer register argument // we skip the corresponding floating point register argument fltArgRegNum++; #endif // WINDOWS_AMD64_ABI argIndex++; DEBUG_ARG_SLOTS_ONLY(argSlots++;) } #ifdef TARGET_X86 // Compute the maximum number of arguments that can be passed in registers. // For X86 we handle the varargs and unmanaged calling conventions #ifndef UNIX_X86_ABI if (call->gtFlags & GTF_CALL_POP_ARGS) { noway_assert(intArgRegNum < MAX_REG_ARG); // No more register arguments for varargs (CALL_POP_ARGS) maxRegArgs = intArgRegNum; // Add in the ret buff arg if (callHasRetBuffArg) maxRegArgs++; } #endif // UNIX_X86_ABI if (call->IsUnmanaged()) { noway_assert(intArgRegNum == 0); if (call->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) { noway_assert(call->gtCallArgs->GetNode()->TypeGet() == TYP_I_IMPL || call->gtCallArgs->GetNode()->TypeGet() == TYP_BYREF || call->gtCallArgs->GetNode()->gtOper == GT_NOP); // the arg was already morphed to a register (fgMorph called twice) maxRegArgs = 1; } else { maxRegArgs = 0; } #ifdef UNIX_X86_ABI // Add in the ret buff arg if (callHasRetBuffArg && call->unmgdCallConv != CorInfoCallConvExtension::C && // C and Stdcall calling conventions do not call->unmgdCallConv != CorInfoCallConvExtension::Stdcall) // use registers to pass arguments. maxRegArgs++; #endif } #endif // TARGET_X86 /* Morph the user arguments */ CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_ARM) // The ARM ABI has a concept of back-filling of floating-point argument registers, according // to the "Procedure Call Standard for the ARM Architecture" document, especially // section 6.1.2.3 "Parameter passing". Back-filling is where floating-point argument N+1 can // appear in a lower-numbered register than floating point argument N. That is, argument // register allocation is not strictly increasing. To support this, we need to keep track of unused // floating-point argument registers that we can back-fill. 
We only support 4-byte float and // 8-byte double types, and one to four element HFAs composed of these types. With this, we will // only back-fill single registers, since there is no way with these types to create // an alignment hole greater than one register. However, there can be up to 3 back-fill slots // available (with 16 FP argument registers). Consider this code: // // struct HFA { float x, y, z; }; // a three element HFA // void bar(float a1, // passed in f0 // double a2, // passed in f2/f3; skip f1 for alignment // HFA a3, // passed in f4/f5/f6 // double a4, // passed in f8/f9; skip f7 for alignment. NOTE: it doesn't fit in the f1 back-fill slot // HFA a5, // passed in f10/f11/f12 // double a6, // passed in f14/f15; skip f13 for alignment. NOTE: it doesn't fit in the f1 or f7 back-fill // // slots // float a7, // passed in f1 (back-filled) // float a8, // passed in f7 (back-filled) // float a9, // passed in f13 (back-filled) // float a10) // passed on the stack in [OutArg+0] // // Note that if we ever support FP types with larger alignment requirements, then there could // be more than single register back-fills. // // Once we assign a floating-pointer register to the stack, they all must be on the stack. // See "Procedure Call Standard for the ARM Architecture", section 6.1.2.3, "The back-filling // continues only so long as no VFP CPRC has been allocated to a slot on the stack." // We set anyFloatStackArgs to true when a floating-point argument has been assigned to the stack // and prevent any additional floating-point arguments from going in registers. bool anyFloatStackArgs = false; #endif // TARGET_ARM #ifdef UNIX_AMD64_ABI SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; #endif // UNIX_AMD64_ABI #if defined(DEBUG) // Check that we have valid information about call's argument types. // For example: // load byte; call(int) -> CALL(PUTARG_TYPE byte(IND byte)); // load int; call(byte) -> CALL(PUTARG_TYPE int (IND int)); // etc. if (call->callSig != nullptr) { CORINFO_SIG_INFO* sig = call->callSig; const unsigned sigArgsCount = sig->numArgs; GenTreeCall::Use* nodeArgs = call->gtCallArgs; // It could include many arguments not included in `sig->numArgs`, for example, `this`, runtime lookup, cookie // etc. unsigned nodeArgsCount = 0; call->VisitOperands([&nodeArgsCount](GenTree* operand) -> GenTree::VisitResult { nodeArgsCount++; return GenTree::VisitResult::Continue; }); if (call->gtCallThisArg != nullptr) { // Handle the most common argument not in the `sig->numArgs`. // so the following check works on more methods. 
nodeArgsCount--; } assert(nodeArgsCount >= sigArgsCount); if ((nodeArgsCount == sigArgsCount) && ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (nodeArgsCount == 1))) { CORINFO_ARG_LIST_HANDLE sigArg = sig->args; for (unsigned i = 0; i < sig->numArgs; ++i) { CORINFO_CLASS_HANDLE argClass; const CorInfoType corType = strip(info.compCompHnd->getArgType(sig, sigArg, &argClass)); const var_types sigType = JITtype2varType(corType); assert(nodeArgs != nullptr); const GenTree* nodeArg = nodeArgs->GetNode(); assert(nodeArg != nullptr); const var_types nodeType = nodeArg->TypeGet(); assert((nodeType == sigType) || varTypeIsStruct(sigType) || genTypeSize(nodeType) == genTypeSize(sigType)); sigArg = info.compCompHnd->getArgNext(sigArg); nodeArgs = nodeArgs->GetNext(); } assert(nodeArgs == nullptr); } } #endif // DEBUG for (args = call->gtCallArgs; args != nullptr; args = args->GetNext(), argIndex++) { argx = args->GetNode()->gtSkipPutArgType(); // Change the node to TYP_I_IMPL so we don't report GC info // NOTE: We deferred this from the importer because of the inliner. if (argx->IsLocalAddrExpr() != nullptr) { argx->gtType = TYP_I_IMPL; } // We should never have any ArgPlaceHolder nodes at this point. assert(!argx->IsArgPlaceHolderNode()); // Setup any HFA information about 'argx' bool isHfaArg = false; var_types hfaType = TYP_UNDEF; unsigned hfaSlots = 0; bool passUsingFloatRegs; unsigned argAlignBytes = TARGET_POINTER_SIZE; unsigned size = 0; unsigned byteSize = 0; if (GlobalJitOptions::compFeatureHfa) { hfaType = GetHfaType(argx); isHfaArg = varTypeIsValidHfaType(hfaType); #if defined(TARGET_ARM64) if (TargetOS::IsWindows) { // Make sure for vararg methods isHfaArg is not true. isHfaArg = callIsVararg ? false : isHfaArg; } #endif // defined(TARGET_ARM64) if (isHfaArg) { isHfaArg = true; hfaSlots = GetHfaCount(argx); // If we have a HFA struct it's possible we transition from a method that originally // only had integer types to now start having FP types. We have to communicate this // through this flag since LSRA later on will use this flag to determine whether // or not to track the FP register set. // compFloatingPointUsed = true; } } const bool isFloatHfa = (hfaType == TYP_FLOAT); #ifdef TARGET_ARM passUsingFloatRegs = !callIsVararg && (isHfaArg || varTypeUsesFloatReg(argx)) && !opts.compUseSoftFP; bool passUsingIntRegs = passUsingFloatRegs ? false : (intArgRegNum < MAX_REG_ARG); // We don't use the "size" return value from InferOpSizeAlign(). 
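// On Arm, 8-byte aligned arguments must start at an even-numbered register. For example,
// under the soft-float or vararg convention a 'double' that would otherwise start at r1 is
// passed in r2/r3 and r1 is skipped; for hard-float a skipped s-register is remembered in
// fltArgSkippedRegMask so that a later 'float' argument can back-fill it.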
codeGen->InferOpSizeAlign(argx, &argAlignBytes); argAlignBytes = roundUp(argAlignBytes, TARGET_POINTER_SIZE); if (argAlignBytes == 2 * TARGET_POINTER_SIZE) { if (passUsingFloatRegs) { if (fltArgRegNum % 2 == 1) { fltArgSkippedRegMask |= genMapArgNumToRegMask(fltArgRegNum, TYP_FLOAT); fltArgRegNum++; } } else if (passUsingIntRegs) { if (intArgRegNum % 2 == 1) { argSkippedRegMask |= genMapArgNumToRegMask(intArgRegNum, TYP_I_IMPL); intArgRegNum++; } } #if defined(DEBUG) if (argSlots % 2 == 1) { argSlots++; } #endif } #elif defined(TARGET_ARM64) assert(!callIsVararg || !isHfaArg); passUsingFloatRegs = !callIsVararg && (isHfaArg || varTypeUsesFloatReg(argx)); #elif defined(TARGET_AMD64) passUsingFloatRegs = varTypeIsFloating(argx); #elif defined(TARGET_X86) passUsingFloatRegs = false; #else #error Unsupported or unset target architecture #endif // TARGET* bool isBackFilled = false; unsigned nextFltArgRegNum = fltArgRegNum; // This is the next floating-point argument register number to use var_types structBaseType = TYP_STRUCT; unsigned structSize = 0; bool passStructByRef = false; bool isStructArg; GenTree* actualArg = argx->gtEffectiveVal(true /* Commas only */); // // Figure out the size of the argument. This is either in number of registers, or number of // TARGET_POINTER_SIZE stack slots, or the sum of these if the argument is split between the registers and // the stack. // isStructArg = varTypeIsStruct(argx); CORINFO_CLASS_HANDLE objClass = NO_CLASS_HANDLE; if (isStructArg) { objClass = gtGetStructHandle(argx); if (argx->TypeGet() == TYP_STRUCT) { // For TYP_STRUCT arguments we must have an OBJ, LCL_VAR or MKREFANY switch (actualArg->OperGet()) { case GT_OBJ: structSize = actualArg->AsObj()->GetLayout()->GetSize(); assert(structSize == info.compCompHnd->getClassSize(objClass)); break; case GT_LCL_VAR: structSize = lvaGetDesc(actualArg->AsLclVarCommon())->lvExactSize; break; case GT_MKREFANY: structSize = info.compCompHnd->getClassSize(objClass); break; default: BADCODE("illegal argument tree in fgInitArgInfo"); break; } } else { structSize = genTypeSize(argx); assert(structSize == info.compCompHnd->getClassSize(objClass)); } } #if defined(TARGET_AMD64) #ifdef UNIX_AMD64_ABI if (!isStructArg) { size = 1; // On AMD64, all primitives fit in a single (64-bit) 'slot' byteSize = genTypeSize(argx); } else { size = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE; byteSize = structSize; eeGetSystemVAmd64PassStructInRegisterDescriptor(objClass, &structDesc); } #else // !UNIX_AMD64_ABI size = 1; // On AMD64 Windows, all args fit in a single (64-bit) 'slot' if (!isStructArg) { byteSize = genTypeSize(argx); } #endif // UNIX_AMD64_ABI #elif defined(TARGET_ARM64) if (isStructArg) { if (isHfaArg) { // HFA structs are passed by value in multiple registers. // The "size" in registers may differ the size in pointer-sized units. CORINFO_CLASS_HANDLE structHnd = gtGetStructHandle(argx); size = GetHfaCount(structHnd); byteSize = info.compCompHnd->getClassSize(structHnd); } else { // Structs are either passed in 1 or 2 (64-bit) slots. // Structs that are the size of 2 pointers are passed by value in multiple registers, // if sufficient registers are available. // Structs that are larger than 2 pointers (except for HFAs) are passed by // reference (to a copy) size = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE; byteSize = structSize; if (size > 2) { size = 1; } } // Note that there are some additional rules for multireg structs. 
// (i.e they cannot be split between registers and the stack) } else { size = 1; // Otherwise, all primitive types fit in a single (64-bit) 'slot' byteSize = genTypeSize(argx); } #elif defined(TARGET_ARM) || defined(TARGET_X86) if (isStructArg) { size = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE; byteSize = structSize; } else { // The typical case. // Long/double type argument(s) will be modified as needed in Lowering. size = genTypeStSz(argx->gtType); byteSize = genTypeSize(argx); } #else #error Unsupported or unset target architecture #endif // TARGET_XXX if (isStructArg) { assert(argx == args->GetNode()); assert(structSize != 0); structPassingKind howToPassStruct; structBaseType = getArgTypeForStruct(objClass, &howToPassStruct, callIsVararg, structSize); passStructByRef = (howToPassStruct == SPK_ByReference); if (howToPassStruct == SPK_ByReference) { byteSize = TARGET_POINTER_SIZE; } else { byteSize = structSize; } if (howToPassStruct == SPK_PrimitiveType) { #ifdef TARGET_ARM // TODO-CQ: getArgTypeForStruct should *not* return TYP_DOUBLE for a double struct, // or for a struct of two floats. This causes the struct to be address-taken. if (structBaseType == TYP_DOUBLE) { size = 2; } else #endif // TARGET_ARM { size = 1; } } else if (passStructByRef) { size = 1; } } const var_types argType = args->GetNode()->TypeGet(); if (args->GetNode()->OperIs(GT_PUTARG_TYPE)) { byteSize = genTypeSize(argType); } // The 'size' value has now must have been set. (the original value of zero is an invalid value) assert(size != 0); assert(byteSize != 0); if (compMacOsArm64Abi()) { // Arm64 Apple has a special ABI for passing small size arguments on stack, // bytes are aligned to 1-byte, shorts to 2-byte, int/float to 4-byte, etc. // It means passing 8 1-byte arguments on stack can take as small as 8 bytes. argAlignBytes = eeGetArgSizeAlignment(argType, isFloatHfa); } // // Figure out if the argument will be passed in a register. // bool isRegArg = false; NonStandardArgKind nonStandardArgKind = NonStandardArgKind::None; regNumber nonStdRegNum = REG_NA; if (isRegParamType(genActualType(argx->TypeGet())) #ifdef UNIX_AMD64_ABI && (!isStructArg || structDesc.passedInRegisters) #elif defined(TARGET_X86) || (isStructArg && isTrivialPointerSizedStruct(objClass)) #endif ) { #ifdef TARGET_ARM if (passUsingFloatRegs) { // First, see if it can be back-filled if (!anyFloatStackArgs && // Is it legal to back-fill? (We haven't put any FP args on the stack yet) (fltArgSkippedRegMask != RBM_NONE) && // Is there an available back-fill slot? (size == 1)) // The size to back-fill is one float register { // Back-fill the register. isBackFilled = true; regMaskTP backFillBitMask = genFindLowestBit(fltArgSkippedRegMask); fltArgSkippedRegMask &= ~backFillBitMask; // Remove the back-filled register(s) from the skipped mask nextFltArgRegNum = genMapFloatRegNumToRegArgNum(genRegNumFromMask(backFillBitMask)); assert(nextFltArgRegNum < MAX_FLOAT_REG_ARG); } // Does the entire float, double, or HFA fit in the FP arg registers? // Check if the last register needed is still in the argument register range. isRegArg = (nextFltArgRegNum + size - 1) < MAX_FLOAT_REG_ARG; if (!isRegArg) { anyFloatStackArgs = true; } } else { isRegArg = intArgRegNum < MAX_REG_ARG; } #elif defined(TARGET_ARM64) if (passUsingFloatRegs) { // Check if the last register needed is still in the fp argument register range. 
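// For example, a two-register HFA with nextFltArgRegNum == 7 would need both d7 and d8,
// but only d0-d7 are argument registers, so such an argument falls back to the stack.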
isRegArg = (nextFltArgRegNum + (size - 1)) < MAX_FLOAT_REG_ARG; // Do we have a HFA arg that we wanted to pass in registers, but we ran out of FP registers? if (isHfaArg && !isRegArg) { // recompute the 'size' so that it represent the number of stack slots rather than the number of // registers // unsigned roundupSize = (unsigned)roundUp(structSize, TARGET_POINTER_SIZE); size = roundupSize / TARGET_POINTER_SIZE; // We also must update fltArgRegNum so that we no longer try to // allocate any new floating point registers for args // This prevents us from backfilling a subsequent arg into d7 // fltArgRegNum = MAX_FLOAT_REG_ARG; } } else { // Check if the last register needed is still in the int argument register range. isRegArg = (intArgRegNum + (size - 1)) < maxRegArgs; // Did we run out of registers when we had a 16-byte struct (size===2) ? // (i.e we only have one register remaining but we needed two registers to pass this arg) // This prevents us from backfilling a subsequent arg into x7 // if (!isRegArg && (size > 1)) { // Arm64 windows native varargs allows splitting a 16 byte struct between stack // and the last general purpose register. if (TargetOS::IsWindows && callIsVararg) { // Override the decision and force a split. isRegArg = (intArgRegNum + (size - 1)) <= maxRegArgs; } else { // We also must update intArgRegNum so that we no longer try to // allocate any new general purpose registers for args // intArgRegNum = maxRegArgs; } } } #else // not TARGET_ARM or TARGET_ARM64 #if defined(UNIX_AMD64_ABI) // Here a struct can be passed in register following the classifications of its members and size. // Now make sure there are actually enough registers to do so. if (isStructArg) { unsigned int structFloatRegs = 0; unsigned int structIntRegs = 0; for (unsigned int i = 0; i < structDesc.eightByteCount; i++) { if (structDesc.IsIntegralSlot(i)) { structIntRegs++; } else if (structDesc.IsSseSlot(i)) { structFloatRegs++; } } isRegArg = ((nextFltArgRegNum + structFloatRegs) <= MAX_FLOAT_REG_ARG) && ((intArgRegNum + structIntRegs) <= MAX_REG_ARG); } else { if (passUsingFloatRegs) { isRegArg = nextFltArgRegNum < MAX_FLOAT_REG_ARG; } else { isRegArg = intArgRegNum < MAX_REG_ARG; } } #else // !defined(UNIX_AMD64_ABI) isRegArg = (intArgRegNum + (size - 1)) < maxRegArgs; #endif // !defined(UNIX_AMD64_ABI) #endif // TARGET_ARM } else { isRegArg = false; } // If there are nonstandard args (outside the calling convention) they were inserted above // and noted them in a table so we can recognize them here and build their argInfo. // // They should not affect the placement of any other args or stack space required. // Example: on AMD64 R10 and R11 are used for indirect VSD (generic interface) and cookie calls. bool isNonStandard = nonStandardArgs.Find(argx, &nonStdRegNum, &nonStandardArgKind); if (isNonStandard) { isRegArg = (nonStdRegNum != REG_STK); } else if (call->IsTailCallViaJitHelper()) { // We have already (before calling fgMorphArgs()) appended the 4 special args // required by the x86 tailcall helper. These args are required to go on the // stack. Force them to the stack here. assert(numArgs >= 4); if (argIndex >= numArgs - 4) { isRegArg = false; } } // Now we know if the argument goes in registers or not and how big it is. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_ARM // If we ever allocate a floating point argument to the stack, then all // subsequent HFA/float/double arguments go on the stack. 
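// (This mirrors the AAPCS rule quoted above: once a VFP argument has been assigned to the
// stack, no further FP arguments are placed in registers; the remaining float argument
// registers are simply marked as skipped.)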
if (!isRegArg && passUsingFloatRegs) { for (; fltArgRegNum < MAX_FLOAT_REG_ARG; ++fltArgRegNum) { fltArgSkippedRegMask |= genMapArgNumToRegMask(fltArgRegNum, TYP_FLOAT); } } // If we think we're going to split a struct between integer registers and the stack, check to // see if we've already assigned a floating-point arg to the stack. if (isRegArg && // We decided above to use a register for the argument !passUsingFloatRegs && // We're using integer registers (intArgRegNum + size > MAX_REG_ARG) && // We're going to split a struct type onto registers and stack anyFloatStackArgs) // We've already used the stack for a floating-point argument { isRegArg = false; // Change our mind; don't pass this struct partially in registers // Skip the rest of the integer argument registers for (; intArgRegNum < MAX_REG_ARG; ++intArgRegNum) { argSkippedRegMask |= genMapArgNumToRegMask(intArgRegNum, TYP_I_IMPL); } } #endif // TARGET_ARM // Now create the fgArgTabEntry. fgArgTabEntry* newArgEntry; if (isRegArg) { regNumber nextRegNum = REG_STK; #if defined(UNIX_AMD64_ABI) regNumber nextOtherRegNum = REG_STK; unsigned int structFloatRegs = 0; unsigned int structIntRegs = 0; #endif // defined(UNIX_AMD64_ABI) if (isNonStandard) { nextRegNum = nonStdRegNum; } #if defined(UNIX_AMD64_ABI) else if (isStructArg && structDesc.passedInRegisters) { // It is a struct passed in registers. Assign the next available register. assert((structDesc.eightByteCount <= 2) && "Too many eightbytes."); regNumber* nextRegNumPtrs[2] = {&nextRegNum, &nextOtherRegNum}; for (unsigned int i = 0; i < structDesc.eightByteCount; i++) { if (structDesc.IsIntegralSlot(i)) { *nextRegNumPtrs[i] = genMapIntRegArgNumToRegNum(intArgRegNum + structIntRegs); ++structIntRegs; } else if (structDesc.IsSseSlot(i)) { *nextRegNumPtrs[i] = genMapFloatRegArgNumToRegNum(nextFltArgRegNum + structFloatRegs); ++structFloatRegs; } } } #endif // defined(UNIX_AMD64_ABI) else { // fill in or update the argInfo table nextRegNum = passUsingFloatRegs ? genMapFloatRegArgNumToRegNum(nextFltArgRegNum) : genMapIntRegArgNumToRegNum(intArgRegNum); } #ifdef TARGET_AMD64 #ifndef UNIX_AMD64_ABI assert(size == 1); #endif #endif // This is a register argument - put it in the table newArgEntry = call->fgArgInfo->AddRegArg(argIndex, argx, args, nextRegNum, size, byteSize, argAlignBytes, isStructArg, isFloatHfa, callIsVararg UNIX_AMD64_ABI_ONLY_ARG(nextOtherRegNum) UNIX_AMD64_ABI_ONLY_ARG(structIntRegs) UNIX_AMD64_ABI_ONLY_ARG(structFloatRegs) UNIX_AMD64_ABI_ONLY_ARG(&structDesc)); newArgEntry->SetIsBackFilled(isBackFilled); // Set up the next intArgRegNum and fltArgRegNum values. 
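// (A back-filled float argument reuses a register that was skipped earlier for alignment,
// so in that case neither register counter advances.)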
if (!isBackFilled) { #if defined(UNIX_AMD64_ABI) if (isStructArg) { // For this case, we've already set the regNums in the argTabEntry intArgRegNum += structIntRegs; fltArgRegNum += structFloatRegs; } else #endif // defined(UNIX_AMD64_ABI) { if (!isNonStandard) { #if FEATURE_ARG_SPLIT // Check for a split (partially enregistered) struct if (compFeatureArgSplit() && !passUsingFloatRegs && ((intArgRegNum + size) > MAX_REG_ARG)) { // This indicates a partial enregistration of a struct type assert((isStructArg) || argx->OperIs(GT_FIELD_LIST) || argx->OperIsCopyBlkOp() || (argx->gtOper == GT_COMMA && (argx->gtFlags & GTF_ASG))); unsigned numRegsPartial = MAX_REG_ARG - intArgRegNum; assert((unsigned char)numRegsPartial == numRegsPartial); call->fgArgInfo->SplitArg(argIndex, numRegsPartial, size - numRegsPartial); } #endif // FEATURE_ARG_SPLIT if (passUsingFloatRegs) { fltArgRegNum += size; #ifdef WINDOWS_AMD64_ABI // Whenever we pass an integer register argument // we skip the corresponding floating point register argument intArgRegNum = min(intArgRegNum + size, MAX_REG_ARG); #endif // WINDOWS_AMD64_ABI // No supported architecture supports partial structs using float registers. assert(fltArgRegNum <= MAX_FLOAT_REG_ARG); } else { // Increment intArgRegNum by 'size' registers intArgRegNum += size; #ifdef WINDOWS_AMD64_ABI fltArgRegNum = min(fltArgRegNum + size, MAX_FLOAT_REG_ARG); #endif // WINDOWS_AMD64_ABI } } } } } else // We have an argument that is not passed in a register { // This is a stack argument - put it in the table newArgEntry = call->fgArgInfo->AddStkArg(argIndex, argx, args, size, byteSize, argAlignBytes, isStructArg, isFloatHfa, callIsVararg); #ifdef UNIX_AMD64_ABI // TODO-Amd64-Unix-CQ: This is temporary (see also in fgMorphArgs). if (structDesc.passedInRegisters) { newArgEntry->structDesc.CopyFrom(structDesc); } #endif } newArgEntry->nonStandardArgKind = nonStandardArgKind; if (GlobalJitOptions::compFeatureHfa) { if (isHfaArg) { newArgEntry->SetHfaType(hfaType, hfaSlots); } } newArgEntry->SetMultiRegNums(); noway_assert(newArgEntry != nullptr); if (newArgEntry->isStruct) { newArgEntry->passedByRef = passStructByRef; newArgEntry->argType = (structBaseType == TYP_UNKNOWN) ? argx->TypeGet() : structBaseType; } else { newArgEntry->argType = argx->TypeGet(); } DEBUG_ARG_SLOTS_ONLY(argSlots += size;) } // end foreach argument loop #ifdef DEBUG if (verbose) { JITDUMP("ArgTable for %d.%s after fgInitArgInfo:\n", call->gtTreeID, GenTree::OpName(call->gtOper)); call->fgArgInfo->Dump(this); JITDUMP("\n"); } #endif } //------------------------------------------------------------------------ // fgMorphArgs: Walk and transform (morph) the arguments of a call // // Arguments: // callNode - the call for which we are doing the argument morphing // // Return Value: // Like most morph methods, this method returns the morphed node, // though in this case there are currently no scenarios where the // node itself is re-created. // // Notes: // This calls fgInitArgInfo to create the 'fgArgInfo' for the call. // If it has already been created, that method will simply return. // // This method changes the state of the call node. It uses the existence // of gtCallLateArgs (the late arguments list) to determine if it has // already done the first round of morphing. // // The first time it is called (i.e. during global morphing), this method // computes the "late arguments". 
// This is when it determines which arguments
// need to be evaluated to temps prior to the main argument setup, and which
// can be directly evaluated into the argument location. It also creates a
// second argument list (gtCallLateArgs) that does the final placement of the
// arguments, e.g. into registers or onto the stack.
//
// The "non-late arguments", aka the gtCallArgs, perform the in-order
// evaluation of the arguments that might have side effects, such as embedded
// assignments, calls or possible throws. In these cases, the argument and any
// earlier arguments must be evaluated to temps.
//
// On targets with a fixed outgoing argument area (FEATURE_FIXED_OUT_ARGS),
// if we have any nested calls, we need to defer the copying of the argument
// into the fixed argument area until after the call. If the argument did not
// otherwise need to be computed into a temp, it is moved to gtCallLateArgs and
// replaced in the "early" arg list (gtCallArgs) with a placeholder node.
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
{
    GenTreeCall::Use* args;
    GenTree*          argx;

    GenTreeFlags flagsSummary = GTF_EMPTY;

    unsigned argIndex = 0;
    DEBUG_ARG_SLOTS_ONLY(unsigned argSlots = 0;)

    bool reMorphing = call->AreArgsComplete();

    // Set up the fgArgInfo.
    fgInitArgInfo(call);
    JITDUMP("%sMorphing args for %d.%s:\n", (reMorphing) ? "Re" : "", call->gtTreeID, GenTree::OpName(call->gtOper));

    // If we are remorphing, process the late arguments (which were determined by a previous caller).
    if (reMorphing)
    {
        for (GenTreeCall::Use& use : call->LateArgs())
        {
            use.SetNode(fgMorphTree(use.GetNode()));
            flagsSummary |= use.GetNode()->gtFlags;
        }

        assert(call->fgArgInfo != nullptr);
    }
    call->fgArgInfo->RemorphReset();

    // First we morph the argument subtrees ('this' pointer, arguments, etc.).
    // During the first call to fgMorphArgs we also record the
    // information about late arguments in 'fgArgInfo'.
    // This information is used later to construct the gtCallLateArgs.

    // Process the 'this' argument value, if present.
    if (call->gtCallThisArg != nullptr)
    {
        argx = call->gtCallThisArg->GetNode();
        fgArgTabEntry* thisArgEntry = call->fgArgInfo->GetArgEntry(0, reMorphing);
        argx                        = fgMorphTree(argx);
        call->gtCallThisArg->SetNode(argx);
        // This is a register argument - possibly update it in the table.
        call->fgArgInfo->UpdateRegArg(thisArgEntry, argx, reMorphing);
        flagsSummary |= argx->gtFlags;

        if (!reMorphing && call->IsExpandedEarly() && call->IsVirtualVtable())
        {
            if (!argx->OperIsLocal())
            {
                thisArgEntry->needTmp = true;
                call->fgArgInfo->SetNeedsTemps();
            }
        }
        assert(argIndex == 0);
        argIndex++;
        DEBUG_ARG_SLOTS_ONLY(argSlots++;)
    }

    // Note that this name is a bit of a misnomer - it indicates that there are struct args
    // that occupy more than a single slot that are passed by value (not necessarily in regs).
    bool hasMultiregStructArgs = false;
    for (args = call->gtCallArgs; args != nullptr; args = args->GetNext(), argIndex++)
    {
        GenTree**      parentArgx = &args->NodeRef();
        fgArgTabEntry* argEntry   = call->fgArgInfo->GetArgEntry(argIndex, reMorphing);

        // Morph the arg node, and update the parent and argEntry pointers.
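        // (fgMorphTree may return a different tree than it was given, so the parent link is
        //  refreshed here and, further below, the fgArgTabEntry is updated to point at the
        //  morphed node.)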
argx = *parentArgx; argx = fgMorphTree(argx); *parentArgx = argx; assert(argx == args->GetNode()); DEBUG_ARG_SLOTS_ONLY(unsigned size = argEntry->getSize();) CORINFO_CLASS_HANDLE copyBlkClass = NO_CLASS_HANDLE; #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi()) { if (argEntry->GetByteAlignment() == 2 * TARGET_POINTER_SIZE) { if (argSlots % 2 == 1) { argSlots++; } } } #endif // DEBUG if (argEntry->isNonStandard() && argEntry->isPassedInRegisters()) { // We need to update the node field for this nonStandard arg here // as it may have been changed by the call to fgMorphTree. call->fgArgInfo->UpdateRegArg(argEntry, argx, reMorphing); flagsSummary |= argx->gtFlags; continue; } DEBUG_ARG_SLOTS_ASSERT(size != 0); DEBUG_ARG_SLOTS_ONLY(argSlots += argEntry->getSlotCount();) if (argx->IsLocalAddrExpr() != nullptr) { argx->gtType = TYP_I_IMPL; } // Get information about this argument. var_types hfaType = argEntry->GetHfaType(); bool isHfaArg = (hfaType != TYP_UNDEF); bool passUsingFloatRegs = argEntry->isPassedInFloatRegisters(); unsigned structSize = 0; // Struct arguments may be morphed into a node that is not a struct type. // In such case the fgArgTabEntry keeps track of whether the original node (before morphing) // was a struct and the struct classification. bool isStructArg = argEntry->isStruct; GenTree* argObj = argx->gtEffectiveVal(true /*commaOnly*/); if (isStructArg && varTypeIsStruct(argObj) && !argObj->OperIs(GT_ASG, GT_MKREFANY, GT_FIELD_LIST, GT_ARGPLACE)) { CORINFO_CLASS_HANDLE objClass = gtGetStructHandle(argObj); unsigned originalSize; if (argObj->TypeGet() == TYP_STRUCT) { if (argObj->OperIs(GT_OBJ)) { // Get the size off the OBJ node. originalSize = argObj->AsObj()->GetLayout()->GetSize(); assert(originalSize == info.compCompHnd->getClassSize(objClass)); } else { // We have a BADCODE assert for this in fgInitArgInfo. assert(argObj->OperIs(GT_LCL_VAR)); originalSize = lvaGetDesc(argObj->AsLclVarCommon())->lvExactSize; } } else { originalSize = genTypeSize(argx); assert(originalSize == info.compCompHnd->getClassSize(objClass)); } unsigned roundupSize = (unsigned)roundUp(originalSize, TARGET_POINTER_SIZE); var_types structBaseType = argEntry->argType; // First, handle the case where the argument is passed by reference. if (argEntry->passedByRef) { DEBUG_ARG_SLOTS_ASSERT(size == 1); copyBlkClass = objClass; #ifdef UNIX_AMD64_ABI assert(!"Structs are not passed by reference on x64/ux"); #endif // UNIX_AMD64_ABI } else // This is passed by value. { // Check to see if we can transform this into load of a primitive type. // 'size' must be the number of pointer sized items DEBUG_ARG_SLOTS_ASSERT(size == roundupSize / TARGET_POINTER_SIZE); structSize = originalSize; unsigned passingSize = originalSize; // Check to see if we can transform this struct load (GT_OBJ) into a GT_IND of the appropriate size. // When it can do this is platform-dependent: // - In general, it can be done for power of 2 structs that fit in a single register. // - For ARM and ARM64 it must also be a non-HFA struct, or have a single field. // - This is irrelevant for X86, since structs are always passed by value on the stack. 
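                // For example (illustrative): an 8-byte struct { int a; int b; } passed in a single
                // register can be re-typed and loaded as TYP_LONG instead of being copied as a block,
                // provided it is not an HFA whose element size differs from the load size.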
GenTree* lclVar = fgIsIndirOfAddrOfLocal(argObj); bool canTransform = false; if (structBaseType != TYP_STRUCT) { if (isPow2(passingSize)) { canTransform = (!argEntry->IsHfaArg() || (passingSize == genTypeSize(argEntry->GetHfaType()))); } #if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) // For ARM64 or AMD64/UX we can pass non-power-of-2 structs in a register, but we can // only transform in that case if the arg is a local. // TODO-CQ: This transformation should be applicable in general, not just for the ARM64 // or UNIX_AMD64_ABI cases where they will be passed in registers. else { canTransform = (lclVar != nullptr); passingSize = genTypeSize(structBaseType); } #endif // TARGET_ARM64 || UNIX_AMD64_ABI } if (!canTransform) { #if defined(TARGET_AMD64) #ifndef UNIX_AMD64_ABI // On Windows structs are always copied and passed by reference (handled above) unless they are // passed by value in a single register. assert(size == 1); copyBlkClass = objClass; #else // UNIX_AMD64_ABI // On Unix, structs are always passed by value. // We only need a copy if we have one of the following: // - The sizes don't match for a non-lclVar argument. // - We have a known struct type (e.g. SIMD) that requires multiple registers. // TODO-Amd64-Unix-Throughput: We don't need to keep the structDesc in the argEntry if it's not // actually passed in registers. if (argEntry->isPassedInRegisters()) { if (argObj->OperIs(GT_OBJ)) { if (passingSize != structSize) { copyBlkClass = objClass; } } else if (lclVar == nullptr) { // This should only be the case of a value directly producing a known struct type. assert(argObj->TypeGet() != TYP_STRUCT); if (argEntry->numRegs > 1) { copyBlkClass = objClass; } } } #endif // UNIX_AMD64_ABI #elif defined(TARGET_ARM64) if ((passingSize != structSize) && (lclVar == nullptr)) { copyBlkClass = objClass; } #endif #ifdef TARGET_ARM // TODO-1stClassStructs: Unify these conditions across targets. if (((lclVar != nullptr) && (lvaGetPromotionType(lclVar->AsLclVarCommon()->GetLclNum()) == PROMOTION_TYPE_INDEPENDENT)) || ((argObj->OperIs(GT_OBJ)) && (passingSize != structSize))) { copyBlkClass = objClass; } if (structSize < TARGET_POINTER_SIZE) { copyBlkClass = objClass; } #endif // TARGET_ARM } else { // We have a struct argument that fits into a register, and it is either a power of 2, // or a local. // Change our argument, as needed, into a value of the appropriate type. 
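                    // (The code below weakens a GT_OBJ to a GT_IND of the primitive type, folds
                    //  IND(ADDR(x)) back to 'x' where possible, and then re-types or re-labels the
                    //  underlying local as needed.)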
CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_ARM DEBUG_ARG_SLOTS_ASSERT((size == 1) || ((structBaseType == TYP_DOUBLE) && (size == 2))); #else DEBUG_ARG_SLOTS_ASSERT((size == 1) || (varTypeIsSIMD(structBaseType) && size == (genTypeSize(structBaseType) / REGSIZE_BYTES))); #endif assert((structBaseType != TYP_STRUCT) && (genTypeSize(structBaseType) >= originalSize)); if (argObj->OperIs(GT_OBJ)) { argObj->ChangeOper(GT_IND); // Now see if we can fold *(&X) into X if (argObj->AsOp()->gtOp1->gtOper == GT_ADDR) { GenTree* temp = argObj->AsOp()->gtOp1->AsOp()->gtOp1; // Keep the DONT_CSE flag in sync // (as the addr always marks it for its op1) temp->gtFlags &= ~GTF_DONT_CSE; temp->gtFlags |= (argObj->gtFlags & GTF_DONT_CSE); DEBUG_DESTROY_NODE(argObj->AsOp()->gtOp1); // GT_ADDR DEBUG_DESTROY_NODE(argObj); // GT_IND argObj = temp; *parentArgx = temp; argx = temp; } } if (argObj->gtOper == GT_LCL_VAR) { unsigned lclNum = argObj->AsLclVarCommon()->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->lvPromoted) { if (varDsc->lvFieldCnt == 1) { // get the first and only promoted field LclVarDsc* fieldVarDsc = lvaGetDesc(varDsc->lvFieldLclStart); if (genTypeSize(fieldVarDsc->TypeGet()) >= originalSize) { // we will use the first and only promoted field argObj->AsLclVarCommon()->SetLclNum(varDsc->lvFieldLclStart); if (varTypeIsEnregisterable(fieldVarDsc->TypeGet()) && (genTypeSize(fieldVarDsc->TypeGet()) == originalSize)) { // Just use the existing field's type argObj->gtType = fieldVarDsc->TypeGet(); } else { // Can't use the existing field's type, so use GT_LCL_FLD to swizzle // to a new type lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg)); argObj->ChangeOper(GT_LCL_FLD); argObj->gtType = structBaseType; } assert(varTypeIsEnregisterable(argObj->TypeGet())); assert(copyBlkClass == NO_CLASS_HANDLE); } else { // use GT_LCL_FLD to swizzle the single field struct to a new type lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg)); argObj->ChangeOper(GT_LCL_FLD); argObj->gtType = structBaseType; } } else { // The struct fits into a single register, but it has been promoted into its // constituent fields, and so we have to re-assemble it copyBlkClass = objClass; } } else if (genTypeSize(varDsc->TypeGet()) != genTypeSize(structBaseType)) { // Not a promoted struct, so just swizzle the type by using GT_LCL_FLD lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg)); argObj->ChangeOper(GT_LCL_FLD); argObj->gtType = structBaseType; } } else { // Not a GT_LCL_VAR, so we can just change the type on the node argObj->gtType = structBaseType; } assert(varTypeIsEnregisterable(argObj->TypeGet()) || ((copyBlkClass != NO_CLASS_HANDLE) && varTypeIsEnregisterable(structBaseType))); } #if !defined(UNIX_AMD64_ABI) && !defined(TARGET_ARMARCH) // TODO-CQ-XARCH: there is no need for a temp copy if we improve our code generation in // `genPutStructArgStk` for xarch like we did it for Arm/Arm64. // We still have a struct unless we converted the GT_OBJ into a GT_IND above... if (isHfaArg && passUsingFloatRegs) { } else if (structBaseType == TYP_STRUCT) { // If the valuetype size is not a multiple of TARGET_POINTER_SIZE, // we must copyblk to a temp before doing the obj to avoid // the obj reading memory past the end of the valuetype CLANG_FORMAT_COMMENT_ANCHOR; if (roundupSize > originalSize) { copyBlkClass = objClass; // There are a few special cases where we can omit using a CopyBlk // where we normally would need to use one. 
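                        // (In particular, when the source is a local, reading the padded tail of its
                        //  frame slot is safe, so the copy can be skipped below even though
                        //  roundupSize exceeds originalSize.)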
if (argObj->OperIs(GT_OBJ) && argObj->AsObj()->gtGetOp1()->IsLocalAddrExpr() != nullptr) // Is the source a LclVar? { copyBlkClass = NO_CLASS_HANDLE; } } } #endif // !UNIX_AMD64_ABI } } if (argEntry->isPassedInRegisters()) { call->fgArgInfo->UpdateRegArg(argEntry, argx, reMorphing); } else { call->fgArgInfo->UpdateStkArg(argEntry, argx, reMorphing); } if (copyBlkClass != NO_CLASS_HANDLE) { fgMakeOutgoingStructArgCopy(call, args, copyBlkClass); } if (argx->gtOper == GT_MKREFANY) { // 'Lower' the MKREFANY tree and insert it. noway_assert(!reMorphing); #ifdef TARGET_X86 // Build the mkrefany as a GT_FIELD_LIST GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList(); fieldList->AddField(this, argx->AsOp()->gtGetOp1(), OFFSETOF__CORINFO_TypedReference__dataPtr, TYP_BYREF); fieldList->AddField(this, argx->AsOp()->gtGetOp2(), OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL); fgArgTabEntry* fp = gtArgEntryByNode(call, argx); args->SetNode(fieldList); assert(fp->GetNode() == fieldList); #else // !TARGET_X86 // Get a new temp // Here we don't need unsafe value cls check since the addr of temp is used only in mkrefany unsigned tmp = lvaGrabTemp(true DEBUGARG("by-value mkrefany struct argument")); lvaSetStruct(tmp, impGetRefAnyClass(), false); // Build the mkrefany as a comma node: // (tmp.ptr=argx),(tmp.type=handle) GenTreeLclFld* destPtrSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, OFFSETOF__CORINFO_TypedReference__dataPtr); GenTreeLclFld* destTypeSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, OFFSETOF__CORINFO_TypedReference__type); destPtrSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyDataField())); destPtrSlot->gtFlags |= GTF_VAR_DEF; destTypeSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField())); destTypeSlot->gtFlags |= GTF_VAR_DEF; GenTree* asgPtrSlot = gtNewAssignNode(destPtrSlot, argx->AsOp()->gtOp1); GenTree* asgTypeSlot = gtNewAssignNode(destTypeSlot, argx->AsOp()->gtOp2); GenTree* asg = gtNewOperNode(GT_COMMA, TYP_VOID, asgPtrSlot, asgTypeSlot); // Change the expression to "(tmp=val)" args->SetNode(asg); // EvalArgsToTemps will cause tmp to actually get loaded as the argument call->fgArgInfo->EvalToTmp(argEntry, tmp, asg); lvaSetVarAddrExposed(tmp DEBUGARG(AddressExposedReason::TOO_CONSERVATIVE)); #endif // !TARGET_X86 } #if FEATURE_MULTIREG_ARGS if (isStructArg) { if (((argEntry->numRegs + argEntry->GetStackSlotsNumber()) > 1) || (isHfaArg && argx->TypeGet() == TYP_STRUCT)) { hasMultiregStructArgs = true; } } #ifdef TARGET_ARM else if ((argEntry->argType == TYP_LONG) || (argEntry->argType == TYP_DOUBLE)) { assert((argEntry->numRegs == 2) || (argEntry->numSlots == 2)); } #endif else { // We must have exactly one register or slot. assert(((argEntry->numRegs == 1) && (argEntry->GetStackSlotsNumber() == 0)) || ((argEntry->numRegs == 0) && (argEntry->GetStackSlotsNumber() == 1))); } #endif #if defined(TARGET_X86) if (isStructArg) { GenTree* lclNode = argx->OperIs(GT_LCL_VAR) ? argx : fgIsIndirOfAddrOfLocal(argx); if ((lclNode != nullptr) && (lvaGetPromotionType(lclNode->AsLclVarCommon()->GetLclNum()) == Compiler::PROMOTION_TYPE_INDEPENDENT)) { // Make a GT_FIELD_LIST of the field lclVars. 
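            // For example (illustrative): an independently promoted struct { int x; int y; } becomes
            // FIELD_LIST(LCL_VAR<x> at offset 0, LCL_VAR<y> at offset 4), reusing the original node
            // for the first field.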
GenTreeLclVarCommon* lcl = lclNode->AsLclVarCommon(); LclVarDsc* varDsc = lvaGetDesc(lcl); GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList(); fgArgTabEntry* fp = gtArgEntryByNode(call, argx); args->SetNode(fieldList); assert(fp->GetNode() == fieldList); for (unsigned fieldLclNum = varDsc->lvFieldLclStart; fieldLclNum < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++fieldLclNum) { LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum); GenTree* fieldLcl; if (fieldLclNum == varDsc->lvFieldLclStart) { lcl->SetLclNum(fieldLclNum); lcl->SetOperResetFlags(GT_LCL_VAR); lcl->gtType = fieldVarDsc->TypeGet(); fieldLcl = lcl; } else { fieldLcl = gtNewLclvNode(fieldLclNum, fieldVarDsc->TypeGet()); } fieldList->AddField(this, fieldLcl, fieldVarDsc->lvFldOffset, fieldVarDsc->TypeGet()); } } } #endif // TARGET_X86 flagsSummary |= args->GetNode()->gtFlags; } // end foreach argument loop if (!reMorphing) { call->fgArgInfo->ArgsComplete(); } /* Process the function address, if indirect call */ if (call->gtCallType == CT_INDIRECT) { call->gtCallAddr = fgMorphTree(call->gtCallAddr); // Const CSE may create an assignment node here flagsSummary |= call->gtCallAddr->gtFlags; } #if FEATURE_FIXED_OUT_ARGS // Record the outgoing argument size. If the call is a fast tail // call, it will setup its arguments in incoming arg area instead // of the out-going arg area, so we don't need to track the // outgoing arg size. if (!call->IsFastTailCall()) { #if defined(UNIX_AMD64_ABI) // This is currently required for the UNIX ABI to work correctly. opts.compNeedToAlignFrame = true; #endif // UNIX_AMD64_ABI const unsigned outgoingArgSpaceSize = GetOutgoingArgByteSize(call->fgArgInfo->GetNextSlotByteOffset()); #if defined(DEBUG_ARG_SLOTS) unsigned preallocatedArgCount = 0; if (!compMacOsArm64Abi()) { preallocatedArgCount = call->fgArgInfo->GetNextSlotNum(); assert(outgoingArgSpaceSize == preallocatedArgCount * REGSIZE_BYTES); } #endif call->fgArgInfo->SetOutArgSize(max(outgoingArgSpaceSize, MIN_ARG_AREA_FOR_CALL)); #ifdef DEBUG if (verbose) { const fgArgInfo* argInfo = call->fgArgInfo; #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi()) { printf("argSlots=%d, preallocatedArgCount=%d, nextSlotNum=%d, nextSlotByteOffset=%d, " "outgoingArgSpaceSize=%d\n", argSlots, preallocatedArgCount, argInfo->GetNextSlotNum(), argInfo->GetNextSlotByteOffset(), outgoingArgSpaceSize); } else { printf("nextSlotByteOffset=%d, outgoingArgSpaceSize=%d\n", argInfo->GetNextSlotByteOffset(), outgoingArgSpaceSize); } #else printf("nextSlotByteOffset=%d, outgoingArgSpaceSize=%d\n", argInfo->GetNextSlotByteOffset(), outgoingArgSpaceSize); #endif } #endif } #endif // FEATURE_FIXED_OUT_ARGS // Clear the ASG and EXCEPT (if possible) flags on the call node call->gtFlags &= ~GTF_ASG; if (!call->OperMayThrow(this)) { call->gtFlags &= ~GTF_EXCEPT; } // Union in the side effect flags from the call's operands call->gtFlags |= flagsSummary & GTF_ALL_EFFECT; // If we are remorphing or don't have any register arguments or other arguments that need // temps, then we don't need to call SortArgs() and EvalArgsToTemps(). // if (!reMorphing && (call->fgArgInfo->HasRegArgs() || call->fgArgInfo->NeedsTemps())) { // Do the 'defer or eval to temp' analysis. 
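        // (SortArgs orders the table so that complex, side-effecting arguments come first and simple
        //  ones such as constants come last; EvalArgsToTemps then spills into temps any argument that
        //  must be evaluated early and records the temp in the late argument list.)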
call->fgArgInfo->SortArgs(); call->fgArgInfo->EvalArgsToTemps(); } if (hasMultiregStructArgs) { fgMorphMultiregStructArgs(call); } #ifdef DEBUG if (verbose) { JITDUMP("ArgTable for %d.%s after fgMorphArgs:\n", call->gtTreeID, GenTree::OpName(call->gtOper)); call->fgArgInfo->Dump(this); JITDUMP("\n"); } #endif return call; } #ifdef _PREFAST_ #pragma warning(pop) #endif //----------------------------------------------------------------------------- // fgMorphMultiregStructArgs: Locate the TYP_STRUCT arguments and // call fgMorphMultiregStructArg on each of them. // // Arguments: // call : a GenTreeCall node that has one or more TYP_STRUCT arguments\. // // Notes: // We only call fgMorphMultiregStructArg for struct arguments that are not passed as simple types. // It will ensure that the struct arguments are in the correct form. // If this method fails to find any TYP_STRUCT arguments it will assert. // void Compiler::fgMorphMultiregStructArgs(GenTreeCall* call) { bool foundStructArg = false; GenTreeFlags flagsSummary = GTF_EMPTY; #ifdef TARGET_X86 assert(!"Logic error: no MultiregStructArgs for X86"); #endif #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) assert(!"Logic error: no MultiregStructArgs for Windows X64 ABI"); #endif for (GenTreeCall::Use& use : call->Args()) { // For late arguments the arg tree that is overridden is in the gtCallLateArgs list. // For such late args the gtCallArgList contains the setup arg node (evaluating the arg.) // The tree from the gtCallLateArgs list is passed to the callee. The fgArgEntry node contains the mapping // between the nodes in both lists. If the arg is not a late arg, the fgArgEntry->node points to itself, // otherwise points to the list in the late args list. bool isLateArg = (use.GetNode()->gtFlags & GTF_LATE_ARG) != 0; fgArgTabEntry* fgEntryPtr = gtArgEntryByNode(call, use.GetNode()); assert(fgEntryPtr != nullptr); GenTree* argx = fgEntryPtr->GetNode(); GenTreeCall::Use* lateUse = nullptr; GenTree* lateNode = nullptr; if (isLateArg) { for (GenTreeCall::Use& lateArgUse : call->LateArgs()) { GenTree* argNode = lateArgUse.GetNode(); if (argx == argNode) { lateUse = &lateArgUse; lateNode = argNode; break; } } assert((lateUse != nullptr) && (lateNode != nullptr)); } if (!fgEntryPtr->isStruct) { continue; } unsigned size = (fgEntryPtr->numRegs + fgEntryPtr->GetStackSlotsNumber()); if ((size > 1) || (fgEntryPtr->IsHfaArg() && argx->TypeGet() == TYP_STRUCT)) { foundStructArg = true; if (varTypeIsStruct(argx) && !argx->OperIs(GT_FIELD_LIST)) { if (fgEntryPtr->IsHfaRegArg()) { var_types hfaType = fgEntryPtr->GetHfaType(); unsigned structSize; if (argx->OperIs(GT_OBJ)) { structSize = argx->AsObj()->GetLayout()->GetSize(); } else if (varTypeIsSIMD(argx)) { structSize = genTypeSize(argx); } else { assert(argx->OperIs(GT_LCL_VAR)); structSize = lvaGetDesc(argx->AsLclVar())->lvExactSize; } assert(structSize > 0); if (structSize == genTypeSize(hfaType)) { if (argx->OperIs(GT_OBJ)) { argx->SetOper(GT_IND); } argx->gtType = hfaType; } } GenTree* newArgx = fgMorphMultiregStructArg(argx, fgEntryPtr); // Did we replace 'argx' with a new tree? 
                if (newArgx != argx)
                {
                    // link the new arg node into either the late arg list or the gtCallArgs list
                    if (isLateArg)
                    {
                        lateUse->SetNode(newArgx);
                    }
                    else
                    {
                        use.SetNode(newArgx);
                    }

                    assert(fgEntryPtr->GetNode() == newArgx);
                }
            }
        }
    }

    // We should only call this method when we actually have one or more multireg struct args
    assert(foundStructArg);

    // Update the flags
    call->gtFlags |= (flagsSummary & GTF_ALL_EFFECT);
}

//-----------------------------------------------------------------------------
// fgMorphMultiregStructArg: Given a TYP_STRUCT arg from a call argument list,
//     morph the argument as needed to be passed correctly.
//
// Arguments:
//     arg        - A GenTree node containing a TYP_STRUCT arg
//     fgEntryPtr - the fgArgTabEntry information for the current 'arg'
//
// Notes:
//    The arg must be a GT_OBJ or GT_LCL_VAR or GT_LCL_FLD of TYP_STRUCT.
//    If 'arg' is a lclVar passed on the stack, we will ensure that any lclVars that must be on the
//    stack are marked as doNotEnregister, and then we return.
//
//    If it is passed by register, we mutate the argument into the GT_FIELD_LIST form
//    which is only used for struct arguments.
//
//    If arg is a LclVar we check if it is struct promoted and has the right number of fields,
//    and if they are at the appropriate offsets we will use the struct's promoted fields
//    in the GT_FIELD_LIST nodes that we create.
//    If we have a GT_LCL_VAR that isn't struct promoted or doesn't meet the requirements,
//    we will use a set of GT_LCL_FLD nodes to access the various portions of the struct;
//    this also forces the struct to be stack allocated into the local frame.
//    For the GT_OBJ case we will clone the address expression and generate two (or more)
//    indirections.
//    Currently the implementation handles ARM64/ARM and hits NYI for other architectures.
//
GenTree* Compiler::fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntryPtr)
{
    assert(varTypeIsStruct(arg->TypeGet()));

#if !defined(TARGET_ARMARCH) && !defined(UNIX_AMD64_ABI)
    NYI("fgMorphMultiregStructArg requires implementation for this target");
#endif

#ifdef TARGET_ARM
    if ((fgEntryPtr->IsSplit() && fgEntryPtr->GetStackSlotsNumber() + fgEntryPtr->numRegs > 4) ||
        (!fgEntryPtr->IsSplit() && fgEntryPtr->GetRegNum() == REG_STK))
#else
    if (fgEntryPtr->GetRegNum() == REG_STK)
#endif
    {
        GenTreeLclVarCommon* lcl       = nullptr;
        GenTree*             actualArg = arg->gtEffectiveVal();

        if (actualArg->OperGet() == GT_OBJ)
        {
            if (actualArg->gtGetOp1()->OperIs(GT_ADDR) && actualArg->gtGetOp1()->gtGetOp1()->OperIs(GT_LCL_VAR))
            {
                lcl = actualArg->gtGetOp1()->gtGetOp1()->AsLclVarCommon();
            }
        }
        else if (actualArg->OperGet() == GT_LCL_VAR)
        {
            lcl = actualArg->AsLclVarCommon();
        }
        if (lcl != nullptr)
        {
            if (lvaGetPromotionType(lcl->GetLclNum()) == PROMOTION_TYPE_INDEPENDENT)
            {
                arg = fgMorphLclArgToFieldlist(lcl);
            }
            else if (arg->TypeGet() == TYP_STRUCT)
            {
                // If this is a non-register struct, it must be referenced from memory.
                if (!actualArg->OperIs(GT_OBJ))
                {
                    // Create an Obj of the temp to use it as a call argument.
                    arg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, arg);
                    arg = gtNewObjNode(lvaGetStruct(lcl->GetLclNum()), arg);
                }
                // Its fields will need to be accessed by address.
lvaSetVarDoNotEnregister(lcl->GetLclNum() DEBUG_ARG(DoNotEnregisterReason::IsStructArg)); } } return arg; } #if FEATURE_MULTIREG_ARGS // Examine 'arg' and setup argValue objClass and structSize // const CORINFO_CLASS_HANDLE objClass = gtGetStructHandle(arg); GenTree* argValue = arg; // normally argValue will be arg, but see right below unsigned structSize = 0; if (arg->TypeGet() != TYP_STRUCT) { structSize = genTypeSize(arg->TypeGet()); assert(structSize == info.compCompHnd->getClassSize(objClass)); } else if (arg->OperGet() == GT_OBJ) { GenTreeObj* argObj = arg->AsObj(); const ClassLayout* objLayout = argObj->GetLayout(); structSize = objLayout->GetSize(); assert(structSize == info.compCompHnd->getClassSize(objClass)); // If we have a GT_OBJ of a GT_ADDR then we set argValue to the child node of the GT_ADDR. GenTree* op1 = argObj->gtOp1; if (op1->OperGet() == GT_ADDR) { GenTree* underlyingTree = op1->AsOp()->gtOp1; // Only update to the same type. if (underlyingTree->OperIs(GT_LCL_VAR)) { const LclVarDsc* varDsc = lvaGetDesc(underlyingTree->AsLclVar()); if (ClassLayout::AreCompatible(varDsc->GetLayout(), objLayout)) { argValue = underlyingTree; } } } } else if (arg->OperGet() == GT_LCL_VAR) { LclVarDsc* varDsc = lvaGetDesc(arg->AsLclVarCommon()); structSize = varDsc->lvExactSize; assert(structSize == info.compCompHnd->getClassSize(objClass)); } else { structSize = info.compCompHnd->getClassSize(objClass); } var_types hfaType = TYP_UNDEF; var_types elemType = TYP_UNDEF; unsigned elemCount = 0; unsigned elemSize = 0; var_types type[MAX_ARG_REG_COUNT] = {}; // TYP_UNDEF = 0 hfaType = fgEntryPtr->GetHfaType(); if (varTypeIsValidHfaType(hfaType) && fgEntryPtr->isPassedInFloatRegisters()) { elemType = hfaType; elemSize = genTypeSize(elemType); elemCount = structSize / elemSize; assert(elemSize * elemCount == structSize); for (unsigned inx = 0; inx < elemCount; inx++) { type[inx] = elemType; } } else { assert(structSize <= MAX_ARG_REG_COUNT * TARGET_POINTER_SIZE); BYTE gcPtrs[MAX_ARG_REG_COUNT]; elemCount = roundUp(structSize, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE; info.compCompHnd->getClassGClayout(objClass, &gcPtrs[0]); for (unsigned inx = 0; inx < elemCount; inx++) { #ifdef UNIX_AMD64_ABI if (gcPtrs[inx] == TYPE_GC_NONE) { type[inx] = GetTypeFromClassificationAndSizes(fgEntryPtr->structDesc.eightByteClassifications[inx], fgEntryPtr->structDesc.eightByteSizes[inx]); } else #endif // UNIX_AMD64_ABI { type[inx] = getJitGCType(gcPtrs[inx]); } } #ifndef UNIX_AMD64_ABI if ((argValue->OperGet() == GT_LCL_FLD) || (argValue->OperGet() == GT_LCL_VAR)) { elemSize = TARGET_POINTER_SIZE; // We can safely widen this to aligned bytes since we are loading from // a GT_LCL_VAR or a GT_LCL_FLD which is properly padded and // lives in the stack frame or will be a promoted field. 
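            // For example (illustrative, non-Unix): a 12-byte struct read from a local is loaded as
            // two pointer-sized elements (16 bytes on ARM64), which is safe because the local's
            // frame slot is padded out to at least that size.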
// structSize = elemCount * TARGET_POINTER_SIZE; } else // we must have a GT_OBJ { assert(argValue->OperGet() == GT_OBJ); // We need to load the struct from an arbitrary address // and we can't read past the end of the structSize // We adjust the last load type here // unsigned remainingBytes = structSize % TARGET_POINTER_SIZE; unsigned lastElem = elemCount - 1; if (remainingBytes != 0) { switch (remainingBytes) { case 1: type[lastElem] = TYP_BYTE; break; case 2: type[lastElem] = TYP_SHORT; break; #if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) case 4: type[lastElem] = TYP_INT; break; #endif // (TARGET_ARM64) || (UNIX_AMD64_ABI) default: noway_assert(!"NYI: odd sized struct in fgMorphMultiregStructArg"); break; } } } #endif // !UNIX_AMD64_ABI } // We should still have a TYP_STRUCT assert(varTypeIsStruct(argValue->TypeGet())); GenTreeFieldList* newArg = nullptr; // Are we passing a struct LclVar? // if (argValue->OperGet() == GT_LCL_VAR) { GenTreeLclVarCommon* varNode = argValue->AsLclVarCommon(); unsigned varNum = varNode->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(varNum); // At this point any TYP_STRUCT LclVar must be an aligned struct // or an HFA struct, both which are passed by value. // assert((varDsc->lvSize() == elemCount * TARGET_POINTER_SIZE) || varDsc->lvIsHfa()); varDsc->lvIsMultiRegArg = true; #ifdef DEBUG if (verbose) { JITDUMP("Multireg struct argument V%02u : ", varNum); fgEntryPtr->Dump(); } #endif // DEBUG #ifndef UNIX_AMD64_ABI // This local variable must match the layout of the 'objClass' type exactly if (varDsc->lvIsHfa() && fgEntryPtr->isPassedInFloatRegisters()) { // We have a HFA struct. noway_assert(elemType == varDsc->GetHfaType()); noway_assert(elemSize == genTypeSize(elemType)); noway_assert(elemCount == (varDsc->lvExactSize / elemSize)); noway_assert(elemSize * elemCount == varDsc->lvExactSize); for (unsigned inx = 0; (inx < elemCount); inx++) { noway_assert(type[inx] == elemType); } } else { #if defined(TARGET_ARM64) // We must have a 16-byte struct (non-HFA) noway_assert(elemCount == 2); #elif defined(TARGET_ARM) noway_assert(elemCount <= 4); #endif for (unsigned inx = 0; inx < elemCount; inx++) { var_types currentGcLayoutType = varDsc->GetLayout()->GetGCPtrType(inx); // We setup the type[inx] value above using the GC info from 'objClass' // This GT_LCL_VAR must have the same GC layout info // if (varTypeIsGC(currentGcLayoutType)) { noway_assert(type[inx] == currentGcLayoutType); } else { // We may have use a small type when we setup the type[inx] values above // We can safely widen this to TYP_I_IMPL type[inx] = TYP_I_IMPL; } } } if (varDsc->lvPromoted && varDsc->lvIsHfa() && fgEntryPtr->isPassedInFloatRegisters()) { bool canMorphToFieldList = true; for (unsigned fldOffset = 0; fldOffset < varDsc->lvExactSize; fldOffset += elemSize) { const unsigned fldVarNum = lvaGetFieldLocal(varDsc, fldOffset); if ((fldVarNum == BAD_VAR_NUM) || !varTypeUsesFloatReg(lvaGetDesc(fldVarNum))) { canMorphToFieldList = false; break; } } if (canMorphToFieldList) { newArg = fgMorphLclArgToFieldlist(varNode); } } else #endif // !UNIX_AMD64_ABI #if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) // Is this LclVar a promoted struct with exactly 2 fields? if (varDsc->lvPromoted && (varDsc->lvFieldCnt == 2) && !varDsc->lvIsHfa()) { // See if we have two promoted fields that start at offset 0 and 8? unsigned loVarNum = lvaGetFieldLocal(varDsc, 0); unsigned hiVarNum = lvaGetFieldLocal(varDsc, TARGET_POINTER_SIZE); // Did we find the promoted fields at the necessary offsets? 
if ((loVarNum != BAD_VAR_NUM) && (hiVarNum != BAD_VAR_NUM)) { LclVarDsc* loVarDsc = lvaGetDesc(loVarNum); LclVarDsc* hiVarDsc = lvaGetDesc(hiVarNum); var_types loType = loVarDsc->lvType; var_types hiType = hiVarDsc->lvType; if ((varTypeIsFloating(loType) != genIsValidFloatReg(fgEntryPtr->GetRegNum(0))) || (varTypeIsFloating(hiType) != genIsValidFloatReg(fgEntryPtr->GetRegNum(1)))) { // TODO-LSRA - It currently doesn't support the passing of floating point LCL_VARS in the integer // registers. So for now we will use GT_LCLFLD's to pass this struct (it won't be enregistered) // JITDUMP("Multireg struct V%02u will be passed using GT_LCLFLD because it has float fields.\n", varNum); // // we call lvaSetVarDoNotEnregister and do the proper transformation below. // } else { // We can use the struct promoted field as the two arguments // Create a new tree for 'arg' // replace the existing LDOBJ(ADDR(LCLVAR)) // with a FIELD_LIST(LCLVAR-LO, FIELD_LIST(LCLVAR-HI, nullptr)) // newArg = new (this, GT_FIELD_LIST) GenTreeFieldList(); newArg->AddField(this, gtNewLclvNode(loVarNum, loType), 0, loType); newArg->AddField(this, gtNewLclvNode(hiVarNum, hiType), TARGET_POINTER_SIZE, hiType); } } } else { // // We will create a list of GT_LCL_FLDs nodes to pass this struct // lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField)); } #elif defined(TARGET_ARM) // Is this LclVar a promoted struct with exactly same size? if (varDsc->lvPromoted && (varDsc->lvFieldCnt == elemCount) && !varDsc->lvIsHfa()) { // See if we have promoted fields? unsigned varNums[4]; bool hasBadVarNum = false; for (unsigned inx = 0; inx < elemCount; inx++) { varNums[inx] = lvaGetFieldLocal(varDsc, TARGET_POINTER_SIZE * inx); if (varNums[inx] == BAD_VAR_NUM) { hasBadVarNum = true; break; } } // Did we find the promoted fields at the necessary offsets? if (!hasBadVarNum) { LclVarDsc* varDscs[4]; var_types varType[4]; bool varIsFloat = false; for (unsigned inx = 0; inx < elemCount; inx++) { varDscs[inx] = lvaGetDesc(varNums[inx]); varType[inx] = varDscs[inx]->lvType; if (varTypeIsFloating(varType[inx])) { // TODO-LSRA - It currently doesn't support the passing of floating point LCL_VARS in the // integer // registers. So for now we will use GT_LCLFLD's to pass this struct (it won't be enregistered) // JITDUMP("Multireg struct V%02u will be passed using GT_LCLFLD because it has float fields.\n", varNum); // // we call lvaSetVarDoNotEnregister and do the proper transformation below. // varIsFloat = true; break; } } if (!varIsFloat) { newArg = fgMorphLclArgToFieldlist(varNode); } } } else { // // We will create a list of GT_LCL_FLDs nodes to pass this struct // lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField)); } #endif // TARGET_ARM } // If we didn't set newarg to a new List Node tree // if (newArg == nullptr) { if (fgEntryPtr->GetRegNum() == REG_STK) { // We leave this stack passed argument alone return arg; } // Are we passing a GT_LCL_FLD (or a GT_LCL_VAR that was not struct promoted ) // A GT_LCL_FLD could also contain a 16-byte struct or HFA struct inside it? 
// if ((argValue->OperGet() == GT_LCL_FLD) || (argValue->OperGet() == GT_LCL_VAR)) { GenTreeLclVarCommon* varNode = argValue->AsLclVarCommon(); unsigned varNum = varNode->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(varNum); unsigned baseOffset = varNode->GetLclOffs(); unsigned lastOffset = baseOffset + structSize; // The allocated size of our LocalVar must be at least as big as lastOffset assert(varDsc->lvSize() >= lastOffset); if (varDsc->HasGCPtr()) { // alignment of the baseOffset is required noway_assert((baseOffset % TARGET_POINTER_SIZE) == 0); #ifndef UNIX_AMD64_ABI noway_assert(elemSize == TARGET_POINTER_SIZE); #endif unsigned baseIndex = baseOffset / TARGET_POINTER_SIZE; ClassLayout* layout = varDsc->GetLayout(); for (unsigned inx = 0; (inx < elemCount); inx++) { // The GC information must match what we setup using 'objClass' if (layout->IsGCPtr(baseIndex + inx) || varTypeGCtype(type[inx])) { noway_assert(type[inx] == layout->GetGCPtrType(baseIndex + inx)); } } } else // this varDsc contains no GC pointers { for (unsigned inx = 0; inx < elemCount; inx++) { // The GC information must match what we setup using 'objClass' noway_assert(!varTypeIsGC(type[inx])); } } // // We create a list of GT_LCL_FLDs nodes to pass this struct // lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField)); // Create a new tree for 'arg' // replace the existing LDOBJ(ADDR(LCLVAR)) // with a FIELD_LIST(LCLFLD-LO, LCLFLD-HI) // unsigned offset = baseOffset; newArg = new (this, GT_FIELD_LIST) GenTreeFieldList(); for (unsigned inx = 0; inx < elemCount; inx++) { GenTree* nextLclFld = gtNewLclFldNode(varNum, type[inx], offset); newArg->AddField(this, nextLclFld, offset, type[inx]); offset += genTypeSize(type[inx]); } } // Are we passing a GT_OBJ struct? // else if (argValue->OperGet() == GT_OBJ) { GenTreeObj* argObj = argValue->AsObj(); GenTree* baseAddr = argObj->gtOp1; var_types addrType = baseAddr->TypeGet(); if (baseAddr->OperGet() == GT_ADDR) { GenTree* addrTaken = baseAddr->AsOp()->gtOp1; if (addrTaken->IsLocal()) { GenTreeLclVarCommon* varNode = addrTaken->AsLclVarCommon(); unsigned varNum = varNode->GetLclNum(); // We access non-struct type (for example, long) as a struct type. // Make sure lclVar lives on stack to make sure its fields are accessible by address. lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::LocalField)); } } // Create a new tree for 'arg' // replace the existing LDOBJ(EXPR) // with a FIELD_LIST(IND(EXPR), FIELD_LIST(IND(EXPR+8), nullptr) ...) // newArg = new (this, GT_FIELD_LIST) GenTreeFieldList(); unsigned offset = 0; for (unsigned inx = 0; inx < elemCount; inx++) { GenTree* curAddr = baseAddr; if (offset != 0) { GenTree* baseAddrDup = gtCloneExpr(baseAddr); noway_assert(baseAddrDup != nullptr); curAddr = gtNewOperNode(GT_ADD, addrType, baseAddrDup, gtNewIconNode(offset, TYP_I_IMPL)); } else { curAddr = baseAddr; } GenTree* curItem = gtNewIndir(type[inx], curAddr); // For safety all GT_IND should have at least GT_GLOB_REF set. 
curItem->gtFlags |= GTF_GLOB_REF; newArg->AddField(this, curItem, offset, type[inx]); offset += genTypeSize(type[inx]); } } } #ifdef DEBUG // If we reach here we should have set newArg to something if (newArg == nullptr) { gtDispTree(argValue); assert(!"Missing case in fgMorphMultiregStructArg"); } #endif noway_assert(newArg != nullptr); #ifdef DEBUG if (verbose) { printf("fgMorphMultiregStructArg created tree:\n"); gtDispTree(newArg); } #endif arg = newArg; // consider calling fgMorphTree(newArg); #endif // FEATURE_MULTIREG_ARGS return arg; } //------------------------------------------------------------------------ // fgMorphLclArgToFieldlist: Morph a GT_LCL_VAR node to a GT_FIELD_LIST of its promoted fields // // Arguments: // lcl - The GT_LCL_VAR node we will transform // // Return value: // The new GT_FIELD_LIST that we have created. // GenTreeFieldList* Compiler::fgMorphLclArgToFieldlist(GenTreeLclVarCommon* lcl) { LclVarDsc* varDsc = lvaGetDesc(lcl); assert(varDsc->lvPromoted); unsigned fieldCount = varDsc->lvFieldCnt; unsigned fieldLclNum = varDsc->lvFieldLclStart; GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList(); for (unsigned i = 0; i < fieldCount; i++) { LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum); GenTree* lclVar = gtNewLclvNode(fieldLclNum, fieldVarDsc->TypeGet()); fieldList->AddField(this, lclVar, fieldVarDsc->lvFldOffset, fieldVarDsc->TypeGet()); fieldLclNum++; } return fieldList; } //------------------------------------------------------------------------ // fgMakeOutgoingStructArgCopy: make a copy of a struct variable if necessary, // to pass to a callee. // // Arguments: // call - call being processed // args - args for the call // copyBlkClass - class handle for the struct // // The arg is updated if necessary with the copy. // void Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call, GenTreeCall::Use* args, CORINFO_CLASS_HANDLE copyBlkClass) { GenTree* argx = args->GetNode(); noway_assert(argx->gtOper != GT_MKREFANY); fgArgTabEntry* argEntry = Compiler::gtArgEntryByNode(call, argx); // If we're optimizing, see if we can avoid making a copy. // // We don't need a copy if this is the last use of an implicit by-ref local. // if (opts.OptimizationEnabled()) { GenTreeLclVar* const lcl = argx->IsImplicitByrefParameterValue(this); if (lcl != nullptr) { const unsigned varNum = lcl->GetLclNum(); LclVarDsc* const varDsc = lvaGetDesc(varNum); const unsigned short totalAppearances = varDsc->lvRefCnt(RCS_EARLY); // We don't have liveness so we rely on other indications of last use. // // We handle these cases: // // * (must not copy) If the call is a tail call, the use is a last use. // We must skip the copy if we have a fast tail call. // // * (may not copy) if the call is noreturn, the use is a last use. // We also check for just one reference here as we are not doing // alias analysis of the call's parameters, or checking if the call // site is not within some try region. // // * (may not copy) if there is exactly one use of the local in the method, // and the call is not in loop, this is a last use. // // fgMightHaveLoop() is expensive; check it last, only if necessary. 
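        // (Background: on targets that pass large structs by implicit reference, e.g. Windows x64
        //  and ARM64, the parameter local already holds a caller-provided copy; on its last use that
        //  copy can be forwarded directly instead of making yet another one.)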
// if (call->IsTailCall() || // ((totalAppearances == 1) && call->IsNoReturn()) || // ((totalAppearances == 1) && !fgMightHaveLoop())) { args->SetNode(lcl); assert(argEntry->GetNode() == lcl); JITDUMP("did not need to make outgoing copy for last use of implicit byref V%2d\n", varNum); return; } } } JITDUMP("making an outgoing copy for struct arg\n"); if (fgOutgoingArgTemps == nullptr) { fgOutgoingArgTemps = hashBv::Create(this); } unsigned tmp = 0; bool found = false; // Attempt to find a local we have already used for an outgoing struct and reuse it. // We do not reuse within a statement. if (!opts.MinOpts()) { indexType lclNum; FOREACH_HBV_BIT_SET(lclNum, fgOutgoingArgTemps) { LclVarDsc* varDsc = lvaGetDesc((unsigned)lclNum); if (typeInfo::AreEquivalent(varDsc->lvVerTypeInfo, typeInfo(TI_STRUCT, copyBlkClass)) && !fgCurrentlyInUseArgTemps->testBit(lclNum)) { tmp = (unsigned)lclNum; found = true; JITDUMP("reusing outgoing struct arg"); break; } } NEXT_HBV_BIT_SET; } // Create the CopyBlk tree and insert it. if (!found) { // Get a new temp // Here We don't need unsafe value cls check, since the addr of this temp is used only in copyblk. tmp = lvaGrabTemp(true DEBUGARG("by-value struct argument")); lvaSetStruct(tmp, copyBlkClass, false); if (call->IsVarargs()) { lvaSetStructUsedAsVarArg(tmp); } fgOutgoingArgTemps->setBit(tmp); } fgCurrentlyInUseArgTemps->setBit(tmp); // TYP_SIMD structs should not be enregistered, since ABI requires it to be // allocated on stack and address of it needs to be passed. if (lclVarIsSIMDType(tmp)) { // TODO: check if we need this block here or other parts already deal with it. lvaSetVarDoNotEnregister(tmp DEBUGARG(DoNotEnregisterReason::IsStructArg)); } // Create a reference to the temp GenTree* dest = gtNewLclvNode(tmp, lvaTable[tmp].lvType); dest->gtFlags |= (GTF_DONT_CSE | GTF_VAR_DEF); // This is a def of the local, "entire" by construction. // Copy the valuetype to the temp GenTree* copyBlk = gtNewBlkOpNode(dest, argx, false /* not volatile */, true /* copyBlock */); copyBlk = fgMorphCopyBlock(copyBlk); #if FEATURE_FIXED_OUT_ARGS // Do the copy early, and evalute the temp later (see EvalArgsToTemps) // When on Unix create LCL_FLD for structs passed in more than one registers. See fgMakeTmpArgNode GenTree* arg = copyBlk; #else // FEATURE_FIXED_OUT_ARGS // Structs are always on the stack, and thus never need temps // so we have to put the copy and temp all into one expression. argEntry->tmpNum = tmp; GenTree* arg = fgMakeTmpArgNode(argEntry); // Change the expression to "(tmp=val),tmp" arg = gtNewOperNode(GT_COMMA, arg->TypeGet(), copyBlk, arg); #endif // FEATURE_FIXED_OUT_ARGS args->SetNode(arg); call->fgArgInfo->EvalToTmp(argEntry, tmp, arg); } #ifdef TARGET_ARM // See declaration for specification comment. void Compiler::fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc, unsigned firstArgRegNum, regMaskTP* pArgSkippedRegMask) { assert(varDsc->lvPromoted); // There's no way to do these calculations without breaking abstraction and assuming that // integer register arguments are consecutive ints. They are on ARM. // To start, figure out what register contains the last byte of the first argument. LclVarDsc* firstFldVarDsc = lvaGetDesc(varDsc->lvFieldLclStart); unsigned lastFldRegOfLastByte = (firstFldVarDsc->lvFldOffset + firstFldVarDsc->lvExactSize - 1) / TARGET_POINTER_SIZE; ; // Now we're keeping track of the register that the last field ended in; see what registers // subsequent fields start in, and whether any are skipped. 
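    // For example (illustrative): a struct { int a; double b; } passed starting at r0 places 'a' in
    // r0 and 'b' (at offset 8 after alignment padding) in r2/r3, so r1 is skipped and gets added to
    // *pArgSkippedRegMask.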
// (We assume here the invariant that the fields are sorted in offset order.) for (unsigned fldVarOffset = 1; fldVarOffset < varDsc->lvFieldCnt; fldVarOffset++) { unsigned fldVarNum = varDsc->lvFieldLclStart + fldVarOffset; LclVarDsc* fldVarDsc = lvaGetDesc(fldVarNum); unsigned fldRegOffset = fldVarDsc->lvFldOffset / TARGET_POINTER_SIZE; assert(fldRegOffset >= lastFldRegOfLastByte); // Assuming sorted fields. // This loop should enumerate the offsets of any registers skipped. // Find what reg contains the last byte: // And start at the first register after that. If that isn't the first reg of the current for (unsigned skippedRegOffsets = lastFldRegOfLastByte + 1; skippedRegOffsets < fldRegOffset; skippedRegOffsets++) { // If the register number would not be an arg reg, we're done. if (firstArgRegNum + skippedRegOffsets >= MAX_REG_ARG) return; *pArgSkippedRegMask |= genRegMask(regNumber(firstArgRegNum + skippedRegOffsets)); } lastFldRegOfLastByte = (fldVarDsc->lvFldOffset + fldVarDsc->lvExactSize - 1) / TARGET_POINTER_SIZE; } } #endif // TARGET_ARM /***************************************************************************** * * A little helper used to rearrange nested commutative operations. The * effect is that nested associative, commutative operations are transformed * into a 'left-deep' tree, i.e. into something like this: * * (((a op b) op c) op d) op... */ #if REARRANGE_ADDS void Compiler::fgMoveOpsLeft(GenTree* tree) { GenTree* op1; GenTree* op2; genTreeOps oper; do { op1 = tree->AsOp()->gtOp1; op2 = tree->AsOp()->gtOp2; oper = tree->OperGet(); noway_assert(GenTree::OperIsCommutative(oper)); noway_assert(oper == GT_ADD || oper == GT_XOR || oper == GT_OR || oper == GT_AND || oper == GT_MUL); noway_assert(!varTypeIsFloating(tree->TypeGet()) || !opts.genFPorder); noway_assert(oper == op2->gtOper); // Commutativity doesn't hold if overflow checks are needed if (tree->gtOverflowEx() || op2->gtOverflowEx()) { return; } if (gtIsActiveCSE_Candidate(op2)) { // If we have marked op2 as a CSE candidate, // we can't perform a commutative reordering // because any value numbers that we computed for op2 // will be incorrect after performing a commutative reordering // return; } if (oper == GT_MUL && (op2->gtFlags & GTF_MUL_64RSLT)) { return; } // Check for GTF_ADDRMODE_NO_CSE flag on add/mul Binary Operators if (((oper == GT_ADD) || (oper == GT_MUL)) && ((tree->gtFlags & GTF_ADDRMODE_NO_CSE) != 0)) { return; } if ((tree->gtFlags | op2->gtFlags) & GTF_BOOLEAN) { // We could deal with this, but we were always broken and just hit the assert // below regarding flags, which means it's not frequent, so will just bail out. // See #195514 return; } noway_assert(!tree->gtOverflowEx() && !op2->gtOverflowEx()); GenTree* ad1 = op2->AsOp()->gtOp1; GenTree* ad2 = op2->AsOp()->gtOp2; // Compiler::optOptimizeBools() can create GT_OR of two GC pointers yeilding a GT_INT // We can not reorder such GT_OR trees // if (varTypeIsGC(ad1->TypeGet()) != varTypeIsGC(op2->TypeGet())) { break; } // Don't split up a byref calculation and create a new byref. E.g., // [byref]+ (ref, [int]+ (int, int)) => [byref]+ ([byref]+ (ref, int), int). // Doing this transformation could create a situation where the first // addition (that is, [byref]+ (ref, int) ) creates a byref pointer that // no longer points within the ref object. If a GC happens, the byref won't // get updated. This can happen, for instance, if one of the int components // is negative. 
It also requires the address generation be in a fully-interruptible // code region. // if (varTypeIsGC(op1->TypeGet()) && op2->TypeGet() == TYP_I_IMPL) { assert(varTypeIsGC(tree->TypeGet()) && (oper == GT_ADD)); break; } /* Change "(x op (y op z))" to "(x op y) op z" */ /* ie. "(op1 op (ad1 op ad2))" to "(op1 op ad1) op ad2" */ GenTree* new_op1 = op2; new_op1->AsOp()->gtOp1 = op1; new_op1->AsOp()->gtOp2 = ad1; /* Change the flags. */ // Make sure we arent throwing away any flags noway_assert((new_op1->gtFlags & ~(GTF_MAKE_CSE | GTF_DONT_CSE | // It is ok that new_op1->gtFlags contains GTF_DONT_CSE flag. GTF_REVERSE_OPS | // The reverse ops flag also can be set, it will be re-calculated GTF_NODE_MASK | GTF_ALL_EFFECT | GTF_UNSIGNED)) == 0); new_op1->gtFlags = (new_op1->gtFlags & (GTF_NODE_MASK | GTF_DONT_CSE)) | // Make sure we propagate GTF_DONT_CSE flag. (op1->gtFlags & GTF_ALL_EFFECT) | (ad1->gtFlags & GTF_ALL_EFFECT); /* Retype new_op1 if it has not/become a GC ptr. */ if (varTypeIsGC(op1->TypeGet())) { noway_assert((varTypeIsGC(tree->TypeGet()) && op2->TypeGet() == TYP_I_IMPL && oper == GT_ADD) || // byref(ref + (int+int)) (varTypeIsI(tree->TypeGet()) && op2->TypeGet() == TYP_I_IMPL && oper == GT_OR)); // int(gcref | int(gcref|intval)) new_op1->gtType = tree->gtType; } else if (varTypeIsGC(ad2->TypeGet())) { // Neither ad1 nor op1 are GC. So new_op1 isnt either noway_assert(op1->gtType == TYP_I_IMPL && ad1->gtType == TYP_I_IMPL); new_op1->gtType = TYP_I_IMPL; } // If new_op1 is a new expression. Assign it a new unique value number. // vnStore is null before the ValueNumber phase has run if (vnStore != nullptr) { // We can only keep the old value number on new_op1 if both op1 and ad2 // have the same non-NoVN value numbers. Since op is commutative, comparing // only ad2 and op1 is enough. if ((op1->gtVNPair.GetLiberal() == ValueNumStore::NoVN) || (ad2->gtVNPair.GetLiberal() == ValueNumStore::NoVN) || (ad2->gtVNPair.GetLiberal() != op1->gtVNPair.GetLiberal())) { new_op1->gtVNPair.SetBoth(vnStore->VNForExpr(nullptr, new_op1->TypeGet())); } } tree->AsOp()->gtOp1 = new_op1; tree->AsOp()->gtOp2 = ad2; /* If 'new_op1' is now the same nested op, process it recursively */ if ((ad1->gtOper == oper) && !ad1->gtOverflowEx()) { fgMoveOpsLeft(new_op1); } /* If 'ad2' is now the same nested op, process it * Instead of recursion, we set up op1 and op2 for the next loop. 
*/ op1 = new_op1; op2 = ad2; } while ((op2->gtOper == oper) && !op2->gtOverflowEx()); return; } #endif /*****************************************************************************/ void Compiler::fgSetRngChkTarget(GenTree* tree, bool delay) { if (tree->OperIs(GT_BOUNDS_CHECK)) { GenTreeBoundsChk* const boundsChk = tree->AsBoundsChk(); BasicBlock* const failBlock = fgSetRngChkTargetInner(boundsChk->gtThrowKind, delay); if (failBlock != nullptr) { boundsChk->gtIndRngFailBB = failBlock; } } else if (tree->OperIs(GT_INDEX_ADDR)) { GenTreeIndexAddr* const indexAddr = tree->AsIndexAddr(); BasicBlock* const failBlock = fgSetRngChkTargetInner(SCK_RNGCHK_FAIL, delay); if (failBlock != nullptr) { indexAddr->gtIndRngFailBB = failBlock; } } else { noway_assert(tree->OperIs(GT_ARR_ELEM, GT_ARR_INDEX)); fgSetRngChkTargetInner(SCK_RNGCHK_FAIL, delay); } } BasicBlock* Compiler::fgSetRngChkTargetInner(SpecialCodeKind kind, bool delay) { if (opts.MinOpts()) { delay = false; } if (!opts.compDbgCode) { if (!delay && !compIsForInlining()) { // Create/find the appropriate "range-fail" label return fgRngChkTarget(compCurBB, kind); } } return nullptr; } /***************************************************************************** * * Expand a GT_INDEX node and fully morph the child operands * * The orginal GT_INDEX node is bashed into the GT_IND node that accesses * the array element. We expand the GT_INDEX node into a larger tree that * evaluates the array base and index. The simplest expansion is a GT_COMMA * with a GT_BOUNDS_CHECK and a GT_IND with a GTF_INX_RNGCHK flag. * For complex array or index expressions one or more GT_COMMA assignments * are inserted so that we only evaluate the array or index expressions once. * * The fully expanded tree is then morphed. This causes gtFoldExpr to * perform local constant prop and reorder the constants in the tree and * fold them. * * We then parse the resulting array element expression in order to locate * and label the constants and variables that occur in the tree. */ const int MAX_ARR_COMPLEXITY = 4; const int MAX_INDEX_COMPLEXITY = 4; GenTree* Compiler::fgMorphArrayIndex(GenTree* tree) { noway_assert(tree->gtOper == GT_INDEX); GenTreeIndex* asIndex = tree->AsIndex(); var_types elemTyp = asIndex->TypeGet(); unsigned elemSize = asIndex->gtIndElemSize; CORINFO_CLASS_HANDLE elemStructType = asIndex->gtStructElemClass; noway_assert(elemTyp != TYP_STRUCT || elemStructType != nullptr); // Fold "cns_str"[cns_index] to ushort constant // NOTE: don't do it for empty string, the operation will fail anyway if (opts.OptimizationEnabled() && asIndex->Arr()->OperIs(GT_CNS_STR) && !asIndex->Arr()->AsStrCon()->IsStringEmptyField() && asIndex->Index()->IsIntCnsFitsInI32()) { const int cnsIndex = static_cast<int>(asIndex->Index()->AsIntConCommon()->IconValue()); if (cnsIndex >= 0) { int length; const char16_t* str = info.compCompHnd->getStringLiteral(asIndex->Arr()->AsStrCon()->gtScpHnd, asIndex->Arr()->AsStrCon()->gtSconCPX, &length); if ((cnsIndex < length) && (str != nullptr)) { GenTree* cnsCharNode = gtNewIconNode(str[cnsIndex], TYP_INT); INDEBUG(cnsCharNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return cnsCharNode; } } } #ifdef FEATURE_SIMD if (featureSIMD && varTypeIsStruct(elemTyp) && structSizeMightRepresentSIMDType(elemSize)) { // If this is a SIMD type, this is the point at which we lose the type information, // so we need to set the correct type on the GT_IND. // (We don't care about the base type here, so we only check, but don't retain, the return value). 
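        // For example (illustrative): for a Vector4[] the 16-byte element is re-typed here, so the
        // indirection created below is a TYP_SIMD16 load rather than a TYP_STRUCT access.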
unsigned simdElemSize = 0; if (getBaseJitTypeAndSizeOfSIMDType(elemStructType, &simdElemSize) != CORINFO_TYPE_UNDEF) { assert(simdElemSize == elemSize); elemTyp = getSIMDTypeForSize(elemSize); // This is the new type of the node. tree->gtType = elemTyp; // Now set elemStructType to null so that we don't confuse value numbering. elemStructType = nullptr; } } #endif // FEATURE_SIMD // Set up the array length's offset into lenOffs // And the first element's offset into elemOffs ssize_t lenOffs; ssize_t elemOffs; if (tree->gtFlags & GTF_INX_STRING_LAYOUT) { lenOffs = OFFSETOF__CORINFO_String__stringLen; elemOffs = OFFSETOF__CORINFO_String__chars; tree->gtFlags &= ~GTF_INX_STRING_LAYOUT; // Clear this flag as it is used for GTF_IND_VOLATILE } else { // We have a standard array lenOffs = OFFSETOF__CORINFO_Array__length; elemOffs = OFFSETOF__CORINFO_Array__data; } // In minopts, we expand GT_INDEX to GT_IND(GT_INDEX_ADDR) in order to minimize the size of the IR. As minopts // compilation time is roughly proportional to the size of the IR, this helps keep compilation times down. // Furthermore, this representation typically saves on code size in minopts w.r.t. the complete expansion // performed when optimizing, as it does not require LclVar nodes (which are always stack loads/stores in // minopts). // // When we *are* optimizing, we fully expand GT_INDEX to: // 1. Evaluate the array address expression and store the result in a temp if the expression is complex or // side-effecting. // 2. Evaluate the array index expression and store the result in a temp if the expression is complex or // side-effecting. // 3. Perform an explicit bounds check: GT_BOUNDS_CHECK(index, GT_ARR_LENGTH(array)) // 4. Compute the address of the element that will be accessed: // GT_ADD(GT_ADD(array, firstElementOffset), GT_MUL(index, elementSize)) // 5. Dereference the address with a GT_IND. // // This expansion explicitly exposes the bounds check and the address calculation to the optimizer, which allows // for more straightforward bounds-check removal, CSE, etc. if (opts.MinOpts()) { GenTree* const array = fgMorphTree(asIndex->Arr()); GenTree* const index = fgMorphTree(asIndex->Index()); GenTreeIndexAddr* const indexAddr = new (this, GT_INDEX_ADDR) GenTreeIndexAddr(array, index, elemTyp, elemStructType, elemSize, static_cast<unsigned>(lenOffs), static_cast<unsigned>(elemOffs)); indexAddr->gtFlags |= (array->gtFlags | index->gtFlags) & GTF_ALL_EFFECT; // Mark the indirection node as needing a range check if necessary. 
// Note this will always be true unless JitSkipArrayBoundCheck() is used if ((indexAddr->gtFlags & GTF_INX_RNGCHK) != 0) { fgSetRngChkTarget(indexAddr); } if (!tree->TypeIs(TYP_STRUCT)) { tree->ChangeOper(GT_IND); } else { DEBUG_DESTROY_NODE(tree); tree = gtNewObjNode(elemStructType, indexAddr); INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); } GenTreeIndir* const indir = tree->AsIndir(); indir->Addr() = indexAddr; bool canCSE = indir->CanCSE(); indir->gtFlags = GTF_IND_ARR_INDEX | (indexAddr->gtFlags & GTF_ALL_EFFECT); if (!canCSE) { indir->SetDoNotCSE(); } INDEBUG(indexAddr->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return indir; } GenTree* arrRef = asIndex->Arr(); GenTree* index = asIndex->Index(); bool chkd = ((tree->gtFlags & GTF_INX_RNGCHK) != 0); // if false, range checking will be disabled bool indexNonFaulting = ((tree->gtFlags & GTF_INX_NOFAULT) != 0); // if true, mark GTF_IND_NONFAULTING bool nCSE = ((tree->gtFlags & GTF_DONT_CSE) != 0); GenTree* arrRefDefn = nullptr; // non-NULL if we need to allocate a temp for the arrRef expression GenTree* indexDefn = nullptr; // non-NULL if we need to allocate a temp for the index expression GenTree* bndsChk = nullptr; // If we're doing range checking, introduce a GT_BOUNDS_CHECK node for the address. if (chkd) { GenTree* arrRef2 = nullptr; // The second copy will be used in array address expression GenTree* index2 = nullptr; // If the arrRef or index expressions involves an assignment, a call or reads from global memory, // then we *must* allocate a temporary in which to "localize" those values, to ensure that the // same values are used in the bounds check and the actual dereference. // Also we allocate the temporary when the expresion is sufficiently complex/expensive. // // Note that if the expression is a GT_FIELD, it has not yet been morphed so its true complexity is // not exposed. Without that condition there are cases of local struct fields that were previously, // needlessly, marked as GTF_GLOB_REF, and when that was fixed, there were some regressions that // were mostly ameliorated by adding this condition. // // Likewise, allocate a temporary if the expression is a GT_LCL_FLD node. These used to be created // after fgMorphArrayIndex from GT_FIELD trees so this preserves the existing behavior. This is // perhaps a decision that should be left to CSE but FX diffs show that it is slightly better to // do this here. if ((arrRef->gtFlags & (GTF_ASG | GTF_CALL | GTF_GLOB_REF)) || gtComplexityExceeds(&arrRef, MAX_ARR_COMPLEXITY) || arrRef->OperIs(GT_FIELD, GT_LCL_FLD)) { unsigned arrRefTmpNum = lvaGrabTemp(true DEBUGARG("arr expr")); arrRefDefn = gtNewTempAssign(arrRefTmpNum, arrRef); arrRef = gtNewLclvNode(arrRefTmpNum, arrRef->TypeGet()); arrRef2 = gtNewLclvNode(arrRefTmpNum, arrRef->TypeGet()); } else { arrRef2 = gtCloneExpr(arrRef); noway_assert(arrRef2 != nullptr); } if ((index->gtFlags & (GTF_ASG | GTF_CALL | GTF_GLOB_REF)) || gtComplexityExceeds(&index, MAX_ARR_COMPLEXITY) || index->OperIs(GT_FIELD, GT_LCL_FLD)) { unsigned indexTmpNum = lvaGrabTemp(true DEBUGARG("index expr")); indexDefn = gtNewTempAssign(indexTmpNum, index); index = gtNewLclvNode(indexTmpNum, index->TypeGet()); index2 = gtNewLclvNode(indexTmpNum, index->TypeGet()); } else { index2 = gtCloneExpr(index); noway_assert(index2 != nullptr); } // Next introduce a GT_BOUNDS_CHECK node var_types bndsChkType = TYP_INT; // By default, try to use 32-bit comparison for array bounds check. 
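        // (The bounds check below compares 'index' against GT_ARR_LENGTH(arrRef); the array length
        //  is widened to TYP_I_IMPL only when the index expression is itself TYP_I_IMPL, since a
        //  32-bit comparison suffices otherwise.)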
#ifdef TARGET_64BIT // The CLI Spec allows an array to be indexed by either an int32 or a native int. In the case // of a 64 bit architecture this means the array index can potentially be a TYP_LONG, so for this case, // the comparison will have to be widen to 64 bits. if (index->TypeGet() == TYP_I_IMPL) { bndsChkType = TYP_I_IMPL; } #endif // TARGET_64BIT GenTree* arrLen = gtNewArrLen(TYP_INT, arrRef, (int)lenOffs, compCurBB); if (bndsChkType != TYP_INT) { arrLen = gtNewCastNode(bndsChkType, arrLen, true, bndsChkType); } GenTreeBoundsChk* arrBndsChk = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(index, arrLen, SCK_RNGCHK_FAIL); bndsChk = arrBndsChk; // Now we'll switch to using the second copies for arrRef and index // to compute the address expression arrRef = arrRef2; index = index2; } // Create the "addr" which is "*(arrRef + ((index * elemSize) + elemOffs))" GenTree* addr; #ifdef TARGET_64BIT // Widen 'index' on 64-bit targets if (index->TypeGet() != TYP_I_IMPL) { if (index->OperGet() == GT_CNS_INT) { index->gtType = TYP_I_IMPL; } else { index = gtNewCastNode(TYP_I_IMPL, index, true, TYP_I_IMPL); } } #endif // TARGET_64BIT /* Scale the index value if necessary */ if (elemSize > 1) { GenTree* size = gtNewIconNode(elemSize, TYP_I_IMPL); // Fix 392756 WP7 Crossgen // // During codegen optGetArrayRefScaleAndIndex() makes the assumption that op2 of a GT_MUL node // is a constant and is not capable of handling CSE'ing the elemSize constant into a lclvar. // Hence to prevent the constant from becoming a CSE we mark it as NO_CSE. // size->gtFlags |= GTF_DONT_CSE; /* Multiply by the array element size */ addr = gtNewOperNode(GT_MUL, TYP_I_IMPL, index, size); } else { addr = index; } // Be careful to only create the byref pointer when the full index expression is added to the array reference. // We don't want to create a partial byref address expression that doesn't include the full index offset: // a byref must point within the containing object. It is dangerous (especially when optimizations come into // play) to create a "partial" byref that doesn't point exactly to the correct object; there is risk that // the partial byref will not point within the object, and thus not get updated correctly during a GC. // This is mostly a risk in fully-interruptible code regions. // We can generate two types of trees for "addr": // // 1) "arrRef + (index + elemOffset)" // 2) "(arrRef + elemOffset) + index" // // XArch has powerful addressing modes such as [base + index*scale + offset] so it's fine with 1), // while for Arm we better try to make an invariant sub-tree as large as possible, which is usually // "(arrRef + elemOffset)" and is CSE/LoopHoisting friendly => produces better codegen. // 2) should still be safe from GC's point of view since both ADD operations are byref and point to // within the object so GC will be able to correctly track and update them. bool groupArrayRefWithElemOffset = false; #ifdef TARGET_ARMARCH groupArrayRefWithElemOffset = true; // TODO: in some cases even on ARM we better use 1) shape because if "index" is invariant and "arrRef" is not // we at least will be able to hoist/CSE "index + elemOffset" in some cases. 
// See https://github.com/dotnet/runtime/pull/61293#issuecomment-964146497 // Use 2) form only for primitive types for now - it significantly reduced number of size regressions if (!varTypeIsIntegral(elemTyp) && !varTypeIsFloating(elemTyp)) { groupArrayRefWithElemOffset = false; } #endif // First element's offset GenTree* elemOffset = gtNewIconNode(elemOffs, TYP_I_IMPL); if (groupArrayRefWithElemOffset) { GenTree* basePlusOffset = gtNewOperNode(GT_ADD, TYP_BYREF, arrRef, elemOffset); addr = gtNewOperNode(GT_ADD, TYP_BYREF, basePlusOffset, addr); } else { addr = gtNewOperNode(GT_ADD, TYP_I_IMPL, addr, elemOffset); addr = gtNewOperNode(GT_ADD, TYP_BYREF, arrRef, addr); } assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE) != 0) || (GenTree::s_gtNodeSizes[GT_IND] == TREE_NODE_SZ_SMALL)); // Change the orginal GT_INDEX node into a GT_IND node tree->SetOper(GT_IND); // If the index node is a floating-point type, notify the compiler // we'll potentially use floating point registers at the time of codegen. if (varTypeUsesFloatReg(tree->gtType)) { this->compFloatingPointUsed = true; } // We've now consumed the GTF_INX_RNGCHK and GTF_INX_NOFAULT, and the node // is no longer a GT_INDEX node. tree->gtFlags &= ~(GTF_INX_RNGCHK | GTF_INX_NOFAULT); tree->AsOp()->gtOp1 = addr; // This is an array index expression. tree->gtFlags |= GTF_IND_ARR_INDEX; // If there's a bounds check, the indir won't fault. if (bndsChk || indexNonFaulting) { tree->gtFlags |= GTF_IND_NONFAULTING; } else { tree->gtFlags |= GTF_EXCEPT; } if (nCSE) { tree->gtFlags |= GTF_DONT_CSE; } // Store information about it. GetArrayInfoMap()->Set(tree, ArrayInfo(elemTyp, elemSize, (int)elemOffs, elemStructType)); // Remember this 'indTree' that we just created, as we still need to attach the fieldSeq information to it. GenTree* indTree = tree; // Did we create a bndsChk tree? if (bndsChk) { // Use a GT_COMMA node to prepend the array bound check // tree = gtNewOperNode(GT_COMMA, elemTyp, bndsChk, tree); /* Mark the indirection node as needing a range check */ fgSetRngChkTarget(bndsChk); } if (indexDefn != nullptr) { // Use a GT_COMMA node to prepend the index assignment // tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), indexDefn, tree); } if (arrRefDefn != nullptr) { // Use a GT_COMMA node to prepend the arRef assignment // tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), arrRefDefn, tree); } JITDUMP("fgMorphArrayIndex (before remorph):\n") DISPTREE(tree) // Currently we morph the tree to perform some folding operations prior // to attaching fieldSeq info and labeling constant array index contributions // tree = fgMorphTree(tree); JITDUMP("fgMorphArrayIndex (after remorph):\n") DISPTREE(tree) // Ideally we just want to proceed to attaching fieldSeq info and labeling the // constant array index contributions, but the morphing operation may have changed // the 'tree' into something that now unconditionally throws an exception. // // In such case the gtEffectiveVal could be a new tree or it's gtOper could be modified // or it could be left unchanged. If it is unchanged then we should not return, // instead we should proceed to attaching fieldSeq info, etc... // GenTree* arrElem = tree->gtEffectiveVal(); if (fgIsCommaThrow(tree)) { if ((arrElem != indTree) || // A new tree node may have been created (!indTree->OperIs(GT_IND))) // The GT_IND may have been changed to a GT_CNS_INT { return tree; // Just return the Comma-Throw, don't try to attach the fieldSeq info, etc.. 
} } assert(!fgGlobalMorph || (arrElem->gtDebugFlags & GTF_DEBUG_NODE_MORPHED)); DBEXEC(fgGlobalMorph && (arrElem == tree), tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED) addr = arrElem->gtGetOp1(); GenTree* cnsOff = nullptr; if (addr->OperIs(GT_ADD)) { GenTree* addrOp1 = addr->gtGetOp1(); if (groupArrayRefWithElemOffset) { if (addrOp1->OperIs(GT_ADD) && addrOp1->gtGetOp2()->IsCnsIntOrI()) { assert(addrOp1->gtGetOp1()->TypeIs(TYP_REF)); cnsOff = addrOp1->gtGetOp2(); addr = addr->gtGetOp2(); // Label any constant array index contributions with #ConstantIndex and any LclVars with // GTF_VAR_ARR_INDEX addr->LabelIndex(this); } else { assert(addr->gtGetOp2()->IsCnsIntOrI()); cnsOff = addr->gtGetOp2(); addr = nullptr; } } else { assert(addr->TypeIs(TYP_BYREF)); assert(addr->gtGetOp1()->TypeIs(TYP_REF)); addr = addr->gtGetOp2(); // Look for the constant [#FirstElem] node here, or as the RHS of an ADD. if (addr->IsCnsIntOrI()) { cnsOff = addr; addr = nullptr; } else { if ((addr->OperIs(GT_ADD)) && addr->gtGetOp2()->IsCnsIntOrI()) { cnsOff = addr->gtGetOp2(); addr = addr->gtGetOp1(); } // Label any constant array index contributions with #ConstantIndex and any LclVars with // GTF_VAR_ARR_INDEX addr->LabelIndex(this); } } } else if (addr->IsCnsIntOrI()) { cnsOff = addr; } FieldSeqNode* firstElemFseq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField); if ((cnsOff != nullptr) && (cnsOff->AsIntCon()->gtIconVal == elemOffs)) { // Assign it the [#FirstElem] field sequence // cnsOff->AsIntCon()->gtFieldSeq = firstElemFseq; } else // We have folded the first element's offset with the index expression { // Build the [#ConstantIndex, #FirstElem] field sequence // FieldSeqNode* constantIndexFseq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField); FieldSeqNode* fieldSeq = GetFieldSeqStore()->Append(constantIndexFseq, firstElemFseq); if (cnsOff == nullptr) // It must have folded into a zero offset { // Record in the general zero-offset map. fgAddFieldSeqForZeroOffset(addr, fieldSeq); } else { cnsOff->AsIntCon()->gtFieldSeq = fieldSeq; } } return tree; } #ifdef TARGET_X86 /***************************************************************************** * * Wrap fixed stack arguments for varargs functions to go through varargs * cookie to access them, except for the cookie itself. * * Non-x86 platforms are allowed to access all arguments directly * so we don't need this code. 
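 *
 * As a rough sketch (illustrative only), a fixed stack argument of such a method is rewritten
 * below into an indirection off the varargs base-of-stack-args local, conceptually:
 *
 *    IND(argType, SUB(LCL_VAR lvaVarargsBaseOfStkArgs, <constant offset>))
 *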
* */ GenTree* Compiler::fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs) { /* For the fixed stack arguments of a varargs function, we need to go through the varargs cookies to access them, except for the cookie itself */ LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->lvIsParam && !varDsc->lvIsRegArg && lclNum != lvaVarargsHandleArg) { // Create a node representing the local pointing to the base of the args GenTree* ptrArg = gtNewOperNode(GT_SUB, TYP_I_IMPL, gtNewLclvNode(lvaVarargsBaseOfStkArgs, TYP_I_IMPL), gtNewIconNode(varDsc->GetStackOffset() - codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES - lclOffs)); // Access the argument through the local GenTree* tree; if (varTypeIsStruct(varType)) { CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd(); assert(typeHnd != nullptr); tree = gtNewObjNode(typeHnd, ptrArg); } else { tree = gtNewOperNode(GT_IND, varType, ptrArg); } tree->gtFlags |= GTF_IND_TGTANYWHERE; if (varDsc->IsAddressExposed()) { tree->gtFlags |= GTF_GLOB_REF; } return fgMorphTree(tree); } return NULL; } #endif /***************************************************************************** * * Transform the given GT_LCL_VAR tree for code generation. */ GenTree* Compiler::fgMorphLocalVar(GenTree* tree, bool forceRemorph) { assert(tree->gtOper == GT_LCL_VAR); unsigned lclNum = tree->AsLclVarCommon()->GetLclNum(); var_types varType = lvaGetRealType(lclNum); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->IsAddressExposed()) { tree->gtFlags |= GTF_GLOB_REF; } #ifdef TARGET_X86 if (info.compIsVarArgs) { GenTree* newTree = fgMorphStackArgForVarArgs(lclNum, varType, 0); if (newTree != nullptr) { if (newTree->OperIsBlk() && ((tree->gtFlags & GTF_VAR_DEF) == 0)) { newTree->SetOper(GT_IND); } return newTree; } } #endif // TARGET_X86 /* If not during the global morphing phase bail */ if (!fgGlobalMorph && !forceRemorph) { return tree; } bool varAddr = (tree->gtFlags & GTF_DONT_CSE) != 0; noway_assert(!(tree->gtFlags & GTF_VAR_DEF) || varAddr); // GTF_VAR_DEF should always imply varAddr if (!varAddr && varDsc->lvNormalizeOnLoad()) { // TYP_BOOL quirk: previously, the code in optAssertionIsSubrange did not handle TYP_BOOL. // Now it does, but this leads to some regressions because we lose the uniform VNs for trees // that represent the "reduced" normalize-on-load locals, i. e. LCL_VAR(small type V00), created // here with local assertions, and "expanded", i. e. CAST(small type <- LCL_VAR(int V00)). // This is a pretty fundamental problem with how normalize-on-load locals appear to the optimizer. // This quirk preserves the previous behavior. // TODO-CQ: fix the VNs for normalize-on-load locals and remove this quirk. bool isBoolQuirk = varType == TYP_BOOL; // Assertion prop can tell us to omit adding a cast here. This is // useful when the local is a small-typed parameter that is passed in a // register: in that case, the ABI specifies that the upper bits might // be invalid, but the assertion guarantees us that we have normalized // when we wrote it. if (optLocalAssertionProp && !isBoolQuirk && optAssertionIsSubrange(tree, IntegralRange::ForType(varType), apFull) != NO_ASSERTION_INDEX) { // The previous assertion can guarantee us that if this node gets // assigned a register, it will be normalized already. It is still // possible that this node ends up being in memory, in which case // normalization will still be needed, so we better have the right // type. 
assert(tree->TypeGet() == varDsc->TypeGet()); return tree; } // Small-typed arguments and aliased locals are normalized on load. // Other small-typed locals are normalized on store. // Also, under the debugger as the debugger could write to the variable. // If this is one of the former, insert a narrowing cast on the load. // ie. Convert: var-short --> cast-short(var-int) tree->gtType = TYP_INT; fgMorphTreeDone(tree); tree = gtNewCastNode(TYP_INT, tree, false, varType); fgMorphTreeDone(tree); return tree; } return tree; } /***************************************************************************** Grab a temp for big offset morphing. This method will grab a new temp if no temp of this "type" has been created. Or it will return the same cached one if it has been created. */ unsigned Compiler::fgGetBigOffsetMorphingTemp(var_types type) { unsigned lclNum = fgBigOffsetMorphingTemps[type]; if (lclNum == BAD_VAR_NUM) { // We haven't created a temp for this kind of type. Create one now. lclNum = lvaGrabTemp(false DEBUGARG("Big Offset Morphing")); fgBigOffsetMorphingTemps[type] = lclNum; } else { // We better get the right type. noway_assert(lvaTable[lclNum].TypeGet() == type); } noway_assert(lclNum != BAD_VAR_NUM); return lclNum; } /***************************************************************************** * * Transform the given GT_FIELD tree for code generation. */ GenTree* Compiler::fgMorphField(GenTree* tree, MorphAddrContext* mac) { assert(tree->gtOper == GT_FIELD); CORINFO_FIELD_HANDLE symHnd = tree->AsField()->gtFldHnd; unsigned fldOffset = tree->AsField()->gtFldOffset; GenTree* objRef = tree->AsField()->GetFldObj(); bool fieldMayOverlap = false; bool objIsLocal = false; if (fgGlobalMorph && (objRef != nullptr) && (objRef->gtOper == GT_ADDR)) { // Make sure we've checked if 'objRef' is an address of an implicit-byref parameter. // If it is, fgMorphImplicitByRefArgs may change it do a different opcode, which the // simd field rewrites are sensitive to. fgMorphImplicitByRefArgs(objRef); } noway_assert(((objRef != nullptr) && (objRef->IsLocalAddrExpr() != nullptr)) || ((tree->gtFlags & GTF_GLOB_REF) != 0)); if (tree->AsField()->gtFldMayOverlap) { fieldMayOverlap = true; // Reset the flag because we may reuse the node. tree->AsField()->gtFldMayOverlap = false; } #ifdef FEATURE_SIMD // if this field belongs to simd struct, translate it to simd intrinsic. if (mac == nullptr) { if (IsBaselineSimdIsaSupported()) { GenTree* newTree = fgMorphFieldToSimdGetElement(tree); if (newTree != tree) { newTree = fgMorphTree(newTree); return newTree; } } } else if ((objRef != nullptr) && (objRef->OperGet() == GT_ADDR) && varTypeIsSIMD(objRef->gtGetOp1())) { GenTreeLclVarCommon* lcl = objRef->IsLocalAddrExpr(); if (lcl != nullptr) { lvaSetVarDoNotEnregister(lcl->GetLclNum() DEBUGARG(DoNotEnregisterReason::LocalField)); } } #endif // Create a default MorphAddrContext early so it doesn't go out of scope // before it is used. MorphAddrContext defMAC(MACK_Ind); /* Is this an instance data member? 
*/ if (objRef) { GenTree* addr; objIsLocal = objRef->IsLocal(); if (tree->gtFlags & GTF_IND_TLS_REF) { NO_WAY("instance field can not be a TLS ref."); } /* We'll create the expression "*(objRef + mem_offs)" */ noway_assert(varTypeIsGC(objRef->TypeGet()) || objRef->TypeGet() == TYP_I_IMPL); /* Now we have a tree like this: +--------------------+ | GT_FIELD | tree +----------+---------+ | +--------------+-------------+ |tree->AsField()->GetFldObj()| +--------------+-------------+ We want to make it like this (when fldOffset is <= MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT): +--------------------+ | GT_IND/GT_OBJ | tree +---------+----------+ | | +---------+----------+ | GT_ADD | addr +---------+----------+ | / \ / \ / \ +-------------------+ +----------------------+ | objRef | | fldOffset | | | | (when fldOffset !=0) | +-------------------+ +----------------------+ or this (when fldOffset is > MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT): +--------------------+ | GT_IND/GT_OBJ | tree +----------+---------+ | +----------+---------+ | GT_COMMA | comma2 +----------+---------+ | / \ / \ / \ / \ +---------+----------+ +---------+----------+ comma | GT_COMMA | | "+" (i.e. GT_ADD) | addr +---------+----------+ +---------+----------+ | | / \ / \ / \ / \ / \ / \ +-----+-----+ +-----+-----+ +---------+ +-----------+ asg | GT_ASG | ind | GT_IND | | tmpLcl | | fldOffset | +-----+-----+ +-----+-----+ +---------+ +-----------+ | | / \ | / \ | / \ | +-----+-----+ +-----+-----+ +-----------+ | tmpLcl | | objRef | | tmpLcl | +-----------+ +-----------+ +-----------+ */ var_types objRefType = objRef->TypeGet(); GenTree* comma = nullptr; // NULL mac means we encounter the GT_FIELD first. This denotes a dereference of the field, // and thus is equivalent to a MACK_Ind with zero offset. if (mac == nullptr) { mac = &defMAC; } // This flag is set to enable the "conservative" style of explicit null-check insertion. // This means that we insert an explicit null check whenever we create byref by adding a // constant offset to a ref, in a MACK_Addr context (meaning that the byref is not immediately // dereferenced). The alternative is "aggressive", which would not insert such checks (for // small offsets); in this plan, we would transfer some null-checking responsibility to // callee's of methods taking byref parameters. They would have to add explicit null checks // when creating derived byrefs from argument byrefs by adding constants to argument byrefs, in // contexts where the resulting derived byref is not immediately dereferenced (or if the offset is too // large). To make the "aggressive" scheme work, however, we'd also have to add explicit derived-from-null // checks for byref parameters to "external" methods implemented in C++, and in P/Invoke stubs. // This is left here to point out how to implement it. CLANG_FORMAT_COMMENT_ANCHOR; #define CONSERVATIVE_NULL_CHECK_BYREF_CREATION 1 bool addExplicitNullCheck = false; // Implicit byref locals and string literals are never null. if (fgAddrCouldBeNull(objRef)) { // If the objRef is a GT_ADDR node, it, itself, never requires null checking. The expression // whose address is being taken is either a local or static variable, whose address is necessarily // non-null, or else it is a field dereference, which will do its own bounds checking if necessary. 
if (objRef->gtOper != GT_ADDR && (mac->m_kind == MACK_Addr || mac->m_kind == MACK_Ind)) { if (!mac->m_allConstantOffsets || fgIsBigOffset(mac->m_totalOffset + fldOffset)) { addExplicitNullCheck = true; } else { // In R2R mode the field offset for some fields may change when the code // is loaded. So we can't rely on a zero offset here to suppress the null check. // // See GitHub issue #16454. bool fieldHasChangeableOffset = false; #ifdef FEATURE_READYTORUN fieldHasChangeableOffset = (tree->AsField()->gtFieldLookup.addr != nullptr); #endif #if CONSERVATIVE_NULL_CHECK_BYREF_CREATION addExplicitNullCheck = (mac->m_kind == MACK_Addr) && ((mac->m_totalOffset + fldOffset > 0) || fieldHasChangeableOffset); #else addExplicitNullCheck = (objRef->gtType == TYP_BYREF && mac->m_kind == MACK_Addr && ((mac->m_totalOffset + fldOffset > 0) || fieldHasChangeableOffset)); #endif } } } if (addExplicitNullCheck) { #ifdef DEBUG if (verbose) { printf("Before explicit null check morphing:\n"); gtDispTree(tree); } #endif // // Create the "comma" subtree // GenTree* asg = nullptr; GenTree* nullchk; unsigned lclNum; if (objRef->gtOper != GT_LCL_VAR) { lclNum = fgGetBigOffsetMorphingTemp(genActualType(objRef->TypeGet())); // Create the "asg" node asg = gtNewTempAssign(lclNum, objRef); } else { lclNum = objRef->AsLclVarCommon()->GetLclNum(); } GenTree* lclVar = gtNewLclvNode(lclNum, objRefType); nullchk = gtNewNullCheck(lclVar, compCurBB); nullchk->gtFlags |= GTF_DONT_CSE; // Don't try to create a CSE for these TYP_BYTE indirections if (asg) { // Create the "comma" node. comma = gtNewOperNode(GT_COMMA, TYP_VOID, // We don't want to return anything from this "comma" node. // Set the type to TYP_VOID, so we can select "cmp" instruction // instead of "mov" instruction later on. asg, nullchk); } else { comma = nullchk; } addr = gtNewLclvNode(lclNum, objRefType); // Use "tmpLcl" to create "addr" node. } else { addr = objRef; } #ifdef FEATURE_READYTORUN if (tree->AsField()->gtFieldLookup.addr != nullptr) { GenTree* offsetNode = nullptr; if (tree->AsField()->gtFieldLookup.accessType == IAT_PVALUE) { offsetNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)tree->AsField()->gtFieldLookup.addr, GTF_ICON_CONST_PTR, true); #ifdef DEBUG offsetNode->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)symHnd; #endif } else { noway_assert(!"unexpected accessType for R2R field access"); } var_types addType = (objRefType == TYP_I_IMPL) ? TYP_I_IMPL : TYP_BYREF; addr = gtNewOperNode(GT_ADD, addType, addr, offsetNode); } #endif if (fldOffset != 0) { // Generate the "addr" node. /* Add the member offset to the object's address */ FieldSeqNode* fieldSeq = fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd); addr = gtNewOperNode(GT_ADD, (var_types)(objRefType == TYP_I_IMPL ? TYP_I_IMPL : TYP_BYREF), addr, gtNewIconHandleNode(fldOffset, GTF_ICON_FIELD_OFF, fieldSeq)); } // Now let's set the "tree" as a GT_IND tree. tree->SetOper(GT_IND); tree->AsOp()->gtOp1 = addr; tree->SetIndirExceptionFlags(this); if (addExplicitNullCheck) { // // Create "comma2" node and link it to "tree". // GenTree* comma2; comma2 = gtNewOperNode(GT_COMMA, addr->TypeGet(), // The type of "comma2" node is the same as the type of "addr" node. 
comma, addr); tree->AsOp()->gtOp1 = comma2; } #ifdef DEBUG if (verbose) { if (addExplicitNullCheck) { printf("After adding explicit null check:\n"); gtDispTree(tree); } } #endif } else /* This is a static data member */ { if (tree->gtFlags & GTF_IND_TLS_REF) { // Thread Local Storage static field reference // // Field ref is a TLS 'Thread-Local-Storage' reference // // Build this tree: IND(*) # // | // ADD(I_IMPL) // / \. // / CNS(fldOffset) // / // / // / // IND(I_IMPL) == [Base of this DLL's TLS] // | // ADD(I_IMPL) // / \. // / CNS(IdValue*4) or MUL // / / \. // IND(I_IMPL) / CNS(4) // | / // CNS(TLS_HDL,0x2C) IND // | // CNS(pIdAddr) // // # Denotes the orginal node // void** pIdAddr = nullptr; unsigned IdValue = info.compCompHnd->getFieldThreadLocalStoreID(symHnd, (void**)&pIdAddr); // // If we can we access the TLS DLL index ID value directly // then pIdAddr will be NULL and // IdValue will be the actual TLS DLL index ID // GenTree* dllRef = nullptr; if (pIdAddr == nullptr) { if (IdValue != 0) { dllRef = gtNewIconNode(IdValue * 4, TYP_I_IMPL); } } else { dllRef = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)pIdAddr, GTF_ICON_CONST_PTR, true); // Next we multiply by 4 dllRef = gtNewOperNode(GT_MUL, TYP_I_IMPL, dllRef, gtNewIconNode(4, TYP_I_IMPL)); } #define WIN32_TLS_SLOTS (0x2C) // Offset from fs:[0] where the pointer to the slots resides // Mark this ICON as a TLS_HDL, codegen will use FS:[cns] GenTree* tlsRef = gtNewIconHandleNode(WIN32_TLS_SLOTS, GTF_ICON_TLS_HDL); // Translate GTF_FLD_INITCLASS to GTF_ICON_INITCLASS if ((tree->gtFlags & GTF_FLD_INITCLASS) != 0) { tree->gtFlags &= ~GTF_FLD_INITCLASS; tlsRef->gtFlags |= GTF_ICON_INITCLASS; } tlsRef = gtNewOperNode(GT_IND, TYP_I_IMPL, tlsRef); if (dllRef != nullptr) { /* Add the dllRef */ tlsRef = gtNewOperNode(GT_ADD, TYP_I_IMPL, tlsRef, dllRef); } /* indirect to have tlsRef point at the base of the DLLs Thread Local Storage */ tlsRef = gtNewOperNode(GT_IND, TYP_I_IMPL, tlsRef); if (fldOffset != 0) { FieldSeqNode* fieldSeq = fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd); GenTree* fldOffsetNode = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, fldOffset, fieldSeq); /* Add the TLS static field offset to the address */ tlsRef = gtNewOperNode(GT_ADD, TYP_I_IMPL, tlsRef, fldOffsetNode); } // Final indirect to get to actual value of TLS static field tree->SetOper(GT_IND); tree->AsOp()->gtOp1 = tlsRef; noway_assert(tree->gtFlags & GTF_IND_TLS_REF); } else { assert(!fieldMayOverlap); // Normal static field reference // // If we can we access the static's address directly // then pFldAddr will be NULL and // fldAddr will be the actual address of the static field // void** pFldAddr = nullptr; void* fldAddr = info.compCompHnd->getFieldAddress(symHnd, (void**)&pFldAddr); // We should always be able to access this static field address directly // assert(pFldAddr == nullptr); // For boxed statics, this direct address will be for the box. We have already added // the indirection for the field itself and attached the sequence, in importation. bool isBoxedStatic = gtIsStaticFieldPtrToBoxedStruct(tree->TypeGet(), symHnd); FieldSeqNode* fldSeq = !isBoxedStatic ? GetFieldSeqStore()->CreateSingleton(symHnd) : FieldSeqStore::NotAField(); // TODO-CQ: enable this optimization for 32 bit targets. 
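            // As a rough sketch (illustrative; the exact handle flavor is picked below), the
            // reference ends up in one of two shapes:
            //
            //    IND(<fieldType>)
            //      |-- CNS_INT(fldAddr, GTF_ICON_STATIC_HDL / GTF_ICON_STATIC_BOX_PTR / GTF_ICON_CONST_PTR)
            //
            // or a GT_CLS_VAR node wrapping the field handle (the older form, still chosen in some
            // of the cases below).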
bool isStaticReadOnlyInited = false; #ifdef TARGET_64BIT if (tree->TypeIs(TYP_REF) && !isBoxedStatic) { bool pIsSpeculative = true; if (info.compCompHnd->getStaticFieldCurrentClass(symHnd, &pIsSpeculative) != NO_CLASS_HANDLE) { isStaticReadOnlyInited = !pIsSpeculative; } } #endif // TARGET_64BIT // TODO: choices made below have mostly historical reasons and // should be unified to always use the IND(<address>) form. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT bool preferIndir = isBoxedStatic || isStaticReadOnlyInited || (IMAGE_REL_BASED_REL32 != eeGetRelocTypeHint(fldAddr)); #else // !TARGET_64BIT bool preferIndir = isBoxedStatic; #endif // !TARGET_64BIT if (preferIndir) { GenTreeFlags handleKind = GTF_EMPTY; if (isBoxedStatic) { handleKind = GTF_ICON_STATIC_BOX_PTR; } else if (isStaticReadOnlyInited) { handleKind = GTF_ICON_CONST_PTR; } else { handleKind = GTF_ICON_STATIC_HDL; } GenTree* addr = gtNewIconHandleNode((size_t)fldAddr, handleKind, fldSeq); // Translate GTF_FLD_INITCLASS to GTF_ICON_INITCLASS, if we need to. if (((tree->gtFlags & GTF_FLD_INITCLASS) != 0) && !isStaticReadOnlyInited) { tree->gtFlags &= ~GTF_FLD_INITCLASS; addr->gtFlags |= GTF_ICON_INITCLASS; } tree->SetOper(GT_IND); tree->AsOp()->gtOp1 = addr; if (isBoxedStatic) { // The box for the static cannot be null, and is logically invariant, since it // represents (a base for) the static's address. tree->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL); } else if (isStaticReadOnlyInited) { JITDUMP("Marking initialized static read-only field '%s' as invariant.\n", eeGetFieldName(symHnd)); // Static readonly field is not null at this point (see getStaticFieldCurrentClass impl). tree->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL); } return fgMorphSmpOp(tree); } else { // Only volatile or classinit could be set, and they map over noway_assert((tree->gtFlags & ~(GTF_FLD_VOLATILE | GTF_FLD_INITCLASS | GTF_COMMON_MASK)) == 0); static_assert_no_msg(GTF_FLD_VOLATILE == GTF_CLS_VAR_VOLATILE); static_assert_no_msg(GTF_FLD_INITCLASS == GTF_CLS_VAR_INITCLASS); tree->SetOper(GT_CLS_VAR); tree->AsClsVar()->gtClsVarHnd = symHnd; tree->AsClsVar()->gtFieldSeq = fldSeq; } return tree; } } noway_assert(tree->gtOper == GT_IND); if (fldOffset == 0) { GenTree* addr = tree->AsOp()->gtOp1; // 'addr' may be a GT_COMMA. Skip over any comma nodes addr = addr->gtEffectiveVal(); #ifdef DEBUG if (verbose) { printf("\nBefore calling fgAddFieldSeqForZeroOffset:\n"); gtDispTree(tree); } #endif // We expect 'addr' to be an address at this point. assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF); // Since we don't make a constant zero to attach the field sequence to, associate it with the "addr" node. FieldSeqNode* fieldSeq = fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd); fgAddFieldSeqForZeroOffset(addr, fieldSeq); } // Pass down the current mac; if non null we are computing an address GenTree* result = fgMorphSmpOp(tree, mac); #ifdef DEBUG if (verbose) { printf("\nFinal value of Compiler::fgMorphField after calling fgMorphSmpOp:\n"); gtDispTree(result); } #endif return result; } //------------------------------------------------------------------------------ // fgMorphCallInline: attempt to inline a call // // Arguments: // call - call expression to inline, inline candidate // inlineResult - result tracking and reporting // // Notes: // Attempts to inline the call. 
// // If successful, callee's IR is inserted in place of the call, and // is marked with an InlineContext. // // If unsuccessful, the transformations done in anticipation of a // possible inline are undone, and the candidate flag on the call // is cleared. void Compiler::fgMorphCallInline(GenTreeCall* call, InlineResult* inlineResult) { bool inliningFailed = false; // Is this call an inline candidate? if (call->IsInlineCandidate()) { InlineContext* createdContext = nullptr; // Attempt the inline fgMorphCallInlineHelper(call, inlineResult, &createdContext); // We should have made up our minds one way or another.... assert(inlineResult->IsDecided()); // If we failed to inline, we have a bit of work to do to cleanup if (inlineResult->IsFailure()) { if (createdContext != nullptr) { // We created a context before we got to the failure, so mark // it as failed in the tree. createdContext->SetFailed(inlineResult); } else { #ifdef DEBUG // In debug we always put all inline attempts into the inline tree. InlineContext* ctx = m_inlineStrategy->NewContext(call->gtInlineCandidateInfo->inlinersContext, fgMorphStmt, call); ctx->SetFailed(inlineResult); #endif } inliningFailed = true; // Clear the Inline Candidate flag so we can ensure later we tried // inlining all candidates. // call->gtFlags &= ~GTF_CALL_INLINE_CANDIDATE; } } else { // This wasn't an inline candidate. So it must be a GDV candidate. assert(call->IsGuardedDevirtualizationCandidate()); // We already know we can't inline this call, so don't even bother to try. inliningFailed = true; } // If we failed to inline (or didn't even try), do some cleanup. if (inliningFailed) { if (call->gtReturnType != TYP_VOID) { JITDUMP("Inlining [%06u] failed, so bashing " FMT_STMT " to NOP\n", dspTreeID(call), fgMorphStmt->GetID()); // Detach the GT_CALL tree from the original statement by // hanging a "nothing" node to it. Later the "nothing" node will be removed // and the original GT_CALL tree will be picked up by the GT_RET_EXPR node. noway_assert(fgMorphStmt->GetRootNode() == call); fgMorphStmt->SetRootNode(gtNewNothingNode()); } } } //------------------------------------------------------------------------------ // fgMorphCallInlineHelper: Helper to attempt to inline a call // // Arguments: // call - call expression to inline, inline candidate // result - result to set to success or failure // createdContext - The context that was created if the inline attempt got to the inliner. // // Notes: // Attempts to inline the call. // // If successful, callee's IR is inserted in place of the call, and // is marked with an InlineContext. // // If unsuccessful, the transformations done in anticipation of a // possible inline are undone, and the candidate flag on the call // is cleared. // // If a context was created because we got to the importer then it is output by this function. // If the inline succeeded, this context will already be marked as successful. If it failed and // a context is returned, then it will not have been marked as success or failed. void Compiler::fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result, InlineContext** createdContext) { // Don't expect any surprises here. assert(result->IsCandidate()); if (lvaCount >= MAX_LV_NUM_COUNT_FOR_INLINING) { // For now, attributing this to call site, though it's really // more of a budget issue (lvaCount currently includes all // caller and prospective callee locals). We still might be // able to inline other callees into this caller, or inline // this callee in other callers. 
result->NoteFatal(InlineObservation::CALLSITE_TOO_MANY_LOCALS); return; } if (call->IsVirtual()) { result->NoteFatal(InlineObservation::CALLSITE_IS_VIRTUAL); return; } // Re-check this because guarded devirtualization may allow these through. if (gtIsRecursiveCall(call) && call->IsImplicitTailCall()) { result->NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL); return; } // impMarkInlineCandidate() is expected not to mark tail prefixed calls // and recursive tail calls as inline candidates. noway_assert(!call->IsTailPrefixedCall()); noway_assert(!call->IsImplicitTailCall() || !gtIsRecursiveCall(call)); // // Calling inlinee's compiler to inline the method. // unsigned startVars = lvaCount; #ifdef DEBUG if (verbose) { printf("Expanding INLINE_CANDIDATE in statement "); printStmtID(fgMorphStmt); printf(" in " FMT_BB ":\n", compCurBB->bbNum); gtDispStmt(fgMorphStmt); if (call->IsImplicitTailCall()) { printf("Note: candidate is implicit tail call\n"); } } #endif impInlineRoot()->m_inlineStrategy->NoteAttempt(result); // // Invoke the compiler to inline the call. // fgInvokeInlineeCompiler(call, result, createdContext); if (result->IsFailure()) { // Undo some changes made in anticipation of inlining... // Zero out the used locals memset(lvaTable + startVars, 0, (lvaCount - startVars) * sizeof(*lvaTable)); for (unsigned i = startVars; i < lvaCount; i++) { new (&lvaTable[i], jitstd::placement_t()) LclVarDsc(); // call the constructor. } lvaCount = startVars; #ifdef DEBUG if (verbose) { // printf("Inlining failed. Restore lvaCount to %d.\n", lvaCount); } #endif return; } #ifdef DEBUG if (verbose) { // printf("After inlining lvaCount=%d.\n", lvaCount); } #endif } //------------------------------------------------------------------------ // fgCanFastTailCall: Check to see if this tail call can be optimized as epilog+jmp. // // Arguments: // callee - The callee to check // failReason - If this method returns false, the reason why. Can be nullptr. // // Return Value: // Returns true or false based on whether the callee can be fastTailCalled // // Notes: // This function is target specific and each target will make the fastTailCall // decision differently. See the notes below. // // This function calls fgInitArgInfo() to initialize the arg info table, which // is used to analyze the argument. This function can alter the call arguments // by adding argument IR nodes for non-standard arguments. // // Windows Amd64: // A fast tail call can be made whenever the number of callee arguments // is less than or equal to the number of caller arguments, or we have four // or fewer callee arguments. This is because, on Windows AMD64, each // argument uses exactly one register or one 8-byte stack slot. Thus, we only // need to count arguments, and not be concerned with the size of each // incoming or outgoing argument. 
// // Can fast tail call examples (amd64 Windows): // // -- Callee will have all register arguments -- // caller(int, int, int, int) // callee(int, int, float, int) // // -- Callee requires stack space that is equal or less than the caller -- // caller(struct, struct, struct, struct, struct, struct) // callee(int, int, int, int, int, int) // // -- Callee requires stack space that is less than the caller -- // caller(struct, double, struct, float, struct, struct) // callee(int, int, int, int, int) // // -- Callee will have all register arguments -- // caller(int) // callee(int, int, int, int) // // Cannot fast tail call examples (amd64 Windows): // // -- Callee requires stack space that is larger than the caller -- // caller(struct, double, struct, float, struct, struct) // callee(int, int, int, int, int, double, double, double) // // -- Callee has a byref struct argument -- // caller(int, int, int) // callee(struct(size 3 bytes)) // // Unix Amd64 && Arm64: // A fastTailCall decision can be made whenever the callee's stack space is // less than or equal to the caller's stack space. There are many permutations // of when the caller and callee have different stack sizes if there are // structs being passed to either the caller or callee. // // Exceptions: // If the callee has a 9 to 16 byte struct argument and the callee has // stack arguments, the decision will be to not fast tail call. This is // because before fgMorphArgs is done, the struct is unknown whether it // will be placed on the stack or enregistered. Therefore, the conservative // decision of do not fast tail call is taken. This limitations should be // removed if/when fgMorphArgs no longer depends on fgCanFastTailCall. // // Can fast tail call examples (amd64 Unix): // // -- Callee will have all register arguments -- // caller(int, int, int, int) // callee(int, int, float, int) // // -- Callee requires stack space that is equal to the caller -- // caller({ long, long }, { int, int }, { int }, { int }, { int }, { int }) -- 6 int register arguments, 16 byte // stack // space // callee(int, int, int, int, int, int, int, int) -- 6 int register arguments, 16 byte stack space // // -- Callee requires stack space that is less than the caller -- // caller({ long, long }, int, { long, long }, int, { long, long }, { long, long }) 6 int register arguments, 32 byte // stack // space // callee(int, int, int, int, int, int, { long, long } ) // 6 int register arguments, 16 byte stack space // // -- Callee will have all register arguments -- // caller(int) // callee(int, int, int, int) // // Cannot fast tail call examples (amd64 Unix): // // -- Callee requires stack space that is larger than the caller -- // caller(float, float, float, float, float, float, float, float) -- 8 float register arguments // callee(int, int, int, int, int, int, int, int) -- 6 int register arguments, 16 byte stack space // // -- Callee has structs which cannot be enregistered (Implementation Limitation) -- // caller(float, float, float, float, float, float, float, float, { double, double, double }) -- 8 float register // arguments, 24 byte stack space // callee({ double, double, double }) -- 24 bytes stack space // // -- Callee requires stack space and has a struct argument >8 bytes and <16 bytes (Implementation Limitation) -- // caller(int, int, int, int, int, int, { double, double, double }) -- 6 int register arguments, 24 byte stack space // callee(int, int, int, int, int, int, { int, int }) -- 6 int registers, 16 byte stack space // // -- Caller requires stack 
space and nCalleeArgs > nCallerArgs (Bug) -- // caller({ double, double, double, double, double, double }) // 48 byte stack // callee(int, int) -- 2 int registers bool Compiler::fgCanFastTailCall(GenTreeCall* callee, const char** failReason) { #if FEATURE_FASTTAILCALL // To reach here means that the return types of the caller and callee are tail call compatible. // In the case of structs that can be returned in a register, compRetNativeType is set to the actual return type. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (callee->IsTailPrefixedCall()) { var_types retType = info.compRetType; assert(impTailCallRetTypeCompatible(false, retType, info.compMethodInfo->args.retTypeClass, info.compCallConv, (var_types)callee->gtReturnType, callee->gtRetClsHnd, callee->GetUnmanagedCallConv())); } #endif assert(!callee->AreArgsComplete()); fgInitArgInfo(callee); fgArgInfo* argInfo = callee->fgArgInfo; unsigned calleeArgStackSize = 0; unsigned callerArgStackSize = info.compArgStackSize; for (unsigned index = 0; index < argInfo->ArgCount(); ++index) { fgArgTabEntry* arg = argInfo->GetArgEntry(index, false); calleeArgStackSize = roundUp(calleeArgStackSize, arg->GetByteAlignment()); calleeArgStackSize += arg->GetStackByteSize(); } calleeArgStackSize = GetOutgoingArgByteSize(calleeArgStackSize); auto reportFastTailCallDecision = [&](const char* thisFailReason) { if (failReason != nullptr) { *failReason = thisFailReason; } #ifdef DEBUG if ((JitConfig.JitReportFastTailCallDecisions()) == 1) { if (callee->gtCallType != CT_INDIRECT) { const char* methodName; methodName = eeGetMethodFullName(callee->gtCallMethHnd); printf("[Fast tailcall decision]: Caller: %s\n[Fast tailcall decision]: Callee: %s -- Decision: ", info.compFullName, methodName); } else { printf("[Fast tailcall decision]: Caller: %s\n[Fast tailcall decision]: Callee: IndirectCall -- " "Decision: ", info.compFullName); } if (thisFailReason == nullptr) { printf("Will fast tailcall"); } else { printf("Will not fast tailcall (%s)", thisFailReason); } printf(" (CallerArgStackSize: %d, CalleeArgStackSize: %d)\n\n", callerArgStackSize, calleeArgStackSize); } else { if (thisFailReason == nullptr) { JITDUMP("[Fast tailcall decision]: Will fast tailcall\n"); } else { JITDUMP("[Fast tailcall decision]: Will not fast tailcall (%s)\n", thisFailReason); } } #endif // DEBUG }; if (!opts.compFastTailCalls) { reportFastTailCallDecision("Configuration doesn't allow fast tail calls"); return false; } if (callee->IsStressTailCall()) { reportFastTailCallDecision("Fast tail calls are not performed under tail call stress"); return false; } // Note on vararg methods: // If the caller is vararg method, we don't know the number of arguments passed by caller's caller. // But we can be sure that in-coming arg area of vararg caller would be sufficient to hold its // fixed args. Therefore, we can allow a vararg method to fast tail call other methods as long as // out-going area required for callee is bounded by caller's fixed argument space. // // Note that callee being a vararg method is not a problem since we can account the params being passed. // // We will currently decide to not fast tail call on Windows armarch if the caller or callee is a vararg // method. This is due to the ABI differences for native vararg methods for these platforms. There is // work required to shuffle arguments to the correct locations. 
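    // As an illustrative (hypothetical) example: a Windows arm64 vararg caller tail calling a
    // non-vararg callee would need its incoming fixed arguments moved out of the native vararg
    // layout before the jump; we do not do that today, so such calls simply take the slow path.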
CLANG_FORMAT_COMMENT_ANCHOR; if (TargetOS::IsWindows && TargetArchitecture::IsArmArch && (info.compIsVarArgs || callee->IsVarargs())) { reportFastTailCallDecision("Fast tail calls with varargs not supported on Windows ARM/ARM64"); return false; } if (compLocallocUsed) { reportFastTailCallDecision("Localloc used"); return false; } #ifdef TARGET_AMD64 // Needed for Jit64 compat. // In future, enabling fast tail calls from methods that need GS cookie // check would require codegen side work to emit GS cookie check before a // tail call. if (getNeedsGSSecurityCookie()) { reportFastTailCallDecision("GS Security cookie check required"); return false; } #endif // If the NextCallReturnAddress intrinsic is used we should do normal calls. if (info.compHasNextCallRetAddr) { reportFastTailCallDecision("Uses NextCallReturnAddress intrinsic"); return false; } if (callee->HasRetBufArg()) // RetBuf { // If callee has RetBuf param, caller too must have it. // Otherwise go the slow route. if (info.compRetBuffArg == BAD_VAR_NUM) { reportFastTailCallDecision("Callee has RetBuf but caller does not."); return false; } } // For a fast tail call the caller will use its incoming arg stack space to place // arguments, so if the callee requires more arg stack space than is available here // the fast tail call cannot be performed. This is common to all platforms. // Note that the GC'ness of on stack args need not match since the arg setup area is marked // as non-interruptible for fast tail calls. if (calleeArgStackSize > callerArgStackSize) { reportFastTailCallDecision("Not enough incoming arg space"); return false; } // For Windows some struct parameters are copied on the local frame // and then passed by reference. We cannot fast tail call in these situation // as we need to keep our frame around. if (fgCallHasMustCopyByrefParameter(callee)) { reportFastTailCallDecision("Callee has a byref parameter"); return false; } reportFastTailCallDecision(nullptr); return true; #else // FEATURE_FASTTAILCALL if (failReason) *failReason = "Fast tailcalls are not supported on this platform"; return false; #endif } //------------------------------------------------------------------------ // fgCallHasMustCopyByrefParameter: Check to see if this call has a byref parameter that // requires a struct copy in the caller. // // Arguments: // callee - The callee to check // // Return Value: // Returns true or false based on whether this call has a byref parameter that // requires a struct copy in the caller. #if FEATURE_FASTTAILCALL bool Compiler::fgCallHasMustCopyByrefParameter(GenTreeCall* callee) { fgArgInfo* argInfo = callee->fgArgInfo; bool hasMustCopyByrefParameter = false; for (unsigned index = 0; index < argInfo->ArgCount(); ++index) { fgArgTabEntry* arg = argInfo->GetArgEntry(index, false); if (arg->isStruct) { if (arg->passedByRef) { // Generally a byref arg will block tail calling, as we have to // make a local copy of the struct for the callee. hasMustCopyByrefParameter = true; // If we're optimizing, we may be able to pass our caller's byref to our callee, // and so still be able to avoid a struct copy. if (opts.OptimizationEnabled()) { // First, see if this arg is an implicit byref param. GenTreeLclVar* const lcl = arg->GetNode()->IsImplicitByrefParameterValue(this); if (lcl != nullptr) { // Yes, the arg is an implicit byref param. 
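                        // As an illustrative (hypothetical) example of the hazard the checks below
                        // guard against: if the caller forwards the same implicit byref parameter
                        // twice, e.g. Callee(s, s), skipping the local copy would leave the two
                        // callee parameters aliased, so the copy (and the block on fast tail
                        // calling) must be kept in that case.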
const unsigned lclNum = lcl->GetLclNum(); LclVarDsc* const varDsc = lvaGetDesc(lcl); // The param must not be promoted; if we've promoted, then the arg will be // a local struct assembled from the promoted fields. if (varDsc->lvPromoted) { JITDUMP("Arg [%06u] is promoted implicit byref V%02u, so no tail call\n", dspTreeID(arg->GetNode()), lclNum); } else { JITDUMP("Arg [%06u] is unpromoted implicit byref V%02u, seeing if we can still tail call\n", dspTreeID(arg->GetNode()), lclNum); // We have to worry about introducing aliases if we bypass copying // the struct at the call. We'll do some limited analysis to see if we // can rule this out. const unsigned argLimit = 6; // If this is the only appearance of the byref in the method, then // aliasing is not possible. // // If no other call arg refers to this byref, and no other arg is // a pointer which could refer to this byref, we can optimize. // // We only check this for calls with small numbers of arguments, // as the analysis cost will be quadratic. // const unsigned totalAppearances = varDsc->lvRefCnt(RCS_EARLY); const unsigned callAppearances = (unsigned)varDsc->lvRefCntWtd(RCS_EARLY); assert(totalAppearances >= callAppearances); if (totalAppearances == 1) { JITDUMP("... yes, arg is the only appearance of V%02u\n", lclNum); hasMustCopyByrefParameter = false; } else if (totalAppearances > callAppearances) { // lvRefCntWtd tracks the number of appearances of the arg at call sites. // If this number doesn't match the regular ref count, there is // a non-call appearance, and we must be conservative. // JITDUMP("... no, arg has %u non-call appearance(s)\n", totalAppearances - callAppearances); } else if (argInfo->ArgCount() <= argLimit) { JITDUMP("... all %u appearance(s) are as implicit byref args to calls.\n" "... Running alias analysis on this call's args\n", totalAppearances); GenTree* interferingArg = nullptr; for (unsigned index2 = 0; index2 < argInfo->ArgCount(); ++index2) { if (index2 == index) { continue; } fgArgTabEntry* const arg2 = argInfo->GetArgEntry(index2, false); JITDUMP("... checking other arg [%06u]...\n", dspTreeID(arg2->GetNode())); DISPTREE(arg2->GetNode()); // Do we pass 'lcl' more than once to the callee? if (arg2->isStruct && arg2->passedByRef) { GenTreeLclVarCommon* const lcl2 = arg2->GetNode()->IsImplicitByrefParameterValue(this); if ((lcl2 != nullptr) && (lclNum == lcl2->GetLclNum())) { // not copying would introduce aliased implicit byref structs // in the callee ... we can't optimize. interferingArg = arg2->GetNode(); break; } else { JITDUMP("... arg refers to different implicit byref V%02u\n", lcl2->GetLclNum()); continue; } } // Do we pass a byref pointer which might point within 'lcl'? // // We can assume the 'lcl' is unaliased on entry to the // method, so the only way we can have an aliasing byref pointer at // the call is if 'lcl' is address taken/exposed in the method. // // Note even though 'lcl' is not promoted, we are in the middle // of the promote->rewrite->undo->(morph)->demote cycle, and so // might see references to promoted fields of 'lcl' that haven't yet // been demoted (see fgMarkDemotedImplicitByRefArgs). // // So, we also need to scan all 'lcl's fields, if any, to see if they // are exposed. // // When looking for aliases from other args, we check for both TYP_BYREF // and TYP_I_IMPL typed args here. 
Conceptually anything that points into // an implicit byref parameter should be TYP_BYREF, as these parameters could // refer to boxed heap locations (say if the method is invoked by reflection) // but there are some stack only structs (like typed references) where // the importer/runtime code uses TYP_I_IMPL, and fgInitArgInfo will // transiently retype all simple address-of implicit parameter args as // TYP_I_IMPL. // if ((arg2->argType == TYP_BYREF) || (arg2->argType == TYP_I_IMPL)) { JITDUMP("...arg is a byref, must run an alias check\n"); bool checkExposure = true; bool hasExposure = false; // See if there is any way arg could refer to a parameter struct. GenTree* arg2Node = arg2->GetNode(); if (arg2Node->OperIs(GT_LCL_VAR)) { GenTreeLclVarCommon* arg2LclNode = arg2Node->AsLclVarCommon(); assert(arg2LclNode->GetLclNum() != lclNum); LclVarDsc* arg2Dsc = lvaGetDesc(arg2LclNode); // Other params can't alias implicit byref params if (arg2Dsc->lvIsParam) { checkExposure = false; } } // Because we're checking TYP_I_IMPL above, at least // screen out obvious things that can't cause aliases. else if (arg2Node->IsIntegralConst()) { checkExposure = false; } if (checkExposure) { JITDUMP( "... not sure where byref arg points, checking if V%02u is exposed\n", lclNum); // arg2 might alias arg, see if we've exposed // arg somewhere in the method. if (varDsc->lvHasLdAddrOp || varDsc->IsAddressExposed()) { // Struct as a whole is exposed, can't optimize JITDUMP("... V%02u is exposed\n", lclNum); hasExposure = true; } else if (varDsc->lvFieldLclStart != 0) { // This is the promoted/undone struct case. // // The field start is actually the local number of the promoted local, // use it to enumerate the fields. const unsigned promotedLcl = varDsc->lvFieldLclStart; LclVarDsc* const promotedVarDsc = lvaGetDesc(promotedLcl); JITDUMP("...promoted-unpromoted case -- also checking exposure of " "fields of V%02u\n", promotedLcl); for (unsigned fieldIndex = 0; fieldIndex < promotedVarDsc->lvFieldCnt; fieldIndex++) { LclVarDsc* fieldDsc = lvaGetDesc(promotedVarDsc->lvFieldLclStart + fieldIndex); if (fieldDsc->lvHasLdAddrOp || fieldDsc->IsAddressExposed()) { // Promoted and not yet demoted field is exposed, can't optimize JITDUMP("... field V%02u is exposed\n", promotedVarDsc->lvFieldLclStart + fieldIndex); hasExposure = true; break; } } } } if (hasExposure) { interferingArg = arg2->GetNode(); break; } } else { JITDUMP("...arg is not a byref or implicit byref (%s)\n", varTypeName(arg2->GetNode()->TypeGet())); } } if (interferingArg != nullptr) { JITDUMP("... no, arg [%06u] may alias with V%02u\n", dspTreeID(interferingArg), lclNum); } else { JITDUMP("... yes, no other arg in call can alias V%02u\n", lclNum); hasMustCopyByrefParameter = false; } } else { JITDUMP(" ... no, call has %u > %u args, alias analysis deemed too costly\n", argInfo->ArgCount(), argLimit); } } } } if (hasMustCopyByrefParameter) { // This arg requires a struct copy. No reason to keep scanning the remaining args. break; } } } } return hasMustCopyByrefParameter; } #endif //------------------------------------------------------------------------ // fgMorphPotentialTailCall: Attempt to morph a call that the importer has // identified as a potential tailcall to an actual tailcall and return the // placeholder node to use in this case. // // Arguments: // call - The call to morph. // // Return Value: // Returns a node to use if the call was morphed into a tailcall. 
If this // function returns a node the call is done being morphed and the new node // should be used. Otherwise the call will have been demoted to a regular call // and should go through normal morph. // // Notes: // This is called only for calls that the importer has already identified as // potential tailcalls. It will do profitability and legality checks and // classify which kind of tailcall we are able to (or should) do, along with // modifying the trees to perform that kind of tailcall. // GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) { // It should either be an explicit (i.e. tail prefixed) or an implicit tail call assert(call->IsTailPrefixedCall() ^ call->IsImplicitTailCall()); // It cannot be an inline candidate assert(!call->IsInlineCandidate()); auto failTailCall = [&](const char* reason, unsigned lclNum = BAD_VAR_NUM) { #ifdef DEBUG if (verbose) { printf("\nRejecting tail call in morph for call "); printTreeID(call); printf(": %s", reason); if (lclNum != BAD_VAR_NUM) { printf(" V%02u", lclNum); } printf("\n"); } #endif // for non user funcs, we have no handles to report info.compCompHnd->reportTailCallDecision(nullptr, (call->gtCallType == CT_USER_FUNC) ? call->gtCallMethHnd : nullptr, call->IsTailPrefixedCall(), TAILCALL_FAIL, reason); // We have checked the candidate so demote. call->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; #if FEATURE_TAILCALL_OPT call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL; #endif }; if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) { failTailCall("Might turn into an intrinsic"); return nullptr; } if (call->IsNoReturn() && !call->IsTailPrefixedCall()) { // Such tail calls always throw an exception and we won't be able to see current // Caller() in the stacktrace. failTailCall("Never returns"); return nullptr; } #ifdef DEBUG if (opts.compGcChecks && (info.compRetType == TYP_REF)) { failTailCall("COMPlus_JitGCChecks or stress might have interposed a call to CORINFO_HELP_CHECK_OBJ, " "invalidating tailcall opportunity"); return nullptr; } #endif // We have to ensure to pass the incoming retValBuf as the // outgoing one. Using a temp will not do as this function will // not regain control to do the copy. This can happen when inlining // a tailcall which also has a potential tailcall in it: the IL looks // like we can do a tailcall, but the trees generated use a temp for the inlinee's // result. TODO-CQ: Fix this. if (info.compRetBuffArg != BAD_VAR_NUM) { noway_assert(call->TypeGet() == TYP_VOID); GenTree* retValBuf = call->gtCallArgs->GetNode(); if (retValBuf->gtOper != GT_LCL_VAR || retValBuf->AsLclVarCommon()->GetLclNum() != info.compRetBuffArg) { failTailCall("Need to copy return buffer"); return nullptr; } } // We are still not sure whether it can be a tail call. Because, when converting // a call to an implicit tail call, we must check that there are no locals with // their address taken. If this is the case, we have to assume that the address // has been leaked and the current stack frame must live until after the final // call. // Verify that none of vars has lvHasLdAddrOp or IsAddressExposed() bit set. Note // that lvHasLdAddrOp is much more conservative. We cannot just base it on // IsAddressExposed() alone since it is not guaranteed to be set on all VarDscs // during morph stage. The reason for also checking IsAddressExposed() is that in case // of vararg methods user args are marked as addr exposed but not lvHasLdAddrOp. 
// The combination of lvHasLdAddrOp and IsAddressExposed() though conservative allows us // never to be incorrect. // // TODO-Throughput: have a compiler level flag to indicate whether method has vars whose // address is taken. Such a flag could be set whenever lvHasLdAddrOp or IsAddressExposed() // is set. This avoids the need for iterating through all lcl vars of the current // method. Right now throughout the code base we are not consistently using 'set' // method to set lvHasLdAddrOp and IsAddressExposed() flags. bool isImplicitOrStressTailCall = call->IsImplicitTailCall() || call->IsStressTailCall(); if (isImplicitOrStressTailCall && compLocallocUsed) { failTailCall("Localloc used"); return nullptr; } bool hasStructParam = false; for (unsigned varNum = 0; varNum < lvaCount; varNum++) { LclVarDsc* varDsc = lvaGetDesc(varNum); // If the method is marked as an explicit tail call we will skip the // following three hazard checks. // We still must check for any struct parameters and set 'hasStructParam' // so that we won't transform the recursive tail call into a loop. // if (isImplicitOrStressTailCall) { if (varDsc->lvHasLdAddrOp && !lvaIsImplicitByRefLocal(varNum)) { failTailCall("Local address taken", varNum); return nullptr; } if (varDsc->IsAddressExposed()) { if (lvaIsImplicitByRefLocal(varNum)) { // The address of the implicit-byref is a non-address use of the pointer parameter. } else if (varDsc->lvIsStructField && lvaIsImplicitByRefLocal(varDsc->lvParentLcl)) { // The address of the implicit-byref's field is likewise a non-address use of the pointer // parameter. } else if (varDsc->lvPromoted && (lvaTable[varDsc->lvFieldLclStart].lvParentLcl != varNum)) { // This temp was used for struct promotion bookkeeping. It will not be used, and will have // its ref count and address-taken flag reset in fgMarkDemotedImplicitByRefArgs. assert(lvaIsImplicitByRefLocal(lvaTable[varDsc->lvFieldLclStart].lvParentLcl)); assert(fgGlobalMorph); } else { failTailCall("Local address taken", varNum); return nullptr; } } if (varDsc->lvPromoted && varDsc->lvIsParam && !lvaIsImplicitByRefLocal(varNum)) { failTailCall("Has Struct Promoted Param", varNum); return nullptr; } if (varDsc->lvPinned) { // A tail call removes the method from the stack, which means the pinning // goes away for the callee. We can't allow that. failTailCall("Has Pinned Vars", varNum); return nullptr; } } if (varTypeIsStruct(varDsc->TypeGet()) && varDsc->lvIsParam) { hasStructParam = true; // This prevents transforming a recursive tail call into a loop // but doesn't prevent tail call optimization so we need to // look at the rest of parameters. } } if (!fgCheckStmtAfterTailCall()) { failTailCall("Unexpected statements after the tail call"); return nullptr; } const char* failReason = nullptr; bool canFastTailCall = fgCanFastTailCall(call, &failReason); CORINFO_TAILCALL_HELPERS tailCallHelpers; bool tailCallViaJitHelper = false; if (!canFastTailCall) { if (call->IsImplicitTailCall()) { // Implicit or opportunistic tail calls are always dispatched via fast tail call // mechanism and never via tail call helper for perf. failTailCall(failReason); return nullptr; } assert(call->IsTailPrefixedCall()); assert(call->tailCallInfo != nullptr); // We do not currently handle non-standard args except for VSD stubs. 
if (!call->IsVirtualStub() && call->HasNonStandardAddedArgs(this))
        {
            failTailCall(
                "Method with non-standard args passed in callee trash register cannot be tail called via helper");
            return nullptr;
        }

        // On x86 we have a faster mechanism than the general one which we use
        // in almost all cases. See fgCanTailCallViaJitHelper for more information.
        if (fgCanTailCallViaJitHelper())
        {
            tailCallViaJitHelper = true;
        }
        else
        {
            // Make sure we can get the helpers. We do this last as the runtime
            // will likely be required to generate these.
            CORINFO_RESOLVED_TOKEN* token = nullptr;
            CORINFO_SIG_INFO*       sig   = call->tailCallInfo->GetSig();
            unsigned                flags = 0;
            if (!call->tailCallInfo->IsCalli())
            {
                token = call->tailCallInfo->GetToken();
                if (call->tailCallInfo->IsCallvirt())
                {
                    flags |= CORINFO_TAILCALL_IS_CALLVIRT;
                }
            }

            if (call->gtCallThisArg != nullptr)
            {
                var_types thisArgType = call->gtCallThisArg->GetNode()->TypeGet();
                if (thisArgType != TYP_REF)
                {
                    flags |= CORINFO_TAILCALL_THIS_ARG_IS_BYREF;
                }
            }

            if (!info.compCompHnd->getTailCallHelpers(token, sig, (CORINFO_GET_TAILCALL_HELPERS_FLAGS)flags,
                                                      &tailCallHelpers))
            {
                failTailCall("Tail call help not available");
                return nullptr;
            }
        }
    }

    // Check if we can make the tailcall a loop.
    bool fastTailCallToLoop = false;
#if FEATURE_TAILCALL_OPT
    // TODO-CQ: enable the transformation when the method has a struct parameter that can be passed in a register
    // or return type is a struct that can be passed in a register.
    //
    // TODO-CQ: if the method being compiled requires generic context reported in gc-info (either through
    // hidden generic context param or through keep alive thisptr), then transforming a recursive
    // call to such a method requires that the generic context stored on the stack slot be updated. Right now,
    // fgMorphRecursiveFastTailCallIntoLoop() is not handling update of generic context while transforming
    // a recursive call into a loop. Another option is to modify gtIsRecursiveCall() to check that the
    // generic type parameters of both caller and callee generic method are the same.
    if (opts.compTailCallLoopOpt && canFastTailCall && gtIsRecursiveCall(call) && !lvaReportParamTypeArg() &&
        !lvaKeepAliveAndReportThis() && !call->IsVirtual() && !hasStructParam && !varTypeIsStruct(call->TypeGet()))
    {
        fastTailCallToLoop = true;
    }
#endif

    // Ok -- now we are committed to performing a tailcall. Report the decision.
    CorInfoTailCall tailCallResult;
    if (fastTailCallToLoop)
    {
        tailCallResult = TAILCALL_RECURSIVE;
    }
    else if (canFastTailCall)
    {
        tailCallResult = TAILCALL_OPTIMIZED;
    }
    else
    {
        tailCallResult = TAILCALL_HELPER;
    }

    info.compCompHnd->reportTailCallDecision(nullptr,
                                             (call->gtCallType == CT_USER_FUNC) ? call->gtCallMethHnd : nullptr,
                                             call->IsTailPrefixedCall(), tailCallResult, nullptr);

    // Are we currently planning to expand the gtControlExpr as an early virtual call target?
    //
    if (call->IsExpandedEarly() && call->IsVirtualVtable())
    {
        // It isn't always profitable to expand a virtual call early
        //
        // We always expand the TAILCALL_HELPER type late.
        // And we expand late when we have an optimized tail call
        // and the this pointer needs to be evaluated into a temp.
        //
        if (tailCallResult == TAILCALL_HELPER)
        {
            // We will always expand this late in lower instead.
// (see LowerTailCallViaJitHelper as it needs some work // for us to be able to expand this earlier in morph) // call->ClearExpandedEarly(); } else if ((tailCallResult == TAILCALL_OPTIMIZED) && ((call->gtCallThisArg->GetNode()->gtFlags & GTF_SIDE_EFFECT) != 0)) { // We generate better code when we expand this late in lower instead. // call->ClearExpandedEarly(); } } // Now actually morph the call. compTailCallUsed = true; // This will prevent inlining this call. call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL; if (tailCallViaJitHelper) { call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL_VIA_JIT_HELPER; } #if FEATURE_TAILCALL_OPT if (fastTailCallToLoop) { call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL_TO_LOOP; } #endif // Mark that this is no longer a pending tailcall. We need to do this before // we call fgMorphCall again (which happens in the fast tailcall case) to // avoid recursing back into this method. call->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; #if FEATURE_TAILCALL_OPT call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL; #endif #ifdef DEBUG if (verbose) { printf("\nGTF_CALL_M_TAILCALL bit set for call "); printTreeID(call); printf("\n"); if (fastTailCallToLoop) { printf("\nGTF_CALL_M_TAILCALL_TO_LOOP bit set for call "); printTreeID(call); printf("\n"); } } #endif // For R2R we might need a different entry point for this call if we are doing a tailcall. // The reason is that the normal delay load helper uses the return address to find the indirection // cell in xarch, but now the JIT is expected to leave the indirection cell in REG_R2R_INDIRECT_PARAM: // We optimize delegate invocations manually in the JIT so skip this for those. if (call->IsR2RRelativeIndir() && canFastTailCall && !fastTailCallToLoop && !call->IsDelegateInvoke()) { info.compCompHnd->updateEntryPointForTailCall(&call->gtEntryPoint); #ifdef TARGET_XARCH // We have already computed arg info to make the fast tailcall decision, but on X64 we now // have to pass the indirection cell, so redo arg info. call->ResetArgInfo(); #endif } // If this block has a flow successor, make suitable updates. // BasicBlock* const nextBlock = compCurBB->GetUniqueSucc(); if (nextBlock == nullptr) { // No unique successor. compCurBB should be a return. // assert(compCurBB->bbJumpKind == BBJ_RETURN); } else { // Flow no longer reaches nextBlock from here. // fgRemoveRefPred(nextBlock, compCurBB); // Adjust profile weights. // // Note if this is a tail call to loop, further updates // are needed once we install the loop edge. // if (compCurBB->hasProfileWeight() && nextBlock->hasProfileWeight()) { // Since we have linear flow we can update the next block weight. // weight_t const blockWeight = compCurBB->bbWeight; weight_t const nextWeight = nextBlock->bbWeight; weight_t const newNextWeight = nextWeight - blockWeight; // If the math would result in a negative weight then there's // no local repair we can do; just leave things inconsistent. // if (newNextWeight >= 0) { // Note if we'd already morphed the IR in nextblock we might // have done something profile sensitive that we should arguably reconsider. 
// JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n", nextBlock->bbNum, nextWeight, newNextWeight); nextBlock->setBBProfileWeight(newNextWeight); } else { JITDUMP("Not reducing profile weight of " FMT_BB " as its weight " FMT_WT " is less than direct flow pred " FMT_BB " weight " FMT_WT "\n", nextBlock->bbNum, nextWeight, compCurBB->bbNum, blockWeight); } // If nextBlock is not a BBJ_RETURN, it should have a unique successor that // is a BBJ_RETURN, as we allow a little bit of flow after a tail call. // if (nextBlock->bbJumpKind != BBJ_RETURN) { BasicBlock* retBlock = nextBlock->GetUniqueSucc(); // Check if we have a sequence of GT_ASG blocks where the same variable is assigned // to temp locals over and over. // Also allow casts on the RHSs of the assignments, and blocks with GT_NOPs. // // { GT_ASG(t_0, GT_CALL(...)) } // { GT_ASG(t_1, t0) } (with casts on rhs potentially) // ... // { GT_ASG(t_n, t_(n - 1)) } // { GT_RET t_n } // if (retBlock->bbJumpKind != BBJ_RETURN) { // Make sure the block has a single statement assert(nextBlock->firstStmt() == nextBlock->lastStmt()); // And the root node is "ASG(LCL_VAR, LCL_VAR)" GenTree* asgNode = nextBlock->firstStmt()->GetRootNode(); assert(asgNode->OperIs(GT_ASG)); unsigned lcl = asgNode->gtGetOp1()->AsLclVarCommon()->GetLclNum(); while (retBlock->bbJumpKind != BBJ_RETURN) { #ifdef DEBUG Statement* nonEmptyStmt = nullptr; for (Statement* const stmt : retBlock->Statements()) { // Ignore NOP statements if (!stmt->GetRootNode()->OperIs(GT_NOP)) { // Only a single non-NOP statement is allowed assert(nonEmptyStmt == nullptr); nonEmptyStmt = stmt; } } if (nonEmptyStmt != nullptr) { asgNode = nonEmptyStmt->GetRootNode(); if (!asgNode->OperIs(GT_NOP)) { assert(asgNode->OperIs(GT_ASG)); GenTree* rhs = asgNode->gtGetOp2(); while (rhs->OperIs(GT_CAST)) { assert(!rhs->gtOverflow()); rhs = rhs->gtGetOp1(); } assert(lcl == rhs->AsLclVarCommon()->GetLclNum()); lcl = asgNode->gtGetOp1()->AsLclVarCommon()->GetLclNum(); } } #endif retBlock = retBlock->GetUniqueSucc(); } } assert(retBlock->bbJumpKind == BBJ_RETURN); if (retBlock->hasProfileWeight()) { // Do similar updates here. // weight_t const nextNextWeight = retBlock->bbWeight; weight_t const newNextNextWeight = nextNextWeight - blockWeight; // If the math would result in an negative weight then there's // no local repair we can do; just leave things inconsistent. // if (newNextNextWeight >= 0) { JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n", retBlock->bbNum, nextNextWeight, newNextNextWeight); retBlock->setBBProfileWeight(newNextNextWeight); } else { JITDUMP("Not reducing profile weight of " FMT_BB " as its weight " FMT_WT " is less than direct flow pred " FMT_BB " weight " FMT_WT "\n", retBlock->bbNum, nextNextWeight, compCurBB->bbNum, blockWeight); } } } } } #if !FEATURE_TAILCALL_OPT_SHARED_RETURN // We enable shared-ret tail call optimization for recursive calls even if // FEATURE_TAILCALL_OPT_SHARED_RETURN is not defined. if (gtIsRecursiveCall(call)) #endif { // Many tailcalls will have call and ret in the same block, and thus be // BBJ_RETURN, but if the call falls through to a ret, and we are doing a // tailcall, change it here. compCurBB->bbJumpKind = BBJ_RETURN; } GenTree* stmtExpr = fgMorphStmt->GetRootNode(); #ifdef DEBUG // Tail call needs to be in one of the following IR forms // Either a call stmt or // GT_RETURN(GT_CALL(..)) or GT_RETURN(GT_CAST(GT_CALL(..))) // var = GT_CALL(..) 
or var = (GT_CAST(GT_CALL(..)))
    //    GT_COMMA(GT_CALL(..), GT_NOP) or GT_COMMA(GT_CAST(GT_CALL(..)), GT_NOP)
    // In the above,
    //    GT_CASTS may be nested.
    genTreeOps stmtOper = stmtExpr->gtOper;
    if (stmtOper == GT_CALL)
    {
        assert(stmtExpr == call);
    }
    else
    {
        assert(stmtOper == GT_RETURN || stmtOper == GT_ASG || stmtOper == GT_COMMA);
        GenTree* treeWithCall;
        if (stmtOper == GT_RETURN)
        {
            treeWithCall = stmtExpr->gtGetOp1();
        }
        else if (stmtOper == GT_COMMA)
        {
            // Second operation must be nop.
            assert(stmtExpr->gtGetOp2()->IsNothingNode());
            treeWithCall = stmtExpr->gtGetOp1();
        }
        else
        {
            treeWithCall = stmtExpr->gtGetOp2();
        }

        // Peel off casts
        while (treeWithCall->gtOper == GT_CAST)
        {
            assert(!treeWithCall->gtOverflow());
            treeWithCall = treeWithCall->gtGetOp1();
        }

        assert(treeWithCall == call);
    }
#endif

    // Store the call type for later to introduce the correct placeholder.
    var_types origCallType = call->TypeGet();

    GenTree* result;
    if (!canFastTailCall && !tailCallViaJitHelper)
    {
        // For tailcall via CORINFO_TAILCALL_HELPERS we transform into regular
        // calls with (to the JIT) regular control flow so we do not need to do
        // much special handling.
        result = fgMorphTailCallViaHelpers(call, tailCallHelpers);
    }
    else
    {
        // Otherwise we will transform into something that does not return. For
        // fast tailcalls a "jump" and for tailcall via JIT helper a call to a
        // JIT helper that does not return. So peel off everything after the
        // call.
        Statement* nextMorphStmt = fgMorphStmt->GetNextStmt();
        JITDUMP("Remove all stmts after the call.\n");
        while (nextMorphStmt != nullptr)
        {
            Statement* stmtToRemove = nextMorphStmt;
            nextMorphStmt           = stmtToRemove->GetNextStmt();
            fgRemoveStmt(compCurBB, stmtToRemove);
        }

        bool     isRootReplaced = false;
        GenTree* root           = fgMorphStmt->GetRootNode();

        if (root != call)
        {
            JITDUMP("Replace root node [%06d] with [%06d] tail call node.\n", dspTreeID(root), dspTreeID(call));
            isRootReplaced = true;
            fgMorphStmt->SetRootNode(call);
        }

        // Avoid potential extra work for the return (for example, vzeroupper)
        call->gtType = TYP_VOID;

        // The runtime requires that we perform a null check on the `this` argument before
        // tail calling to a virtual dispatch stub. This requirement is a consequence of limitations
        // in the runtime's ability to map an AV to a NullReferenceException if
        // the AV occurs in a dispatch stub that has unmanaged caller.
        if (call->IsVirtualStub())
        {
            call->gtFlags |= GTF_CALL_NULLCHECK;
        }

        // Do some target-specific transformations (before we process the args,
        // etc.) for the JIT helper case.
        if (tailCallViaJitHelper)
        {
            fgMorphTailCallViaJitHelper(call);

            // Force re-evaluating the argInfo. fgMorphTailCallViaJitHelper will modify the
            // argument list, invalidating the argInfo.
            call->fgArgInfo = nullptr;
        }

        // Tail call via JIT helper: The VM can't use return address hijacking
        // if we're not going to return and the helper doesn't have enough info
        // to safely poll, so we poll before the tail call, if the block isn't
        // already safe. Since tail call via helper is a slow mechanism it
        // doesn't matter whether we emit a GC poll. This is done to be in parity
        // with Jit64. Also this avoids GC info size increase if almost all
        // methods are expected to be tail calls (e.g. F#).
        //
        // Note that we can avoid emitting GC-poll if we know that the current
        // BB is dominated by a Gc-SafePoint block. But we don't have dominator
        // info at this point. One option is to just add a placeholder node for
        // GC-poll (e.g. GT_GCPOLL) here and remove it in lowering if the block
        // is dominated by a GC-SafePoint.
For now it is not clear whether
        // optimizing slow tail calls is worth the effort. As a low cost check,
        // we check whether the first and current basic blocks are
        // GC-SafePoints.
        //
        // Fast Tail call as epilog+jmp - No need to insert GC-poll. Instead,
        // fgSetBlockOrder() is going to mark the method as fully interruptible
        // if the block containing this tail call is reachable without executing
        // any call.
        BasicBlock* curBlock = compCurBB;
        if (canFastTailCall || (fgFirstBB->bbFlags & BBF_GC_SAFE_POINT) || (compCurBB->bbFlags & BBF_GC_SAFE_POINT) ||
            (fgCreateGCPoll(GCPOLL_INLINE, compCurBB) == curBlock))
        {
            // We didn't insert a poll block, so we need to morph the call now
            // (Normally it will get morphed when we get to the split poll block)
            GenTree* temp = fgMorphCall(call);
            noway_assert(temp == call);
        }

        // Fast tail call: in case of fast tail calls, we need a jmp epilog and
        // hence mark it as BBJ_RETURN with the BBF_HAS_JMP flag set.
        noway_assert(compCurBB->bbJumpKind == BBJ_RETURN);
        if (canFastTailCall)
        {
            compCurBB->bbFlags |= BBF_HAS_JMP;
        }
        else
        {
            // We call CORINFO_HELP_TAILCALL which does not return, so we will
            // not need an epilog.
            compCurBB->bbJumpKind = BBJ_THROW;
        }

        if (isRootReplaced)
        {
            // We have replaced the root node of this stmt and deleted the rest,
            // but we still have the deleted, dead nodes on the `fgMorph*` stack
            // if the root node was an `ASG`, `RET` or `CAST`.
            // Return a zero con node to exit morphing of the old trees without asserts
            // and forbid POST_ORDER morphing doing something wrong with our call.
            var_types callType;
            if (varTypeIsStruct(origCallType))
            {
                CORINFO_CLASS_HANDLE        retClsHnd = call->gtRetClsHnd;
                Compiler::structPassingKind howToReturnStruct;
                callType = getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct);
                assert((howToReturnStruct != SPK_Unknown) && (howToReturnStruct != SPK_ByReference));
                if (howToReturnStruct == SPK_ByValue)
                {
                    callType = TYP_I_IMPL;
                }
                else if (howToReturnStruct == SPK_ByValueAsHfa || varTypeIsSIMD(callType))
                {
                    callType = TYP_FLOAT;
                }
                assert((callType != TYP_UNKNOWN) && !varTypeIsStruct(callType));
            }
            else
            {
                callType = origCallType;
            }
            assert((callType != TYP_UNKNOWN) && !varTypeIsStruct(callType));
            callType = genActualType(callType);

            GenTree* zero = gtNewZeroConNode(callType);
            result        = fgMorphTree(zero);
        }
        else
        {
            result = call;
        }
    }

    return result;
}

//------------------------------------------------------------------------
// fgMorphTailCallViaHelpers: Transform the given GT_CALL tree for tailcall code
// generation.
//
// Arguments:
//    call - The call to transform
//    helpers - The tailcall helpers provided by the runtime.
//
// Return Value:
//    Returns the transformed node.
//
// Notes:
//    This transforms
//      GT_CALL
//          {callTarget}
//          {this}
//          {args}
//    into
//      GT_COMMA
//        GT_CALL StoreArgsStub
//          {callTarget}         (depending on flags provided by the runtime)
//          {this}               (as a regular arg)
//          {args}
//        GT_COMMA
//          GT_CALL Dispatcher
//            GT_ADDR ReturnAddress
//            {CallTargetStub}
//            GT_ADDR ReturnValue
//          GT_LCL ReturnValue
//    whenever the call node returns a value. If the call node does not return a
//    value the last comma will not be there.
//
GenTree* Compiler::fgMorphTailCallViaHelpers(GenTreeCall* call, CORINFO_TAILCALL_HELPERS& help)
{
    // R2R requires different handling but we don't support tailcall via
    // helpers in R2R yet, so just leave it for now.
// TODO: R2R: TailCallViaHelper
    assert(!opts.IsReadyToRun());

    JITDUMP("fgMorphTailCallViaHelpers (before):\n");
    DISPTREE(call);

    // Don't support tail calling helper methods
    assert(call->gtCallType != CT_HELPER);

    // We come this route only for tail prefixed calls that cannot be dispatched as
    // fast tail calls
    assert(!call->IsImplicitTailCall());

    // We want to use the following assert, but it can modify the IR in some cases, so we
    // can't do that in an assert.
    // assert(!fgCanFastTailCall(call, nullptr));

    // We might or might not have called fgInitArgInfo before this point: in
    // builds with FEATURE_FASTTAILCALL we will have called it when checking if
    // we could do a fast tailcall, so it is possible we have added extra IR
    // for non-standard args that we must get rid of. Get rid of that IR here
    // and do this first as it will 'expose' the retbuf as the first arg, which
    // we rely upon in fgCreateCallDispatcherAndGetResult.
    call->ResetArgInfo();

    GenTree* callDispatcherAndGetResult = fgCreateCallDispatcherAndGetResult(call, help.hCallTarget, help.hDispatcher);

    // Change the call to a call to the StoreArgs stub.
    if (call->HasRetBufArg())
    {
        JITDUMP("Removing retbuf");

        call->gtCallArgs = call->gtCallArgs->GetNext();
        call->gtCallMoreFlags &= ~GTF_CALL_M_RETBUFFARG;
    }

    const bool stubNeedsTargetFnPtr = (help.flags & CORINFO_TAILCALL_STORE_TARGET) != 0;

    GenTree* doBeforeStoreArgsStub = nullptr;
    GenTree* thisPtrStubArg        = nullptr;

    // Put 'this' in normal param list
    if (call->gtCallThisArg != nullptr)
    {
        JITDUMP("Moving this pointer into arg list\n");
        GenTree* objp       = call->gtCallThisArg->GetNode();
        GenTree* thisPtr    = nullptr;
        call->gtCallThisArg = nullptr;

        // JIT will need one or two copies of "this" in the following cases:
        //   1) the call needs a null check;
        //   2) the StoreArgs stub needs the target function pointer address and, if the call is virtual,
        //      the stub also needs "this" in order to evaluate the target.

        const bool callNeedsNullCheck = call->NeedsNullCheck();
        const bool stubNeedsThisPtr   = stubNeedsTargetFnPtr && call->IsVirtual();

        // TODO-Review: The following transformation is implemented under the assumption that
        // both conditions can be true. However, I could not construct such an example
        // where a virtual tail call would require a null check. If the conditions turn out
        // to be mutually exclusive, the following could be simplified.

        if (callNeedsNullCheck || stubNeedsThisPtr)
        {
            // Clone "this" if "this" has no side effects.
            if ((objp->gtFlags & GTF_SIDE_EFFECT) == 0)
            {
                thisPtr = gtClone(objp, true);
            }

            // Create a temp and spill "this" to the temp if "this" has side effects or "this" was too complex to clone.
if (thisPtr == nullptr) { const unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr")); // tmp = "this" doBeforeStoreArgsStub = gtNewTempAssign(lclNum, objp); if (callNeedsNullCheck) { // COMMA(tmp = "this", deref(tmp)) GenTree* tmp = gtNewLclvNode(lclNum, objp->TypeGet()); GenTree* nullcheck = gtNewNullCheck(tmp, compCurBB); doBeforeStoreArgsStub = gtNewOperNode(GT_COMMA, TYP_VOID, doBeforeStoreArgsStub, nullcheck); } thisPtr = gtNewLclvNode(lclNum, objp->TypeGet()); if (stubNeedsThisPtr) { thisPtrStubArg = gtNewLclvNode(lclNum, objp->TypeGet()); } } else { if (callNeedsNullCheck) { // deref("this") doBeforeStoreArgsStub = gtNewNullCheck(objp, compCurBB); if (stubNeedsThisPtr) { thisPtrStubArg = gtClone(objp, true); } } else { assert(stubNeedsThisPtr); thisPtrStubArg = objp; } } call->gtFlags &= ~GTF_CALL_NULLCHECK; assert((thisPtrStubArg != nullptr) == stubNeedsThisPtr); } else { thisPtr = objp; } // During rationalization tmp="this" and null check will be materialized // in the right execution order. assert(thisPtr != nullptr); call->gtCallArgs = gtPrependNewCallArg(thisPtr, call->gtCallArgs); } // We may need to pass the target, for instance for calli or generic methods // where we pass instantiating stub. if (stubNeedsTargetFnPtr) { JITDUMP("Adding target since VM requested it\n"); GenTree* target; if (!call->IsVirtual()) { if (call->gtCallType == CT_INDIRECT) { noway_assert(call->gtCallAddr != nullptr); target = call->gtCallAddr; } else { CORINFO_CONST_LOOKUP addrInfo; info.compCompHnd->getFunctionEntryPoint(call->gtCallMethHnd, &addrInfo); CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(addrInfo.accessType != IAT_PPVALUE && addrInfo.accessType != IAT_RELPVALUE); if (addrInfo.accessType == IAT_VALUE) { handle = addrInfo.handle; } else if (addrInfo.accessType == IAT_PVALUE) { pIndirection = addrInfo.addr; } target = gtNewIconEmbHndNode(handle, pIndirection, GTF_ICON_FTN_ADDR, call->gtCallMethHnd); } } else { assert(!call->tailCallInfo->GetSig()->hasTypeArg()); CORINFO_CALL_INFO callInfo; unsigned flags = CORINFO_CALLINFO_LDFTN; if (call->tailCallInfo->IsCallvirt()) { flags |= CORINFO_CALLINFO_CALLVIRT; } eeGetCallInfo(call->tailCallInfo->GetToken(), nullptr, (CORINFO_CALLINFO_FLAGS)flags, &callInfo); target = getVirtMethodPointerTree(thisPtrStubArg, call->tailCallInfo->GetToken(), &callInfo); } // Insert target as last arg GenTreeCall::Use** newArgSlot = &call->gtCallArgs; while (*newArgSlot != nullptr) { newArgSlot = &(*newArgSlot)->NextRef(); } *newArgSlot = gtNewCallArgs(target); } // This is now a direct call to the store args stub and not a tailcall. call->gtCallType = CT_USER_FUNC; call->gtCallMethHnd = help.hStoreArgs; call->gtFlags &= ~GTF_CALL_VIRT_KIND_MASK; call->gtCallMoreFlags &= ~(GTF_CALL_M_TAILCALL | GTF_CALL_M_DELEGATE_INV | GTF_CALL_M_WRAPPER_DELEGATE_INV); // The store-args stub returns no value. 
call->gtRetClsHnd = nullptr; call->gtType = TYP_VOID; call->gtReturnType = TYP_VOID; GenTree* callStoreArgsStub = call; if (doBeforeStoreArgsStub != nullptr) { callStoreArgsStub = gtNewOperNode(GT_COMMA, TYP_VOID, doBeforeStoreArgsStub, callStoreArgsStub); } GenTree* finalTree = gtNewOperNode(GT_COMMA, callDispatcherAndGetResult->TypeGet(), callStoreArgsStub, callDispatcherAndGetResult); finalTree = fgMorphTree(finalTree); JITDUMP("fgMorphTailCallViaHelpers (after):\n"); DISPTREE(finalTree); return finalTree; } //------------------------------------------------------------------------ // fgCreateCallDispatcherAndGetResult: Given a call // CALL // {callTarget} // {retbuf} // {this} // {args} // create a similarly typed node that calls the tailcall dispatcher and returns // the result, as in the following: // COMMA // CALL TailCallDispatcher // ADDR ReturnAddress // &CallTargetFunc // ADDR RetValue // RetValue // If the call has type TYP_VOID, only create the CALL node. // // Arguments: // origCall - the call // callTargetStubHnd - the handle of the CallTarget function (this is a special // IL stub created by the runtime) // dispatcherHnd - the handle of the tailcall dispatcher function // // Return Value: // A node that can be used in place of the original call. // GenTree* Compiler::fgCreateCallDispatcherAndGetResult(GenTreeCall* origCall, CORINFO_METHOD_HANDLE callTargetStubHnd, CORINFO_METHOD_HANDLE dispatcherHnd) { GenTreeCall* callDispatcherNode = gtNewCallNode(CT_USER_FUNC, dispatcherHnd, TYP_VOID, nullptr, fgMorphStmt->GetDebugInfo()); // The dispatcher has signature // void DispatchTailCalls(void* callersRetAddrSlot, void* callTarget, void* retValue) // Add return value arg. GenTree* retValArg; GenTree* retVal = nullptr; unsigned int newRetLcl = BAD_VAR_NUM; GenTree* copyToRetBufNode = nullptr; if (origCall->HasRetBufArg()) { JITDUMP("Transferring retbuf\n"); GenTree* retBufArg = origCall->gtCallArgs->GetNode(); assert(info.compRetBuffArg != BAD_VAR_NUM); assert(retBufArg->OperIsLocal()); assert(retBufArg->AsLclVarCommon()->GetLclNum() == info.compRetBuffArg); // Caller return buffer argument retBufArg can point to GC heap while the dispatcher expects // the return value argument retValArg to point to the stack. // We use a temporary stack allocated return buffer to hold the value during the dispatcher call // and copy the value back to the caller return buffer after that. 
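        // Roughly, the tree produced for this retbuf case looks like (using the names from the code below):
        //
        //   COMMA
        //     COMMA
        //       CALL Dispatcher(&retAddrSlot, CallTargetStub, &tmpRetBuf)
        //       copyBlk(callerRetBuf <- tmpRetBuf)
        //     callerRetBuf                 (only present when the original call returns a value)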
unsigned int tmpRetBufNum = lvaGrabTemp(true DEBUGARG("substitute local for return buffer")); constexpr bool unsafeValueClsCheck = false; lvaSetStruct(tmpRetBufNum, origCall->gtRetClsHnd, unsafeValueClsCheck); lvaSetVarAddrExposed(tmpRetBufNum DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF)); var_types tmpRetBufType = lvaGetDesc(tmpRetBufNum)->TypeGet(); retValArg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(tmpRetBufNum, tmpRetBufType)); var_types callerRetBufType = lvaGetDesc(info.compRetBuffArg)->TypeGet(); GenTree* dstAddr = gtNewLclvNode(info.compRetBuffArg, callerRetBufType); GenTree* dst = gtNewObjNode(info.compMethodInfo->args.retTypeClass, dstAddr); GenTree* src = gtNewLclvNode(tmpRetBufNum, tmpRetBufType); constexpr bool isVolatile = false; constexpr bool isCopyBlock = true; copyToRetBufNode = gtNewBlkOpNode(dst, src, isVolatile, isCopyBlock); if (origCall->gtType != TYP_VOID) { retVal = gtClone(retBufArg); } } else if (origCall->gtType != TYP_VOID) { JITDUMP("Creating a new temp for the return value\n"); newRetLcl = lvaGrabTemp(false DEBUGARG("Return value for tail call dispatcher")); if (varTypeIsStruct(origCall->gtType)) { lvaSetStruct(newRetLcl, origCall->gtRetClsHnd, false); } else { // Since we pass a reference to the return value to the dispatcher // we need to use the real return type so we can normalize it on // load when we return it. lvaTable[newRetLcl].lvType = (var_types)origCall->gtReturnType; } lvaSetVarAddrExposed(newRetLcl DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF)); retValArg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(newRetLcl, genActualType(lvaTable[newRetLcl].lvType))); retVal = gtNewLclvNode(newRetLcl, genActualType(lvaTable[newRetLcl].lvType)); if (varTypeIsStruct(origCall->gtType)) { retVal = impFixupStructReturnType(retVal, origCall->gtRetClsHnd, origCall->GetUnmanagedCallConv()); } } else { JITDUMP("No return value so using null pointer as arg\n"); retValArg = gtNewZeroConNode(TYP_I_IMPL); } callDispatcherNode->gtCallArgs = gtPrependNewCallArg(retValArg, callDispatcherNode->gtCallArgs); // Add callTarget callDispatcherNode->gtCallArgs = gtPrependNewCallArg(new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, callTargetStubHnd), callDispatcherNode->gtCallArgs); // Add the caller's return address slot. if (lvaRetAddrVar == BAD_VAR_NUM) { lvaRetAddrVar = lvaGrabTemp(false DEBUGARG("Return address")); lvaTable[lvaRetAddrVar].lvType = TYP_I_IMPL; lvaSetVarAddrExposed(lvaRetAddrVar DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF)); } GenTree* retAddrSlot = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaRetAddrVar, TYP_I_IMPL)); callDispatcherNode->gtCallArgs = gtPrependNewCallArg(retAddrSlot, callDispatcherNode->gtCallArgs); GenTree* finalTree = callDispatcherNode; if (copyToRetBufNode != nullptr) { finalTree = gtNewOperNode(GT_COMMA, TYP_VOID, callDispatcherNode, copyToRetBufNode); } if (origCall->gtType == TYP_VOID) { return finalTree; } assert(retVal != nullptr); finalTree = gtNewOperNode(GT_COMMA, origCall->TypeGet(), finalTree, retVal); // The JIT seems to want to CSE this comma and messes up multi-reg ret // values in the process. Just avoid CSE'ing this tree entirely in that // case. 
if (origCall->HasMultiRegRetVal()) { finalTree->gtFlags |= GTF_DONT_CSE; } return finalTree; } //------------------------------------------------------------------------ // getLookupTree: get a lookup tree // // Arguments: // pResolvedToken - resolved token of the call // pLookup - the lookup to get the tree for // handleFlags - flags to set on the result node // compileTimeHandle - compile-time handle corresponding to the lookup // // Return Value: // A node representing the lookup tree // GenTree* Compiler::getLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle) { if (!pLookup->lookupKind.needsRuntimeLookup) { // No runtime lookup is required. // Access is direct or memory-indirect (of a fixed address) reference CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE); if (pLookup->constLookup.accessType == IAT_VALUE) { handle = pLookup->constLookup.handle; } else if (pLookup->constLookup.accessType == IAT_PVALUE) { pIndirection = pLookup->constLookup.addr; } return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle); } return getRuntimeLookupTree(pResolvedToken, pLookup, compileTimeHandle); } //------------------------------------------------------------------------ // getRuntimeLookupTree: get a tree for a runtime lookup // // Arguments: // pResolvedToken - resolved token of the call // pLookup - the lookup to get the tree for // compileTimeHandle - compile-time handle corresponding to the lookup // // Return Value: // A node representing the runtime lookup tree // GenTree* Compiler::getRuntimeLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle) { assert(!compIsForInlining()); CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup; // If pRuntimeLookup->indirections is equal to CORINFO_USEHELPER, it specifies that a run-time helper should be // used; otherwise, it specifies the number of indirections via pRuntimeLookup->offsets array. if ((pRuntimeLookup->indirections == CORINFO_USEHELPER) || pRuntimeLookup->testForNull || pRuntimeLookup->testForFixup) { // If the first condition is true, runtime lookup tree is available only via the run-time helper function. // TODO-CQ If the second or third condition is true, we are always using the slow path since we can't // introduce control flow at this point. See impRuntimeLookupToTree for the logic to avoid calling the helper. // The long-term solution is to introduce a new node representing a runtime lookup, create instances // of that node both in the importer and here, and expand the node in lower (introducing control flow if // necessary). 
return gtNewRuntimeLookupHelperCallNode(pRuntimeLookup, getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind), compileTimeHandle); } GenTree* result = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind); ArrayStack<GenTree*> stmts(getAllocator(CMK_ArrayStack)); auto cloneTree = [&](GenTree** tree DEBUGARG(const char* reason)) -> GenTree* { if (!((*tree)->gtFlags & GTF_GLOB_EFFECT)) { GenTree* clone = gtClone(*tree, true); if (clone) { return clone; } } unsigned temp = lvaGrabTemp(true DEBUGARG(reason)); stmts.Push(gtNewTempAssign(temp, *tree)); *tree = gtNewLclvNode(temp, lvaGetActualType(temp)); return gtNewLclvNode(temp, lvaGetActualType(temp)); }; // Apply repeated indirections for (WORD i = 0; i < pRuntimeLookup->indirections; i++) { GenTree* preInd = nullptr; if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset)) { preInd = cloneTree(&result DEBUGARG("getRuntimeLookupTree indirectOffset")); } if (i != 0) { result = gtNewOperNode(GT_IND, TYP_I_IMPL, result); result->gtFlags |= GTF_IND_NONFAULTING; result->gtFlags |= GTF_IND_INVARIANT; } if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset)) { result = gtNewOperNode(GT_ADD, TYP_I_IMPL, preInd, result); } if (pRuntimeLookup->offsets[i] != 0) { result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL)); } } assert(!pRuntimeLookup->testForNull); if (pRuntimeLookup->indirections > 0) { assert(!pRuntimeLookup->testForFixup); result = gtNewOperNode(GT_IND, TYP_I_IMPL, result); result->gtFlags |= GTF_IND_NONFAULTING; } // Produces GT_COMMA(stmt1, GT_COMMA(stmt2, ... GT_COMMA(stmtN, result))) while (!stmts.Empty()) { result = gtNewOperNode(GT_COMMA, TYP_I_IMPL, stmts.Pop(), result); } DISPTREE(result); return result; } //------------------------------------------------------------------------ // getVirtMethodPointerTree: get a tree for a virtual method pointer // // Arguments: // thisPtr - tree representing `this` pointer // pResolvedToken - pointer to the resolved token of the method // pCallInfo - pointer to call info // // Return Value: // A node representing the virtual method pointer GenTree* Compiler::getVirtMethodPointerTree(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { GenTree* exactTypeDesc = getTokenHandleTree(pResolvedToken, true); GenTree* exactMethodDesc = getTokenHandleTree(pResolvedToken, false); GenTreeCall::Use* helpArgs = gtNewCallArgs(thisPtr, exactTypeDesc, exactMethodDesc); return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs); } //------------------------------------------------------------------------ // getTokenHandleTree: get a handle tree for a token // // Arguments: // pResolvedToken - token to get a handle for // parent - whether parent should be imported // // Return Value: // A node representing the virtual method pointer GenTree* Compiler::getTokenHandleTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool parent) { CORINFO_GENERICHANDLE_RESULT embedInfo; info.compCompHnd->embedGenericHandle(pResolvedToken, parent, &embedInfo); GenTree* result = getLookupTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token), embedInfo.compileTimeHandle); // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node. 
if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup) { result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result); } return result; } /***************************************************************************** * * Transform the given GT_CALL tree for tail call via JIT helper. */ void Compiler::fgMorphTailCallViaJitHelper(GenTreeCall* call) { JITDUMP("fgMorphTailCallViaJitHelper (before):\n"); DISPTREE(call); // For the helper-assisted tail calls, we need to push all the arguments // into a single list, and then add a few extra at the beginning or end. // // For x86, the tailcall helper is defined as: // // JIT_TailCall(<function args>, int numberOfOldStackArgsWords, int numberOfNewStackArgsWords, int flags, void* // callTarget) // // Note that the special arguments are on the stack, whereas the function arguments follow // the normal convention: there might be register arguments in ECX and EDX. The stack will // look like (highest address at the top): // first normal stack argument // ... // last normal stack argument // numberOfOldStackArgs // numberOfNewStackArgs // flags // callTarget // // Each special arg is 4 bytes. // // 'flags' is a bitmask where: // 1 == restore callee-save registers (EDI,ESI,EBX). The JIT always saves all // callee-saved registers for tailcall functions. Note that the helper assumes // that the callee-saved registers live immediately below EBP, and must have been // pushed in this order: EDI, ESI, EBX. // 2 == call target is a virtual stub dispatch. // // The x86 tail call helper lives in VM\i386\jithelp.asm. See that function for more details // on the custom calling convention. // Check for PInvoke call types that we don't handle in codegen yet. assert(!call->IsUnmanaged()); assert(call->IsVirtual() || (call->gtCallType != CT_INDIRECT) || (call->gtCallCookie == nullptr)); // Don't support tail calling helper methods assert(call->gtCallType != CT_HELPER); // We come this route only for tail prefixed calls that cannot be dispatched as // fast tail calls assert(!call->IsImplicitTailCall()); // We want to use the following assert, but it can modify the IR in some cases, so we // can't do that in an assert. // assert(!fgCanFastTailCall(call, nullptr)); // First move the 'this' pointer (if any) onto the regular arg list. We do this because // we are going to prepend special arguments onto the argument list (for non-x86 platforms), // and thus shift where the 'this' pointer will be passed to a later argument slot. In // addition, for all platforms, we are going to change the call into a helper call. Our code // generation code for handling calls to helpers does not handle 'this' pointers. So, when we // do this transformation, we must explicitly create a null 'this' pointer check, if required, // since special 'this' pointer handling will no longer kick in. // // Some call types, such as virtual vtable calls, require creating a call address expression // that involves the "this" pointer. Lowering will sometimes create an embedded statement // to create a temporary that is assigned to the "this" pointer expression, and then use // that temp to create the call address expression. This temp creation embedded statement // will occur immediately before the "this" pointer argument, and then will be used for both // the "this" pointer argument as well as the call address expression. In the normal ordering, // the embedded statement establishing the "this" pointer temp will execute before both uses // of the temp. 
However, for tail calls via a helper, we move the "this" pointer onto the // normal call argument list, and insert a placeholder which will hold the call address // expression. For non-x86, things are ok, because the order of execution of these is not // altered. However, for x86, the call address expression is inserted as the *last* argument // in the argument list, *after* the "this" pointer. It will be put on the stack, and be // evaluated first. To ensure we don't end up with out-of-order temp definition and use, // for those cases where call lowering creates an embedded form temp of "this", we will // create a temp here, early, that will later get morphed correctly. if (call->gtCallThisArg != nullptr) { GenTree* thisPtr = nullptr; GenTree* objp = call->gtCallThisArg->GetNode(); call->gtCallThisArg = nullptr; if ((call->IsDelegateInvoke() || call->IsVirtualVtable()) && !objp->OperIs(GT_LCL_VAR)) { // tmp = "this" unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr")); GenTree* asg = gtNewTempAssign(lclNum, objp); // COMMA(tmp = "this", tmp) var_types vt = objp->TypeGet(); GenTree* tmp = gtNewLclvNode(lclNum, vt); thisPtr = gtNewOperNode(GT_COMMA, vt, asg, tmp); objp = thisPtr; } if (call->NeedsNullCheck()) { // clone "this" if "this" has no side effects. if ((thisPtr == nullptr) && !(objp->gtFlags & GTF_SIDE_EFFECT)) { thisPtr = gtClone(objp, true); } var_types vt = objp->TypeGet(); if (thisPtr == nullptr) { // create a temp if either "this" has side effects or "this" is too complex to clone. // tmp = "this" unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr")); GenTree* asg = gtNewTempAssign(lclNum, objp); // COMMA(tmp = "this", deref(tmp)) GenTree* tmp = gtNewLclvNode(lclNum, vt); GenTree* nullcheck = gtNewNullCheck(tmp, compCurBB); asg = gtNewOperNode(GT_COMMA, TYP_VOID, asg, nullcheck); // COMMA(COMMA(tmp = "this", deref(tmp)), tmp) thisPtr = gtNewOperNode(GT_COMMA, vt, asg, gtNewLclvNode(lclNum, vt)); } else { // thisPtr = COMMA(deref("this"), "this") GenTree* nullcheck = gtNewNullCheck(thisPtr, compCurBB); thisPtr = gtNewOperNode(GT_COMMA, vt, nullcheck, gtClone(objp, true)); } call->gtFlags &= ~GTF_CALL_NULLCHECK; } else { thisPtr = objp; } // TODO-Cleanup: we leave it as a virtual stub call to // use logic in `LowerVirtualStubCall`, clear GTF_CALL_VIRT_KIND_MASK here // and change `LowerCall` to recognize it as a direct call. // During rationalization tmp="this" and null check will // materialize as embedded stmts in right execution order. assert(thisPtr != nullptr); call->gtCallArgs = gtPrependNewCallArg(thisPtr, call->gtCallArgs); } // Find the end of the argument list. ppArg will point at the last pointer; setting *ppArg will // append to the list. GenTreeCall::Use** ppArg = &call->gtCallArgs; for (GenTreeCall::Use& use : call->Args()) { ppArg = &use.NextRef(); } assert(ppArg != nullptr); assert(*ppArg == nullptr); unsigned nOldStkArgsWords = (compArgSize - (codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES)) / REGSIZE_BYTES; GenTree* arg3 = gtNewIconNode((ssize_t)nOldStkArgsWords, TYP_I_IMPL); *ppArg = gtNewCallArgs(arg3); // numberOfOldStackArgs ppArg = &((*ppArg)->NextRef()); // Inject a placeholder for the count of outgoing stack arguments that the Lowering phase will generate. // The constant will be replaced. GenTree* arg2 = gtNewIconNode(9, TYP_I_IMPL); *ppArg = gtNewCallArgs(arg2); // numberOfNewStackArgs ppArg = &((*ppArg)->NextRef()); // Inject a placeholder for the flags. // The constant will be replaced. 
GenTree* arg1 = gtNewIconNode(8, TYP_I_IMPL);
    *ppArg        = gtNewCallArgs(arg1);
    ppArg         = &((*ppArg)->NextRef());

    // Inject a placeholder for the real call target that the Lowering phase will generate.
    // The constant will be replaced.
    GenTree* arg0 = gtNewIconNode(7, TYP_I_IMPL);
    *ppArg        = gtNewCallArgs(arg0);

    // It is now a varargs tail call.
    call->gtCallMoreFlags |= GTF_CALL_M_VARARGS;
    call->gtFlags &= ~GTF_CALL_POP_ARGS;

    // The function is responsible for doing an explicit null check when it is necessary.
    assert(!call->NeedsNullCheck());

    JITDUMP("fgMorphTailCallViaJitHelper (after):\n");
    DISPTREE(call);
}

//------------------------------------------------------------------------
// fgGetStubAddrArg: Return the virtual stub address for the given call.
//
// Notes:
//    the JIT must place the address of the stub used to load the call target,
//    the "stub indirection cell", in a special call argument with a special register.
//
// Arguments:
//    call - a call that needs virtual stub dispatching.
//
// Return Value:
//    addr tree with set register requirements.
//
GenTree* Compiler::fgGetStubAddrArg(GenTreeCall* call)
{
    assert(call->IsVirtualStub());
    GenTree* stubAddrArg;
    if (call->gtCallType == CT_INDIRECT)
    {
        stubAddrArg = gtClone(call->gtCallAddr, true);
    }
    else
    {
        assert(call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT);
        ssize_t addr = ssize_t(call->gtStubCallStubAddr);
        stubAddrArg  = gtNewIconHandleNode(addr, GTF_ICON_FTN_ADDR);
#ifdef DEBUG
        stubAddrArg->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd;
#endif
    }
    assert(stubAddrArg != nullptr);
    stubAddrArg->SetRegNum(virtualStubParamInfo->GetReg());
    return stubAddrArg;
}

//------------------------------------------------------------------------------
// fgGetArgTabEntryParameterLclNum : Get the lcl num for the parameter that
// corresponds to the argument to a recursive call.
//
// Notes:
//    Due to non-standard args this is not just fgArgTabEntry::argNum.
//    For example, in R2R compilations we will have added a non-standard
//    arg for the R2R indirection cell.
//
// Arguments:
//    argTabEntry - the arg
//
unsigned Compiler::fgGetArgTabEntryParameterLclNum(GenTreeCall* call, fgArgTabEntry* argTabEntry)
{
    fgArgInfo*      argInfo  = call->fgArgInfo;
    unsigned        argCount = argInfo->ArgCount();
    fgArgTabEntry** argTable = argInfo->ArgTable();

    unsigned numToRemove = 0;
    for (unsigned i = 0; i < argCount; i++)
    {
        fgArgTabEntry* arg = argTable[i];
        // Late added args add extra args that do not map to IL parameters and that we should not reassign.
        if (!arg->isNonStandard() || !arg->isNonStandardArgAddedLate())
            continue;

        if (arg->argNum < argTabEntry->argNum)
            numToRemove++;
    }

    return argTabEntry->argNum - numToRemove;
}

//------------------------------------------------------------------------------
// fgMorphRecursiveFastTailCallIntoLoop : Transform a recursive fast tail call into a loop.
//
//
// Arguments:
//    block  - basic block ending with a recursive fast tail call
//    recursiveTailCall - recursive tail call to transform
//
// Notes:
//    The legality of the transformation is ensured by the checks in endsWithTailCallConvertibleToLoop.
void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall)
{
    assert(recursiveTailCall->IsTailCallConvertibleToLoop());
    Statement* lastStmt = block->lastStmt();
    assert(recursiveTailCall == lastStmt->GetRootNode());

    // Transform recursive tail call into a loop.
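    //
    // In outline, the code below:
    //   1. hoists the arg setup statements (including the one for 'this') ahead of the call statement;
    //   2. assigns each call argument to the matching caller parameter, going through a temp first when the
    //      argument tree might read caller parameters, so parameters are not overwritten before they are used;
    //   3. zero-initializes the locals that the prolog would normally initialize, since the loop skips the prolog;
    //   4. removes the call statement and retargets the block to the loop header (BBJ_ALWAYS).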
Statement* earlyArgInsertionPoint = lastStmt; const DebugInfo& callDI = lastStmt->GetDebugInfo(); // Hoist arg setup statement for the 'this' argument. GenTreeCall::Use* thisArg = recursiveTailCall->gtCallThisArg; if ((thisArg != nullptr) && !thisArg->GetNode()->IsNothingNode() && !thisArg->GetNode()->IsArgPlaceHolderNode()) { Statement* thisArgStmt = gtNewStmt(thisArg->GetNode(), callDI); fgInsertStmtBefore(block, earlyArgInsertionPoint, thisArgStmt); } // All arguments whose trees may involve caller parameter local variables need to be assigned to temps first; // then the temps need to be assigned to the method parameters. This is done so that the caller // parameters are not re-assigned before call arguments depending on them are evaluated. // tmpAssignmentInsertionPoint and paramAssignmentInsertionPoint keep track of // where the next temp or parameter assignment should be inserted. // In the example below the first call argument (arg1 - 1) needs to be assigned to a temp first // while the second call argument (const 1) doesn't. // Basic block before tail recursion elimination: // ***** BB04, stmt 1 (top level) // [000037] ------------ * stmtExpr void (top level) (IL 0x00A...0x013) // [000033] --C - G------ - \--* call void RecursiveMethod // [000030] ------------ | / --* const int - 1 // [000031] ------------arg0 in rcx + --* +int // [000029] ------------ | \--* lclVar int V00 arg1 // [000032] ------------arg1 in rdx \--* const int 1 // // // Basic block after tail recursion elimination : // ***** BB04, stmt 1 (top level) // [000051] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? ) // [000030] ------------ | / --* const int - 1 // [000031] ------------ | / --* +int // [000029] ------------ | | \--* lclVar int V00 arg1 // [000050] - A---------- \--* = int // [000049] D------N---- \--* lclVar int V02 tmp0 // // ***** BB04, stmt 2 (top level) // [000055] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? ) // [000052] ------------ | / --* lclVar int V02 tmp0 // [000054] - A---------- \--* = int // [000053] D------N---- \--* lclVar int V00 arg0 // ***** BB04, stmt 3 (top level) // [000058] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? ) // [000032] ------------ | / --* const int 1 // [000057] - A---------- \--* = int // [000056] D------N---- \--* lclVar int V01 arg1 Statement* tmpAssignmentInsertionPoint = lastStmt; Statement* paramAssignmentInsertionPoint = lastStmt; // Process early args. They may contain both setup statements for late args and actual args. // Early args don't include 'this' arg. We need to account for that so that the call to gtArgEntryByArgNum // below has the correct second argument. int earlyArgIndex = (thisArg == nullptr) ? 0 : 1; for (GenTreeCall::Use& use : recursiveTailCall->Args()) { GenTree* earlyArg = use.GetNode(); if (!earlyArg->IsNothingNode() && !earlyArg->IsArgPlaceHolderNode()) { if ((earlyArg->gtFlags & GTF_LATE_ARG) != 0) { // This is a setup node so we need to hoist it. Statement* earlyArgStmt = gtNewStmt(earlyArg, callDI); fgInsertStmtBefore(block, earlyArgInsertionPoint, earlyArgStmt); } else { // This is an actual argument that needs to be assigned to the corresponding caller parameter. 
fgArgTabEntry* curArgTabEntry = gtArgEntryByArgNum(recursiveTailCall, earlyArgIndex); // Late-added non-standard args are extra args that are not passed as locals, so skip those if (!curArgTabEntry->isNonStandard() || !curArgTabEntry->isNonStandardArgAddedLate()) { Statement* paramAssignStmt = fgAssignRecursiveCallArgToCallerParam(earlyArg, curArgTabEntry, fgGetArgTabEntryParameterLclNum(recursiveTailCall, curArgTabEntry), block, callDI, tmpAssignmentInsertionPoint, paramAssignmentInsertionPoint); if ((tmpAssignmentInsertionPoint == lastStmt) && (paramAssignStmt != nullptr)) { // All temp assignments will happen before the first param assignment. tmpAssignmentInsertionPoint = paramAssignStmt; } } } } earlyArgIndex++; } // Process late args. int lateArgIndex = 0; for (GenTreeCall::Use& use : recursiveTailCall->LateArgs()) { // A late argument is an actual argument that needs to be assigned to the corresponding caller's parameter. GenTree* lateArg = use.GetNode(); fgArgTabEntry* curArgTabEntry = gtArgEntryByLateArgIndex(recursiveTailCall, lateArgIndex); // Late-added non-standard args are extra args that are not passed as locals, so skip those if (!curArgTabEntry->isNonStandard() || !curArgTabEntry->isNonStandardArgAddedLate()) { Statement* paramAssignStmt = fgAssignRecursiveCallArgToCallerParam(lateArg, curArgTabEntry, fgGetArgTabEntryParameterLclNum(recursiveTailCall, curArgTabEntry), block, callDI, tmpAssignmentInsertionPoint, paramAssignmentInsertionPoint); if ((tmpAssignmentInsertionPoint == lastStmt) && (paramAssignStmt != nullptr)) { // All temp assignments will happen before the first param assignment. tmpAssignmentInsertionPoint = paramAssignStmt; } } lateArgIndex++; } // If the method has starg.s 0 or ldarga.s 0 a special local (lvaArg0Var) is created so that // compThisArg stays immutable. Normally it's assigned in fgFirstBBScratch block. Since that // block won't be in the loop (it's assumed to have no predecessors), we need to update the special local here. if (!info.compIsStatic && (lvaArg0Var != info.compThisArg)) { var_types thisType = lvaTable[info.compThisArg].TypeGet(); GenTree* arg0 = gtNewLclvNode(lvaArg0Var, thisType); GenTree* arg0Assignment = gtNewAssignNode(arg0, gtNewLclvNode(info.compThisArg, thisType)); Statement* arg0AssignmentStmt = gtNewStmt(arg0Assignment, callDI); fgInsertStmtBefore(block, paramAssignmentInsertionPoint, arg0AssignmentStmt); } // If compInitMem is set, we may need to zero-initialize some locals. Normally it's done in the prolog // but this loop can't include the prolog. Since we don't have liveness information, we insert zero-initialization // for all non-parameter IL locals as well as temp structs with GC fields. // Liveness phase will remove unnecessary initializations. 
if (info.compInitMem || compSuppressedZeroInit) { unsigned varNum; LclVarDsc* varDsc; for (varNum = 0, varDsc = lvaTable; varNum < lvaCount; varNum++, varDsc++) { #if FEATURE_FIXED_OUT_ARGS if (varNum == lvaOutgoingArgSpaceVar) { continue; } #endif // FEATURE_FIXED_OUT_ARGS if (!varDsc->lvIsParam) { var_types lclType = varDsc->TypeGet(); bool isUserLocal = (varNum < info.compLocalsCount); bool structWithGCFields = ((lclType == TYP_STRUCT) && varDsc->GetLayout()->HasGCPtr()); bool hadSuppressedInit = varDsc->lvSuppressedZeroInit; if ((info.compInitMem && (isUserLocal || structWithGCFields)) || hadSuppressedInit) { GenTree* lcl = gtNewLclvNode(varNum, lclType); GenTree* init = nullptr; if (varTypeIsStruct(lclType)) { const bool isVolatile = false; const bool isCopyBlock = false; init = gtNewBlkOpNode(lcl, gtNewIconNode(0), isVolatile, isCopyBlock); init = fgMorphInitBlock(init); } else { GenTree* zero = gtNewZeroConNode(genActualType(lclType)); init = gtNewAssignNode(lcl, zero); } Statement* initStmt = gtNewStmt(init, callDI); fgInsertStmtBefore(block, lastStmt, initStmt); } } } } // Remove the call fgRemoveStmt(block, lastStmt); // Set the loop edge. if (opts.IsOSR()) { // Todo: this may not look like a viable loop header. // Might need the moral equivalent of a scratch BB. block->bbJumpDest = fgEntryBB; } else { // Ensure we have a scratch block and then target the next // block. Loop detection needs to see a pred out of the loop, // so mark the scratch block BBF_DONT_REMOVE to prevent empty // block removal on it. fgEnsureFirstBBisScratch(); fgFirstBB->bbFlags |= BBF_DONT_REMOVE; block->bbJumpDest = fgFirstBB->bbNext; } // Finish hooking things up. block->bbJumpKind = BBJ_ALWAYS; fgAddRefPred(block->bbJumpDest, block); block->bbFlags &= ~BBF_HAS_JMP; } //------------------------------------------------------------------------------ // fgAssignRecursiveCallArgToCallerParam : Assign argument to a recursive call to the corresponding caller parameter. // // // Arguments: // arg - argument to assign // argTabEntry - argument table entry corresponding to arg // lclParamNum - the lcl num of the parameter // block --- basic block the call is in // callILOffset - IL offset of the call // tmpAssignmentInsertionPoint - tree before which temp assignment should be inserted (if necessary) // paramAssignmentInsertionPoint - tree before which parameter assignment should be inserted // // Return Value: // parameter assignment statement if one was inserted; nullptr otherwise. Statement* Compiler::fgAssignRecursiveCallArgToCallerParam(GenTree* arg, fgArgTabEntry* argTabEntry, unsigned lclParamNum, BasicBlock* block, const DebugInfo& callDI, Statement* tmpAssignmentInsertionPoint, Statement* paramAssignmentInsertionPoint) { // Call arguments should be assigned to temps first and then the temps should be assigned to parameters because // some argument trees may reference parameters directly. GenTree* argInTemp = nullptr; bool needToAssignParameter = true; // TODO-CQ: enable calls with struct arguments passed in registers. noway_assert(!varTypeIsStruct(arg->TypeGet())); if ((argTabEntry->isTmp) || arg->IsCnsIntOrI() || arg->IsCnsFltOrDbl()) { // The argument is already assigned to a temp or is a const. argInTemp = arg; } else if (arg->OperGet() == GT_LCL_VAR) { unsigned lclNum = arg->AsLclVar()->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (!varDsc->lvIsParam) { // The argument is a non-parameter local so it doesn't need to be assigned to a temp. 
argInTemp = arg; } else if (lclNum == lclParamNum) { // The argument is the same parameter local that we were about to assign so // we can skip the assignment. needToAssignParameter = false; } } // TODO: We don't need temp assignments if we can prove that the argument tree doesn't involve // any caller parameters. Some common cases are handled above but we may be able to eliminate // more temp assignments. Statement* paramAssignStmt = nullptr; if (needToAssignParameter) { if (argInTemp == nullptr) { // The argument is not assigned to a temp. We need to create a new temp and insert an assignment. // TODO: we can avoid a temp assignment if we can prove that the argument tree // doesn't involve any caller parameters. unsigned tmpNum = lvaGrabTemp(true DEBUGARG("arg temp")); lvaTable[tmpNum].lvType = arg->gtType; GenTree* tempSrc = arg; GenTree* tempDest = gtNewLclvNode(tmpNum, tempSrc->gtType); GenTree* tmpAssignNode = gtNewAssignNode(tempDest, tempSrc); Statement* tmpAssignStmt = gtNewStmt(tmpAssignNode, callDI); fgInsertStmtBefore(block, tmpAssignmentInsertionPoint, tmpAssignStmt); argInTemp = gtNewLclvNode(tmpNum, tempSrc->gtType); } // Now assign the temp to the parameter. const LclVarDsc* paramDsc = lvaGetDesc(lclParamNum); assert(paramDsc->lvIsParam); GenTree* paramDest = gtNewLclvNode(lclParamNum, paramDsc->lvType); GenTree* paramAssignNode = gtNewAssignNode(paramDest, argInTemp); paramAssignStmt = gtNewStmt(paramAssignNode, callDI); fgInsertStmtBefore(block, paramAssignmentInsertionPoint, paramAssignStmt); } return paramAssignStmt; } /***************************************************************************** * * Transform the given GT_CALL tree for code generation. */ GenTree* Compiler::fgMorphCall(GenTreeCall* call) { if (call->CanTailCall()) { GenTree* newNode = fgMorphPotentialTailCall(call); if (newNode != nullptr) { return newNode; } assert(!call->CanTailCall()); #if FEATURE_MULTIREG_RET if (fgGlobalMorph && call->HasMultiRegRetVal() && varTypeIsStruct(call->TypeGet())) { // The tail call has been rejected so we must finish the work deferred // by impFixupCallStructReturn for multi-reg-returning calls and transform // ret call // into // temp = call // ret temp // Force re-evaluating the argInfo as the return argument has changed. call->ResetArgInfo(); // Create a new temp. unsigned tmpNum = lvaGrabTemp(false DEBUGARG("Return value temp for multi-reg return (rejected tail call).")); lvaTable[tmpNum].lvIsMultiRegRet = true; CORINFO_CLASS_HANDLE structHandle = call->gtRetClsHnd; assert(structHandle != NO_CLASS_HANDLE); const bool unsafeValueClsCheck = false; lvaSetStruct(tmpNum, structHandle, unsafeValueClsCheck); var_types structType = lvaTable[tmpNum].lvType; GenTree* dst = gtNewLclvNode(tmpNum, structType); GenTree* assg = gtNewAssignNode(dst, call); assg = fgMorphTree(assg); // Create the assignment statement and insert it before the current statement. Statement* assgStmt = gtNewStmt(assg, compCurStmt->GetDebugInfo()); fgInsertStmtBefore(compCurBB, compCurStmt, assgStmt); // Return the temp. 
GenTree* result = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType); result->gtFlags |= GTF_DONT_CSE; compCurBB->bbFlags |= BBF_HAS_CALL; // This block has a call #ifdef DEBUG if (verbose) { printf("\nInserting assignment of a multi-reg call result to a temp:\n"); gtDispStmt(assgStmt); } result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif // DEBUG return result; } #endif } if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) == 0 && (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_VIRTUAL_FUNC_PTR) #ifdef FEATURE_READYTORUN || call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR) #endif ) && (call == fgMorphStmt->GetRootNode())) { // This is call to CORINFO_HELP_VIRTUAL_FUNC_PTR with ignored result. // Transform it into a null check. GenTree* thisPtr = call->gtCallArgs->GetNode(); GenTree* nullCheck = gtNewNullCheck(thisPtr, compCurBB); return fgMorphTree(nullCheck); } noway_assert(call->gtOper == GT_CALL); // // Only count calls once (only in the global morph phase) // if (fgGlobalMorph) { if (call->gtCallType == CT_INDIRECT) { optCallCount++; optIndirectCallCount++; } else if (call->gtCallType == CT_USER_FUNC) { optCallCount++; if (call->IsVirtual()) { optIndirectCallCount++; } } } // Couldn't inline - remember that this BB contains method calls // Mark the block as a GC safe point for the call if possible. // In the event the call indicates the block isn't a GC safe point // and the call is unmanaged with a GC transition suppression request // then insert a GC poll. CLANG_FORMAT_COMMENT_ANCHOR; if (IsGcSafePoint(call)) { compCurBB->bbFlags |= BBF_GC_SAFE_POINT; } // Regardless of the state of the basic block with respect to GC safe point, // we will always insert a GC Poll for scenarios involving a suppressed GC // transition. Only mark the block for GC Poll insertion on the first morph. if (fgGlobalMorph && call->IsUnmanaged() && call->IsSuppressGCTransition()) { compCurBB->bbFlags |= (BBF_HAS_SUPPRESSGC_CALL | BBF_GC_SAFE_POINT); optMethodFlags |= OMF_NEEDS_GCPOLLS; } // Morph Type.op_Equality, Type.op_Inequality, and Enum.HasFlag // // We need to do these before the arguments are morphed if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)) { // See if this is foldable GenTree* optTree = gtFoldExprCall(call); // If we optimized, morph the result if (optTree != call) { return fgMorphTree(optTree); } } compCurBB->bbFlags |= BBF_HAS_CALL; // This block has a call /* Process the "normal" argument list */ call = fgMorphArgs(call); noway_assert(call->gtOper == GT_CALL); // Should we expand this virtual method call target early here? // if (call->IsExpandedEarly() && call->IsVirtualVtable()) { // We only expand the Vtable Call target once in the global morph phase if (fgGlobalMorph) { assert(call->gtControlExpr == nullptr); // We only call this method and assign gtControlExpr once call->gtControlExpr = fgExpandVirtualVtableCallTarget(call); } // We always have to morph or re-morph the control expr // call->gtControlExpr = fgMorphTree(call->gtControlExpr); // Propagate any gtFlags into the call call->gtFlags |= call->gtControlExpr->gtFlags; } // Morph stelem.ref helper call to store a null value, into a store into an array without the helper. // This needs to be done after the arguments are morphed to ensure constant propagation has already taken place. 
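    // Roughly (illustrative): a C# store such as "arr[i] = null" is imported as a call to the
    // CORINFO_HELP_ARRADDR_ST helper so that the array covariance check can be performed.
    // Storing the constant null trivially satisfies that check, so the helper call below is
    // replaced with a plain assignment through INDEX(arr, index), while preserving any argument
    // setup that fgMorphArgs may already have spilled to temps.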
if (opts.OptimizationEnabled() && (call->gtCallType == CT_HELPER) && (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_ARRADDR_ST))) { GenTree* value = gtArgEntryByArgNum(call, 2)->GetNode(); if (value->IsIntegralConst(0)) { assert(value->OperGet() == GT_CNS_INT); GenTree* arr = gtArgEntryByArgNum(call, 0)->GetNode(); GenTree* index = gtArgEntryByArgNum(call, 1)->GetNode(); // Either or both of the array and index arguments may have been spilled to temps by `fgMorphArgs`. Copy // the spill trees as well if necessary. GenTreeOp* argSetup = nullptr; for (GenTreeCall::Use& use : call->Args()) { GenTree* const arg = use.GetNode(); if (arg->OperGet() != GT_ASG) { continue; } assert(arg != arr); assert(arg != index); arg->gtFlags &= ~GTF_LATE_ARG; GenTree* op1 = argSetup; if (op1 == nullptr) { op1 = gtNewNothingNode(); #if DEBUG op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif // DEBUG } argSetup = new (this, GT_COMMA) GenTreeOp(GT_COMMA, TYP_VOID, op1, arg); #if DEBUG argSetup->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif // DEBUG } #ifdef DEBUG auto resetMorphedFlag = [](GenTree** slot, fgWalkData* data) -> fgWalkResult { (*slot)->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; return WALK_CONTINUE; }; fgWalkTreePost(&arr, resetMorphedFlag); fgWalkTreePost(&index, resetMorphedFlag); fgWalkTreePost(&value, resetMorphedFlag); #endif // DEBUG GenTree* const arrIndexNode = gtNewIndexRef(TYP_REF, arr, index); GenTree* const arrStore = gtNewAssignNode(arrIndexNode, value); GenTree* result = fgMorphTree(arrStore); if (argSetup != nullptr) { result = new (this, GT_COMMA) GenTreeOp(GT_COMMA, TYP_VOID, argSetup, result); #if DEBUG result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif // DEBUG } return result; } } if (call->IsNoReturn()) { // // If we know that the call does not return then we can set fgRemoveRestOfBlock // to remove all subsequent statements and change the call's basic block to BBJ_THROW. // As a result the compiler won't need to preserve live registers across the call. // // This isn't need for tail calls as there shouldn't be any code after the call anyway. // Besides, the tail call code is part of the epilog and converting the block to // BBJ_THROW would result in the tail call being dropped as the epilog is generated // only for BBJ_RETURN blocks. 
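        // Illustrative example: a call known not to return (e.g. a throw helper) makes every
        // statement after it in the block unreachable, so setting fgRemoveRestOfBlock lets those
        // statements be dropped and the block be converted to BBJ_THROW later on.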
// if (!call->IsTailCall()) { fgRemoveRestOfBlock = true; } } return call; } /***************************************************************************** * * Expand and return the call target address for a VirtualCall * The code here should match that generated by LowerVirtualVtableCall */ GenTree* Compiler::fgExpandVirtualVtableCallTarget(GenTreeCall* call) { GenTree* result; JITDUMP("Expanding virtual call target for %d.%s:\n", call->gtTreeID, GenTree::OpName(call->gtOper)); noway_assert(call->gtCallType == CT_USER_FUNC); // get a reference to the thisPtr being passed fgArgTabEntry* thisArgTabEntry = gtArgEntryByArgNum(call, 0); GenTree* thisPtr = thisArgTabEntry->GetNode(); // fgMorphArgs must enforce this invariant by creating a temp // assert(thisPtr->OperIsLocal()); // Make a copy of the thisPtr by cloning // thisPtr = gtClone(thisPtr, true); noway_assert(thisPtr != nullptr); // Get hold of the vtable offset unsigned vtabOffsOfIndirection; unsigned vtabOffsAfterIndirection; bool isRelative; info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection, &vtabOffsAfterIndirection, &isRelative); // Dereference the this pointer to obtain the method table, it is called vtab below GenTree* vtab; assert(VPTR_OFFS == 0); // We have to add this value to the thisPtr to get the methodTable vtab = gtNewOperNode(GT_IND, TYP_I_IMPL, thisPtr); vtab->gtFlags |= GTF_IND_INVARIANT; // Get the appropriate vtable chunk if (vtabOffsOfIndirection != CORINFO_VIRTUALCALL_NO_CHUNK) { // Note this isRelative code path is currently never executed // as the VM doesn't ever return: isRelative == true // if (isRelative) { // MethodTable offset is a relative pointer. // // Additional temporary variable is used to store virtual table pointer. // Address of method is obtained by the next computations: // // Save relative offset to tmp (vtab is virtual table pointer, vtabOffsOfIndirection is offset of // vtable-1st-level-indirection): // tmp = vtab // // Save address of method to result (vtabOffsAfterIndirection is offset of vtable-2nd-level-indirection): // result = [tmp + vtabOffsOfIndirection + vtabOffsAfterIndirection + [tmp + vtabOffsOfIndirection]] // // // When isRelative is true we need to setup two temporary variables // var1 = vtab // var2 = var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [var1 + vtabOffsOfIndirection] // result = [var2] + var2 // unsigned varNum1 = lvaGrabTemp(true DEBUGARG("var1 - vtab")); unsigned varNum2 = lvaGrabTemp(true DEBUGARG("var2 - relative")); GenTree* asgVar1 = gtNewTempAssign(varNum1, vtab); // var1 = vtab // [tmp + vtabOffsOfIndirection] GenTree* tmpTree1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, gtNewLclvNode(varNum1, TYP_I_IMPL), gtNewIconNode(vtabOffsOfIndirection, TYP_I_IMPL)); tmpTree1 = gtNewOperNode(GT_IND, TYP_I_IMPL, tmpTree1, false); tmpTree1->gtFlags |= GTF_IND_NONFAULTING; tmpTree1->gtFlags |= GTF_IND_INVARIANT; // var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection GenTree* tmpTree2 = gtNewOperNode(GT_ADD, TYP_I_IMPL, gtNewLclvNode(varNum1, TYP_I_IMPL), gtNewIconNode(vtabOffsOfIndirection + vtabOffsAfterIndirection, TYP_I_IMPL)); // var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [var1 + vtabOffsOfIndirection] tmpTree2 = gtNewOperNode(GT_ADD, TYP_I_IMPL, tmpTree2, tmpTree1); GenTree* asgVar2 = gtNewTempAssign(varNum2, tmpTree2); // var2 = <expression> // This last indirection is not invariant, but is non-faulting result = gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewLclvNode(varNum2, TYP_I_IMPL), false); // [var2] 
result->gtFlags |= GTF_IND_NONFAULTING; result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewLclvNode(varNum2, TYP_I_IMPL)); // [var2] + var2 // Now stitch together the two assignment and the calculation of result into a single tree GenTree* commaTree = gtNewOperNode(GT_COMMA, TYP_I_IMPL, asgVar2, result); result = gtNewOperNode(GT_COMMA, TYP_I_IMPL, asgVar1, commaTree); } else { // result = [vtab + vtabOffsOfIndirection] result = gtNewOperNode(GT_ADD, TYP_I_IMPL, vtab, gtNewIconNode(vtabOffsOfIndirection, TYP_I_IMPL)); result = gtNewOperNode(GT_IND, TYP_I_IMPL, result, false); result->gtFlags |= GTF_IND_NONFAULTING; result->gtFlags |= GTF_IND_INVARIANT; } } else { result = vtab; assert(!isRelative); } if (!isRelative) { // Load the function address // result = [result + vtabOffsAfterIndirection] result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewIconNode(vtabOffsAfterIndirection, TYP_I_IMPL)); // This last indirection is not invariant, but is non-faulting result = gtNewOperNode(GT_IND, TYP_I_IMPL, result, false); result->gtFlags |= GTF_IND_NONFAULTING; } return result; } /***************************************************************************** * * Transform the given constant tree for code generation. */ GenTree* Compiler::fgMorphConst(GenTree* tree) { assert(tree->OperIsConst()); /* Clear any exception flags or other unnecessary flags * that may have been set before folding this node to a constant */ tree->gtFlags &= ~(GTF_ALL_EFFECT | GTF_REVERSE_OPS); if (!tree->OperIs(GT_CNS_STR)) { return tree; } if (tree->AsStrCon()->IsStringEmptyField()) { LPVOID pValue; InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue); return fgMorphTree(gtNewStringLiteralNode(iat, pValue)); } // TODO-CQ: Do this for compCurBB->isRunRarely(). Doing that currently will // guarantee slow performance for that block. Instead cache the return value // of CORINFO_HELP_STRCNS and go to cache first giving reasonable perf. bool useLazyStrCns = false; if (compCurBB->bbJumpKind == BBJ_THROW) { useLazyStrCns = true; } else if (fgGlobalMorph && compCurStmt->GetRootNode()->IsCall()) { // Quick check: if the root node of the current statement happens to be a noreturn call. GenTreeCall* call = compCurStmt->GetRootNode()->AsCall(); useLazyStrCns = call->IsNoReturn() || fgIsThrow(call); } if (useLazyStrCns) { CorInfoHelpFunc helper = info.compCompHnd->getLazyStringLiteralHelper(tree->AsStrCon()->gtScpHnd); if (helper != CORINFO_HELP_UNDEF) { // For un-important blocks, we want to construct the string lazily GenTreeCall::Use* args; if (helper == CORINFO_HELP_STRCNS_CURRENT_MODULE) { args = gtNewCallArgs(gtNewIconNode(RidFromToken(tree->AsStrCon()->gtSconCPX), TYP_INT)); } else { args = gtNewCallArgs(gtNewIconNode(RidFromToken(tree->AsStrCon()->gtSconCPX), TYP_INT), gtNewIconEmbScpHndNode(tree->AsStrCon()->gtScpHnd)); } tree = gtNewHelperCallNode(helper, TYP_REF, args); return fgMorphTree(tree); } } assert(tree->AsStrCon()->gtScpHnd == info.compScopeHnd || !IsUninitialized(tree->AsStrCon()->gtScpHnd)); LPVOID pValue; InfoAccessType iat = info.compCompHnd->constructStringLiteral(tree->AsStrCon()->gtScpHnd, tree->AsStrCon()->gtSconCPX, &pValue); tree = gtNewStringLiteralNode(iat, pValue); return fgMorphTree(tree); } //------------------------------------------------------------------------ // fgMorphTryFoldObjAsLclVar: try to fold an Obj node as a LclVar. // // Arguments: // obj - the obj node. 
// destroyNodes -- destroy nodes that are optimized away // // Return value: // GenTreeLclVar if the obj can be replaced by it, null otherwise. // // Notes: // TODO-CQ: currently this transformation is done only under copy block, // but it is benefitial to do for each OBJ node. However, `PUT_ARG_STACK` // for some platforms does not expect struct `LCL_VAR` as a source, so // it needs more work. // GenTreeLclVar* Compiler::fgMorphTryFoldObjAsLclVar(GenTreeObj* obj, bool destroyNodes) { if (opts.OptimizationEnabled()) { GenTree* op1 = obj->Addr(); assert(!op1->OperIs(GT_LCL_VAR_ADDR) && "missed an opt opportunity"); if (op1->OperIs(GT_ADDR)) { GenTreeUnOp* addr = op1->AsUnOp(); GenTree* addrOp = addr->gtGetOp1(); if (addrOp->TypeIs(obj->TypeGet()) && addrOp->OperIs(GT_LCL_VAR)) { GenTreeLclVar* lclVar = addrOp->AsLclVar(); ClassLayout* lclVarLayout = lvaGetDesc(lclVar)->GetLayout(); ClassLayout* objLayout = obj->GetLayout(); if (ClassLayout::AreCompatible(lclVarLayout, objLayout)) { #ifdef DEBUG CORINFO_CLASS_HANDLE objClsHandle = obj->GetLayout()->GetClassHandle(); assert(objClsHandle != NO_CLASS_HANDLE); if (verbose) { CORINFO_CLASS_HANDLE lclClsHnd = gtGetStructHandle(lclVar); printf("fold OBJ(ADDR(X)) [%06u] into X [%06u], ", dspTreeID(obj), dspTreeID(lclVar)); printf("with %s handles\n", ((lclClsHnd == objClsHandle) ? "matching" : "different")); } #endif // Keep the DONT_CSE flag in sync // (as the addr always marks it for its op1) lclVar->gtFlags &= ~GTF_DONT_CSE; lclVar->gtFlags |= (obj->gtFlags & GTF_DONT_CSE); if (destroyNodes) { DEBUG_DESTROY_NODE(obj); DEBUG_DESTROY_NODE(addr); } return lclVar; } } } } return nullptr; } /***************************************************************************** * * Transform the given GTK_LEAF tree for code generation. */ GenTree* Compiler::fgMorphLeaf(GenTree* tree) { assert(tree->OperKind() & GTK_LEAF); if (tree->gtOper == GT_LCL_VAR) { const bool forceRemorph = false; return fgMorphLocalVar(tree, forceRemorph); } else if (tree->gtOper == GT_LCL_FLD) { if (lvaGetDesc(tree->AsLclFld())->IsAddressExposed()) { tree->gtFlags |= GTF_GLOB_REF; } #ifdef TARGET_X86 if (info.compIsVarArgs) { GenTree* newTree = fgMorphStackArgForVarArgs(tree->AsLclFld()->GetLclNum(), tree->TypeGet(), tree->AsLclFld()->GetLclOffs()); if (newTree != nullptr) { if (newTree->OperIsBlk() && ((tree->gtFlags & GTF_VAR_DEF) == 0)) { newTree->SetOper(GT_IND); } return newTree; } } #endif // TARGET_X86 } else if (tree->gtOper == GT_FTN_ADDR) { GenTreeFptrVal* fptrValTree = tree->AsFptrVal(); // A function pointer address is being used. Let the VM know if this is the // target of a Delegate or a raw function pointer. bool isUnsafeFunctionPointer = !fptrValTree->gtFptrDelegateTarget; CORINFO_CONST_LOOKUP addrInfo; #ifdef FEATURE_READYTORUN if (fptrValTree->gtEntryPoint.addr != nullptr) { addrInfo = fptrValTree->gtEntryPoint; } else #endif { info.compCompHnd->getFunctionFixedEntryPoint(fptrValTree->gtFptrMethod, isUnsafeFunctionPointer, &addrInfo); } GenTree* indNode = nullptr; switch (addrInfo.accessType) { case IAT_PPVALUE: indNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)addrInfo.handle, GTF_ICON_CONST_PTR, true); // Add the second indirection indNode = gtNewOperNode(GT_IND, TYP_I_IMPL, indNode); // This indirection won't cause an exception. indNode->gtFlags |= GTF_IND_NONFAULTING; // This indirection also is invariant. 
indNode->gtFlags |= GTF_IND_INVARIANT; break; case IAT_PVALUE: indNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)addrInfo.handle, GTF_ICON_FTN_ADDR, true); break; case IAT_VALUE: // Refer to gtNewIconHandleNode() as the template for constructing a constant handle // tree->SetOper(GT_CNS_INT); tree->AsIntConCommon()->SetIconValue(ssize_t(addrInfo.handle)); tree->gtFlags |= GTF_ICON_FTN_ADDR; break; default: noway_assert(!"Unknown addrInfo.accessType"); } if (indNode != nullptr) { DEBUG_DESTROY_NODE(tree); tree = fgMorphTree(indNode); } } return tree; } void Compiler::fgAssignSetVarDef(GenTree* tree) { GenTreeLclVarCommon* lclVarCmnTree; bool isEntire = false; if (tree->DefinesLocal(this, &lclVarCmnTree, &isEntire)) { if (isEntire) { lclVarCmnTree->gtFlags |= GTF_VAR_DEF; } else { // We consider partial definitions to be modeled as uses followed by definitions. // This captures the idea that precedings defs are not necessarily made redundant // by this definition. lclVarCmnTree->gtFlags |= (GTF_VAR_DEF | GTF_VAR_USEASG); } } } //------------------------------------------------------------------------ // fgMorphOneAsgBlockOp: Attempt to replace a block assignment with a scalar assignment // // Arguments: // tree - The block assignment to be possibly morphed // // Return Value: // The modified tree if successful, nullptr otherwise. // // Assumptions: // 'tree' must be a block assignment. // // Notes: // If successful, this method always returns the incoming tree, modifying only // its arguments. // GenTree* Compiler::fgMorphOneAsgBlockOp(GenTree* tree) { // This must be a block assignment. noway_assert(tree->OperIsBlkOp()); var_types asgType = tree->TypeGet(); GenTree* asg = tree; GenTree* dest = asg->gtGetOp1(); GenTree* src = asg->gtGetOp2(); unsigned destVarNum = BAD_VAR_NUM; LclVarDsc* destVarDsc = nullptr; GenTree* destLclVarTree = nullptr; bool isCopyBlock = asg->OperIsCopyBlkOp(); bool isInitBlock = !isCopyBlock; unsigned size = 0; CORINFO_CLASS_HANDLE clsHnd = NO_CLASS_HANDLE; if (dest->gtEffectiveVal()->OperIsBlk()) { GenTreeBlk* lhsBlk = dest->gtEffectiveVal()->AsBlk(); size = lhsBlk->Size(); if (impIsAddressInLocal(lhsBlk->Addr(), &destLclVarTree)) { destVarNum = destLclVarTree->AsLclVarCommon()->GetLclNum(); destVarDsc = lvaGetDesc(destVarNum); } if (lhsBlk->OperGet() == GT_OBJ) { clsHnd = lhsBlk->AsObj()->GetLayout()->GetClassHandle(); } } else { // Is this an enregisterable struct that is already a simple assignment? // This can happen if we are re-morphing. // Note that we won't do this straightaway if this is a SIMD type, since it // may be a promoted lclVar (sometimes we promote the individual float fields of // fixed-size SIMD). 
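        // For example (illustrative): a register-sized struct copy between two non-promoted
        // locals can be retyped as a single scalar assignment (TYP_LONG, or a GC type taken from
        // the layout) instead of a block copy; the code below works out whether the destination
        // side of the assignment allows that.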
if (dest->OperGet() == GT_IND) { noway_assert(asgType != TYP_STRUCT); if (varTypeIsStruct(asgType)) { destLclVarTree = fgIsIndirOfAddrOfLocal(dest); } if (isCopyBlock && destLclVarTree == nullptr && !src->OperIs(GT_LCL_VAR)) { fgMorphBlockOperand(src, asgType, genTypeSize(asgType), false /*isBlkReqd*/); dest->gtFlags |= GTF_DONT_CSE; return tree; } } else { noway_assert(dest->OperIsLocal()); destLclVarTree = dest; } if (destLclVarTree != nullptr) { destVarNum = destLclVarTree->AsLclVarCommon()->GetLclNum(); destVarDsc = lvaGetDesc(destVarNum); if (asgType == TYP_STRUCT) { clsHnd = destVarDsc->GetStructHnd(); size = destVarDsc->lvExactSize; } } if (asgType != TYP_STRUCT) { size = genTypeSize(asgType); } } if (size == 0) { return nullptr; } if ((destVarDsc != nullptr) && varTypeIsStruct(destLclVarTree) && destVarDsc->lvPromoted) { // Let fgMorphCopyBlock handle it. return nullptr; } if (src->IsCall() || src->OperIsSIMD()) { // Can't take ADDR from these nodes, let fgMorphCopyBlock handle it, #11413. return nullptr; } if ((destVarDsc != nullptr) && !varTypeIsStruct(destVarDsc->TypeGet())) { // // See if we can do a simple transformation: // // GT_ASG <TYP_size> // / \. // GT_IND GT_IND or CNS_INT // | | // [dest] [src] // if (asgType == TYP_STRUCT) { // It is possible to use `initobj` to init a primitive type on the stack, // like `ldloca.s 1; initobj 1B000003` where `V01` has type `ref`; // in this case we generate `ASG struct(BLK<8> struct(ADDR byref(LCL_VAR ref)), 0)` // and this code path transforms it into `ASG ref(LCL_VARref, 0)` because it is not a real // struct assignment. if (size == REGSIZE_BYTES) { if (clsHnd == NO_CLASS_HANDLE) { // A register-sized cpblk can be treated as an integer asignment. asgType = TYP_I_IMPL; } else { BYTE gcPtr; info.compCompHnd->getClassGClayout(clsHnd, &gcPtr); asgType = getJitGCType(gcPtr); } } else { switch (size) { case 1: asgType = TYP_BYTE; break; case 2: asgType = TYP_SHORT; break; #ifdef TARGET_64BIT case 4: asgType = TYP_INT; break; #endif // TARGET_64BIT } } } } GenTree* srcLclVarTree = nullptr; LclVarDsc* srcVarDsc = nullptr; if (isCopyBlock) { if (src->OperGet() == GT_LCL_VAR) { srcLclVarTree = src; srcVarDsc = lvaGetDesc(src->AsLclVarCommon()); } else if (src->OperIsIndir() && impIsAddressInLocal(src->AsOp()->gtOp1, &srcLclVarTree)) { srcVarDsc = lvaGetDesc(srcLclVarTree->AsLclVarCommon()); } if ((srcVarDsc != nullptr) && varTypeIsStruct(srcLclVarTree) && srcVarDsc->lvPromoted) { // Let fgMorphCopyBlock handle it. return nullptr; } } if (asgType != TYP_STRUCT) { noway_assert((size <= REGSIZE_BYTES) || varTypeIsSIMD(asgType)); // For initBlk, a non constant source is not going to allow us to fiddle // with the bits to create a single assigment. // Nor do we (for now) support transforming an InitBlock of SIMD type, unless // it is a direct assignment to a lclVar and the value is zero. if (isInitBlock) { if (!src->IsConstInitVal()) { return nullptr; } if (varTypeIsSIMD(asgType) && (!src->IsIntegralConst(0) || (destVarDsc == nullptr))) { return nullptr; } } if (destVarDsc != nullptr) { // Kill everything about dest if (optLocalAssertionProp) { if (optAssertionCount > 0) { fgKillDependentAssertions(destVarNum DEBUGARG(tree)); } } // A previous incarnation of this code also required the local not to be // address-exposed(=taken). That seems orthogonal to the decision of whether // to do field-wise assignments: being address-exposed will cause it to be // "dependently" promoted, so it will be in the right memory location. 
One possible // further reason for avoiding field-wise stores is that the struct might have alignment-induced // holes, whose contents could be meaningful in unsafe code. If we decide that's a valid // concern, then we could compromise, and say that address-exposed + fields do not completely cover the // memory of the struct prevent field-wise assignments. Same situation exists for the "src" decision. if (varTypeIsStruct(destLclVarTree) && destVarDsc->lvPromoted) { // Let fgMorphInitBlock handle it. (Since we'll need to do field-var-wise assignments.) return nullptr; } else if (!varTypeIsFloating(destLclVarTree->TypeGet()) && (size == genTypeSize(destVarDsc))) { // Use the dest local var directly, as well as its type. dest = destLclVarTree; asgType = destVarDsc->lvType; // If the block operation had been a write to a local var of a small int type, // of the exact size of the small int type, and the var is NormalizeOnStore, // we would have labeled it GTF_VAR_USEASG, because the block operation wouldn't // have done that normalization. If we're now making it into an assignment, // the NormalizeOnStore will work, and it can be a full def. if (destVarDsc->lvNormalizeOnStore()) { dest->gtFlags &= (~GTF_VAR_USEASG); } } else { // Could be a non-promoted struct, or a floating point type local, or // an int subject to a partial write. Don't enregister. lvaSetVarDoNotEnregister(destVarNum DEBUGARG(DoNotEnregisterReason::OneAsgRetyping)); // Mark the local var tree as a definition point of the local. destLclVarTree->gtFlags |= GTF_VAR_DEF; if (size < destVarDsc->lvExactSize) { // If it's not a full-width assignment.... destLclVarTree->gtFlags |= GTF_VAR_USEASG; } if (dest == destLclVarTree) { GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest); dest = gtNewIndir(asgType, addr); } } } // Check to ensure we don't have a reducible *(& ... ) if (dest->OperIsIndir() && dest->AsIndir()->Addr()->OperGet() == GT_ADDR) { // If dest is an Indir or Block, and it has a child that is a Addr node // GenTree* addrNode = dest->AsIndir()->Addr(); // known to be a GT_ADDR // Can we just remove the Ind(Addr(destOp)) and operate directly on 'destOp'? // GenTree* destOp = addrNode->gtGetOp1(); var_types destOpType = destOp->TypeGet(); // We can if we have a primitive integer type and the sizes are exactly the same. // if ((varTypeIsIntegralOrI(destOp) && (size == genTypeSize(destOpType)))) { dest = destOp; asgType = destOpType; } } if (dest->gtEffectiveVal()->OperIsIndir()) { // If we have no information about the destination, we have to assume it could // live anywhere (not just in the GC heap). // Mark the GT_IND node so that we use the correct write barrier helper in case // the field is a GC ref. if (!fgIsIndirOfAddrOfLocal(dest)) { dest->gtFlags |= (GTF_GLOB_REF | GTF_IND_TGTANYWHERE); tree->gtFlags |= GTF_GLOB_REF; } dest->SetIndirExceptionFlags(this); tree->gtFlags |= (dest->gtFlags & GTF_EXCEPT); } if (isCopyBlock) { if (srcVarDsc != nullptr) { // Handled above. assert(!varTypeIsStruct(srcLclVarTree) || !srcVarDsc->lvPromoted); if (!varTypeIsFloating(srcLclVarTree->TypeGet()) && size == genTypeSize(genActualType(srcLclVarTree->TypeGet()))) { // Use the src local var directly. src = srcLclVarTree; } else { // The source argument of the copyblk can potentially be accessed only through indir(addr(lclVar)) // or indir(lclVarAddr) so it must be on the stack. 
unsigned lclVarNum = srcLclVarTree->AsLclVarCommon()->GetLclNum(); lvaSetVarDoNotEnregister(lclVarNum DEBUGARG(DoNotEnregisterReason::OneAsgRetyping)); GenTree* srcAddr; if (src == srcLclVarTree) { srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src); src = gtNewOperNode(GT_IND, asgType, srcAddr); } else { assert(src->OperIsIndir()); } } } if (src->OperIsIndir()) { if (!fgIsIndirOfAddrOfLocal(src)) { // If we have no information about the src, we have to assume it could // live anywhere (not just in the GC heap). // Mark the GT_IND node so that we use the correct write barrier helper in case // the field is a GC ref. src->gtFlags |= (GTF_GLOB_REF | GTF_IND_TGTANYWHERE); } src->SetIndirExceptionFlags(this); } } else // InitBlk { #ifdef FEATURE_SIMD if (varTypeIsSIMD(asgType)) { assert(!isCopyBlock); // Else we would have returned the tree above. noway_assert(src->IsIntegralConst(0)); noway_assert(destVarDsc != nullptr); src = gtNewSIMDNode(asgType, src, SIMDIntrinsicInit, destVarDsc->GetSimdBaseJitType(), size); } else #endif { if (src->OperIsInitVal()) { src = src->gtGetOp1(); } assert(src->IsCnsIntOrI()); // This will mutate the integer constant, in place, to be the correct // value for the type we are using in the assignment. src->AsIntCon()->FixupInitBlkValue(asgType); } } // Ensure that the dest is setup appropriately. if (dest->gtEffectiveVal()->OperIsIndir()) { dest = fgMorphBlockOperand(dest, asgType, size, false /*isBlkReqd*/); } // Ensure that the rhs is setup appropriately. if (isCopyBlock) { src = fgMorphBlockOperand(src, asgType, size, false /*isBlkReqd*/); } // Set the lhs and rhs on the assignment. if (dest != tree->AsOp()->gtOp1) { asg->AsOp()->gtOp1 = dest; } if (src != asg->AsOp()->gtOp2) { asg->AsOp()->gtOp2 = src; } asg->ChangeType(asgType); dest->gtFlags |= GTF_DONT_CSE; asg->gtFlags &= ~GTF_EXCEPT; asg->gtFlags |= ((dest->gtFlags | src->gtFlags) & GTF_ALL_EFFECT); // Un-set GTF_REVERSE_OPS, and it will be set later if appropriate. asg->gtFlags &= ~GTF_REVERSE_OPS; #ifdef DEBUG if (verbose) { printf("fgMorphOneAsgBlock (after):\n"); gtDispTree(tree); } #endif return tree; } return nullptr; } //------------------------------------------------------------------------ // fgMorphPromoteLocalInitBlock: Attempts to promote a local block init tree // to a tree of promoted field initialization assignments. // // Arguments: // destLclNode - The destination LclVar node // initVal - The initialization value // blockSize - The amount of bytes to initialize // // Return Value: // A tree that performs field by field initialization of the destination // struct variable if various conditions are met, nullptr otherwise. 
// // Notes: // This transforms a single block initialization assignment like: // // * ASG struct (init) // +--* BLK(12) struct // | \--* ADDR long // | \--* LCL_VAR struct(P) V02 loc0 // | \--* int V02.a (offs=0x00) -> V06 tmp3 // | \--* ubyte V02.c (offs=0x04) -> V07 tmp4 // | \--* float V02.d (offs=0x08) -> V08 tmp5 // \--* INIT_VAL int // \--* CNS_INT int 42 // // into a COMMA tree of assignments that initialize each promoted struct // field: // // * COMMA void // +--* COMMA void // | +--* ASG int // | | +--* LCL_VAR int V06 tmp3 // | | \--* CNS_INT int 0x2A2A2A2A // | \--* ASG ubyte // | +--* LCL_VAR ubyte V07 tmp4 // | \--* CNS_INT int 42 // \--* ASG float // +--* LCL_VAR float V08 tmp5 // \--* CNS_DBL float 1.5113661732714390e-13 // GenTree* Compiler::fgMorphPromoteLocalInitBlock(GenTreeLclVar* destLclNode, GenTree* initVal, unsigned blockSize) { assert(destLclNode->OperIs(GT_LCL_VAR)); LclVarDsc* destLclVar = lvaGetDesc(destLclNode); assert(varTypeIsStruct(destLclVar->TypeGet())); assert(destLclVar->lvPromoted); if (blockSize == 0) { JITDUMP(" size is zero or unknown.\n"); return nullptr; } if (destLclVar->IsAddressExposed() && destLclVar->lvContainsHoles) { JITDUMP(" dest is address exposed and contains holes.\n"); return nullptr; } if (destLclVar->lvCustomLayout && destLclVar->lvContainsHoles) { // TODO-1stClassStructs: there are no reasons for this pessimization, delete it. JITDUMP(" dest has custom layout and contains holes.\n"); return nullptr; } if (destLclVar->lvExactSize != blockSize) { JITDUMP(" dest size mismatch.\n"); return nullptr; } if (!initVal->OperIs(GT_CNS_INT)) { JITDUMP(" source is not constant.\n"); return nullptr; } const int64_t initPattern = (initVal->AsIntCon()->IconValue() & 0xFF) * 0x0101010101010101LL; if (initPattern != 0) { for (unsigned i = 0; i < destLclVar->lvFieldCnt; ++i) { LclVarDsc* fieldDesc = lvaGetDesc(destLclVar->lvFieldLclStart + i); if (varTypeIsSIMD(fieldDesc->TypeGet()) || varTypeIsGC(fieldDesc->TypeGet())) { // Cannot initialize GC or SIMD types with a non-zero constant. // The former is completly bogus. The later restriction could be // lifted by supporting non-zero SIMD constants or by generating // field initialization code that converts an integer constant to // the appropiate SIMD value. Unlikely to be very useful, though. JITDUMP(" dest contains GC and/or SIMD fields and source constant is not 0.\n"); return nullptr; } } } JITDUMP(" using field by field initialization.\n"); GenTree* tree = nullptr; for (unsigned i = 0; i < destLclVar->lvFieldCnt; ++i) { unsigned fieldLclNum = destLclVar->lvFieldLclStart + i; LclVarDsc* fieldDesc = lvaGetDesc(fieldLclNum); GenTree* dest = gtNewLclvNode(fieldLclNum, fieldDesc->TypeGet()); // If it had been labeled a "USEASG", assignments to the individual promoted fields are not. dest->gtFlags |= (destLclNode->gtFlags & ~(GTF_NODE_MASK | GTF_VAR_USEASG)); GenTree* src; switch (dest->TypeGet()) { case TYP_BOOL: case TYP_BYTE: case TYP_UBYTE: case TYP_SHORT: case TYP_USHORT: // Promoted fields are expected to be "normalize on load". If that changes then // we may need to adjust this code to widen the constant correctly. 
assert(fieldDesc->lvNormalizeOnLoad()); FALLTHROUGH; case TYP_INT: { int64_t mask = (int64_t(1) << (genTypeSize(dest->TypeGet()) * 8)) - 1; src = gtNewIconNode(static_cast<int32_t>(initPattern & mask)); break; } case TYP_LONG: src = gtNewLconNode(initPattern); break; case TYP_FLOAT: float floatPattern; memcpy(&floatPattern, &initPattern, sizeof(floatPattern)); src = gtNewDconNode(floatPattern, dest->TypeGet()); break; case TYP_DOUBLE: double doublePattern; memcpy(&doublePattern, &initPattern, sizeof(doublePattern)); src = gtNewDconNode(doublePattern, dest->TypeGet()); break; case TYP_REF: case TYP_BYREF: #ifdef FEATURE_SIMD case TYP_SIMD8: case TYP_SIMD12: case TYP_SIMD16: case TYP_SIMD32: #endif // FEATURE_SIMD assert(initPattern == 0); src = gtNewIconNode(0, dest->TypeGet()); break; default: unreached(); } GenTree* asg = gtNewAssignNode(dest, src); if (optLocalAssertionProp) { optAssertionGen(asg); } if (tree != nullptr) { tree = gtNewOperNode(GT_COMMA, TYP_VOID, tree, asg); } else { tree = asg; } } return tree; } //------------------------------------------------------------------------ // fgMorphGetStructAddr: Gets the address of a struct object // // Arguments: // pTree - the parent's pointer to the struct object node // clsHnd - the class handle for the struct type // isRValue - true if this is a source (not dest) // // Return Value: // Returns the address of the struct value, possibly modifying the existing tree to // sink the address below any comma nodes (this is to canonicalize for value numbering). // If this is a source, it will morph it to an GT_IND before taking its address, // since it may not be remorphed (and we don't want blk nodes as rvalues). GenTree* Compiler::fgMorphGetStructAddr(GenTree** pTree, CORINFO_CLASS_HANDLE clsHnd, bool isRValue) { GenTree* addr; GenTree* tree = *pTree; // If this is an indirection, we can return its op1, unless it's a GTF_IND_ARR_INDEX, in which case we // need to hang onto that for the purposes of value numbering. if (tree->OperIsIndir()) { if ((tree->gtFlags & GTF_IND_ARR_INDEX) == 0) { addr = tree->AsOp()->gtOp1; } else { if (isRValue && tree->OperIsBlk()) { tree->ChangeOper(GT_IND); } addr = gtNewOperNode(GT_ADDR, TYP_BYREF, tree); } } else if (tree->gtOper == GT_COMMA) { // If this is a comma, we're going to "sink" the GT_ADDR below it. 
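        // For example (illustrative): asked for the address of COMMA(sideEffects, X), we recurse
        // on X so the tree becomes COMMA(sideEffects, ADDR(X)), retype the COMMA itself to
        // TYP_BYREF, and return the COMMA as the address; the side effects stay in place while
        // the shape is canonicalized for value numbering.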
(void)fgMorphGetStructAddr(&(tree->AsOp()->gtOp2), clsHnd, isRValue); tree->gtType = TYP_BYREF; addr = tree; } else { switch (tree->gtOper) { case GT_LCL_FLD: case GT_LCL_VAR: case GT_INDEX: case GT_FIELD: case GT_ARR_ELEM: addr = gtNewOperNode(GT_ADDR, TYP_BYREF, tree); break; case GT_INDEX_ADDR: addr = tree; break; default: { // TODO: Consider using lvaGrabTemp and gtNewTempAssign instead, since we're // not going to use "temp" GenTree* temp = fgInsertCommaFormTemp(pTree, clsHnd); unsigned lclNum = temp->gtEffectiveVal()->AsLclVar()->GetLclNum(); lvaSetVarDoNotEnregister(lclNum DEBUG_ARG(DoNotEnregisterReason::VMNeedsStackAddr)); addr = fgMorphGetStructAddr(pTree, clsHnd, isRValue); break; } } } *pTree = addr; return addr; } //------------------------------------------------------------------------ // fgMorphBlockOperand: Canonicalize an operand of a block assignment // // Arguments: // tree - The block operand // asgType - The type of the assignment // blockWidth - The size of the block // isBlkReqd - true iff this operand must remain a block node // // Return Value: // Returns the morphed block operand // // Notes: // This does the following: // - Ensures that a struct operand is a block node or lclVar. // - Ensures that any COMMAs are above ADDR nodes. // Although 'tree' WAS an operand of a block assignment, the assignment // may have been retyped to be a scalar assignment. GenTree* Compiler::fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigned blockWidth, bool isBlkReqd) { GenTree* effectiveVal = tree->gtEffectiveVal(); if (asgType != TYP_STRUCT) { if (effectiveVal->OperIsIndir()) { if (!isBlkReqd) { GenTree* addr = effectiveVal->AsIndir()->Addr(); if ((addr->OperGet() == GT_ADDR) && (addr->gtGetOp1()->TypeGet() == asgType)) { effectiveVal = addr->gtGetOp1(); } else if (effectiveVal->OperIsBlk()) { effectiveVal->SetOper(GT_IND); } } effectiveVal->gtType = asgType; } else if (effectiveVal->TypeGet() != asgType) { if (effectiveVal->IsCall()) { #ifdef DEBUG GenTreeCall* call = effectiveVal->AsCall(); assert(call->TypeGet() == TYP_STRUCT); assert(blockWidth == info.compCompHnd->getClassSize(call->gtRetClsHnd)); #endif } else { GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, effectiveVal); effectiveVal = gtNewIndir(asgType, addr); } } } else { GenTreeIndir* indirTree = nullptr; GenTreeLclVarCommon* lclNode = nullptr; bool needsIndirection = true; if (effectiveVal->OperIsIndir()) { indirTree = effectiveVal->AsIndir(); GenTree* addr = effectiveVal->AsIndir()->Addr(); if ((addr->OperGet() == GT_ADDR) && (addr->gtGetOp1()->OperGet() == GT_LCL_VAR)) { lclNode = addr->gtGetOp1()->AsLclVarCommon(); } } else if (effectiveVal->OperGet() == GT_LCL_VAR) { lclNode = effectiveVal->AsLclVarCommon(); } else if (effectiveVal->IsCall()) { needsIndirection = false; #ifdef DEBUG GenTreeCall* call = effectiveVal->AsCall(); assert(call->TypeGet() == TYP_STRUCT); assert(blockWidth == info.compCompHnd->getClassSize(call->gtRetClsHnd)); #endif } #ifdef TARGET_ARM64 else if (effectiveVal->OperIsHWIntrinsic()) { needsIndirection = false; #ifdef DEBUG GenTreeHWIntrinsic* intrinsic = effectiveVal->AsHWIntrinsic(); assert(intrinsic->TypeGet() == TYP_STRUCT); assert(HWIntrinsicInfo::IsMultiReg(intrinsic->GetHWIntrinsicId())); #endif } #endif // TARGET_ARM64 if (lclNode != nullptr) { const LclVarDsc* varDsc = lvaGetDesc(lclNode); if (varTypeIsStruct(varDsc) && (varDsc->lvExactSize == blockWidth) && (varDsc->lvType == asgType)) { if (effectiveVal != lclNode) { JITDUMP("Replacing block node [%06d] with lclVar 
V%02u\n", dspTreeID(tree), lclNode->GetLclNum()); effectiveVal = lclNode; } needsIndirection = false; } else { // This may be a lclVar that was determined to be address-exposed. effectiveVal->gtFlags |= (lclNode->gtFlags & GTF_ALL_EFFECT); } } if (needsIndirection) { if (indirTree != nullptr) { // If we have an indirection and a block is required, it should already be a block. assert(indirTree->OperIsBlk() || !isBlkReqd); effectiveVal->gtType = asgType; } else { GenTree* newTree; GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, effectiveVal); if (isBlkReqd) { CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleIfPresent(effectiveVal); if (clsHnd == NO_CLASS_HANDLE) { newTree = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, addr, typGetBlkLayout(blockWidth)); } else { newTree = gtNewObjNode(clsHnd, addr); gtSetObjGcInfo(newTree->AsObj()); } } else { newTree = gtNewIndir(asgType, addr); } effectiveVal = newTree; } } } assert(effectiveVal->TypeIs(asgType) || (varTypeIsSIMD(asgType) && varTypeIsStruct(effectiveVal))); tree = effectiveVal; return tree; } //------------------------------------------------------------------------ // fgMorphCanUseLclFldForCopy: check if we can access LclVar2 using LclVar1's fields. // // Arguments: // lclNum1 - a promoted lclVar that is used in fieldwise assignment; // lclNum2 - the local variable on the other side of ASG, can be BAD_VAR_NUM. // // Return Value: // True if the second local is valid and has the same struct handle as the first, // false otherwise. // // Notes: // This check is needed to avoid accessing LCL_VARs with incorrect // CORINFO_FIELD_HANDLE that would confuse VN optimizations. // bool Compiler::fgMorphCanUseLclFldForCopy(unsigned lclNum1, unsigned lclNum2) { assert(lclNum1 != BAD_VAR_NUM); if (lclNum2 == BAD_VAR_NUM) { return false; } const LclVarDsc* varDsc1 = lvaGetDesc(lclNum1); const LclVarDsc* varDsc2 = lvaGetDesc(lclNum2); assert(varTypeIsStruct(varDsc1)); if (!varTypeIsStruct(varDsc2)) { return false; } CORINFO_CLASS_HANDLE struct1 = varDsc1->GetStructHnd(); CORINFO_CLASS_HANDLE struct2 = varDsc2->GetStructHnd(); assert(struct1 != NO_CLASS_HANDLE); assert(struct2 != NO_CLASS_HANDLE); if (struct1 != struct2) { return false; } return true; } // insert conversions and normalize to make tree amenable to register // FP architectures GenTree* Compiler::fgMorphForRegisterFP(GenTree* tree) { if (tree->OperIsArithmetic()) { if (varTypeIsFloating(tree)) { GenTree* op1 = tree->AsOp()->gtOp1; GenTree* op2 = tree->gtGetOp2(); assert(varTypeIsFloating(op1->TypeGet()) && varTypeIsFloating(op2->TypeGet())); if (op1->TypeGet() != tree->TypeGet()) { tree->AsOp()->gtOp1 = gtNewCastNode(tree->TypeGet(), op1, false, tree->TypeGet()); } if (op2->TypeGet() != tree->TypeGet()) { tree->AsOp()->gtOp2 = gtNewCastNode(tree->TypeGet(), op2, false, tree->TypeGet()); } } } else if (tree->OperIsCompare()) { GenTree* op1 = tree->AsOp()->gtOp1; if (varTypeIsFloating(op1)) { GenTree* op2 = tree->gtGetOp2(); assert(varTypeIsFloating(op2)); if (op1->TypeGet() != op2->TypeGet()) { // both had better be floating, just one bigger than other if (op1->TypeGet() == TYP_FLOAT) { assert(op2->TypeGet() == TYP_DOUBLE); tree->AsOp()->gtOp1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE); } else if (op2->TypeGet() == TYP_FLOAT) { assert(op1->TypeGet() == TYP_DOUBLE); tree->AsOp()->gtOp2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE); } } } } return tree; } #ifdef FEATURE_SIMD 
//-------------------------------------------------------------------------------------------------------------- // getSIMDStructFromField: // Checking whether the field belongs to a simd struct or not. If it is, return the GenTree* for // the struct node, also base type, field index and simd size. If it is not, just return nullptr. // Usually if the tree node is from a simd lclvar which is not used in any SIMD intrinsic, then we // should return nullptr, since in this case we should treat SIMD struct as a regular struct. // However if no matter what, you just want get simd struct node, you can set the ignoreUsedInSIMDIntrinsic // as true. Then there will be no IsUsedInSIMDIntrinsic checking, and it will return SIMD struct node // if the struct is a SIMD struct. // // Arguments: // tree - GentreePtr. This node will be checked to see this is a field which belongs to a simd // struct used for simd intrinsic or not. // simdBaseJitTypeOut - CorInfoType pointer, if the tree node is the tree we want, we set *simdBaseJitTypeOut // to simd lclvar's base JIT type. // indexOut - unsigned pointer, if the tree is used for simd intrinsic, we will set *indexOut // equals to the index number of this field. // simdSizeOut - unsigned pointer, if the tree is used for simd intrinsic, set the *simdSizeOut // equals to the simd struct size which this tree belongs to. // ignoreUsedInSIMDIntrinsic - bool. If this is set to true, then this function will ignore // the UsedInSIMDIntrinsic check. // // return value: // A GenTree* which points the simd lclvar tree belongs to. If the tree is not the simd // instrinic related field, return nullptr. // GenTree* Compiler::getSIMDStructFromField(GenTree* tree, CorInfoType* simdBaseJitTypeOut, unsigned* indexOut, unsigned* simdSizeOut, bool ignoreUsedInSIMDIntrinsic /*false*/) { GenTree* ret = nullptr; if (tree->OperGet() == GT_FIELD) { GenTree* objRef = tree->AsField()->GetFldObj(); if (objRef != nullptr) { GenTree* obj = nullptr; if (objRef->gtOper == GT_ADDR) { obj = objRef->AsOp()->gtOp1; } else if (ignoreUsedInSIMDIntrinsic) { obj = objRef; } else { return nullptr; } if (isSIMDTypeLocal(obj)) { LclVarDsc* varDsc = lvaGetDesc(obj->AsLclVarCommon()); if (varDsc->lvIsUsedInSIMDIntrinsic() || ignoreUsedInSIMDIntrinsic) { *simdSizeOut = varDsc->lvExactSize; *simdBaseJitTypeOut = getBaseJitTypeOfSIMDLocal(obj); ret = obj; } } else if (obj->OperGet() == GT_SIMD) { ret = obj; GenTreeSIMD* simdNode = obj->AsSIMD(); *simdSizeOut = simdNode->GetSimdSize(); *simdBaseJitTypeOut = simdNode->GetSimdBaseJitType(); } #ifdef FEATURE_HW_INTRINSICS else if (obj->OperIsHWIntrinsic()) { ret = obj; GenTreeHWIntrinsic* simdNode = obj->AsHWIntrinsic(); *simdSizeOut = simdNode->GetSimdSize(); *simdBaseJitTypeOut = simdNode->GetSimdBaseJitType(); } #endif // FEATURE_HW_INTRINSICS } } if (ret != nullptr) { var_types fieldType = tree->TypeGet(); if (fieldType == TYP_LONG) { // Vector2/3/4 expose public float fields while Vector<T> // and Vector64/128/256<T> have internal ulong fields. So // we should only ever encounter accesses for TYP_FLOAT or // TYP_LONG and in the case of the latter we don't want the // generic type since we are executing some algorithm on the // raw underlying bits instead. 
*simdBaseJitTypeOut = CORINFO_TYPE_ULONG; } else { assert(fieldType == TYP_FLOAT); } unsigned baseTypeSize = genTypeSize(JITtype2varType(*simdBaseJitTypeOut)); *indexOut = tree->AsField()->gtFldOffset / baseTypeSize; } return ret; } /***************************************************************************** * If a read operation tries to access simd struct field, then transform the operation * to the SimdGetElementNode, and return the new tree. Otherwise, return the old tree. * Argument: * tree - GenTree*. If this pointer points to simd struct which is used for simd * intrinsic, we will morph it as simd intrinsic NI_Vector128_GetElement. * Return: * A GenTree* which points to the new tree. If the tree is not for simd intrinsic, * return nullptr. */ GenTree* Compiler::fgMorphFieldToSimdGetElement(GenTree* tree) { unsigned index = 0; CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; unsigned simdSize = 0; GenTree* simdStructNode = getSIMDStructFromField(tree, &simdBaseJitType, &index, &simdSize); if (simdStructNode != nullptr) { var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); GenTree* op2 = gtNewIconNode(index, TYP_INT); assert(simdSize <= 16); assert(simdSize >= ((index + 1) * genTypeSize(simdBaseType))); tree = gtNewSimdGetElementNode(simdBaseType, simdStructNode, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } return tree; } /***************************************************************************** * Transform an assignment of a SIMD struct field to SimdWithElementNode, and * return a new tree. If it is not such an assignment, then return the old tree. * Argument: * tree - GenTree*. If this pointer points to simd struct which is used for simd * intrinsic, we will morph it as simd intrinsic set. * Return: * A GenTree* which points to the new tree. If the tree is not for simd intrinsic, * return nullptr. */ GenTree* Compiler::fgMorphFieldAssignToSimdSetElement(GenTree* tree) { assert(tree->OperGet() == GT_ASG); unsigned index = 0; CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; unsigned simdSize = 0; GenTree* simdStructNode = getSIMDStructFromField(tree->gtGetOp1(), &simdBaseJitType, &index, &simdSize); if (simdStructNode != nullptr) { var_types simdType = simdStructNode->gtType; var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(simdSize <= 16); assert(simdSize >= ((index + 1) * genTypeSize(simdBaseType))); GenTree* op2 = gtNewIconNode(index, TYP_INT); GenTree* op3 = tree->gtGetOp2(); NamedIntrinsic intrinsicId = NI_Vector128_WithElement; GenTree* target = gtClone(simdStructNode); assert(target != nullptr); GenTree* simdTree = gtNewSimdWithElementNode(simdType, simdStructNode, op2, op3, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); tree->AsOp()->gtOp1 = target; tree->AsOp()->gtOp2 = simdTree; // fgMorphTree has already called fgMorphImplicitByRefArgs() on this assignment, but the source // and target have not yet been morphed. // Therefore, in case the source and/or target are now implicit byrefs, we need to call it again. if (fgMorphImplicitByRefArgs(tree)) { if (tree->gtGetOp1()->OperIsBlk()) { assert(tree->gtGetOp1()->TypeGet() == simdType); tree->gtGetOp1()->SetOper(GT_IND); tree->gtGetOp1()->gtType = simdType; } } #ifdef DEBUG tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif } return tree; } #endif // FEATURE_SIMD //------------------------------------------------------------------------------ // fgMorphCommutative : Try to simplify "(X op C1) op C2" to "X op C3" // for commutative operators. 
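// For example (illustrative): ADD(ADD(x, 2), 3) folds to ADD(x, 5); when the inner node is
// wrapped in a COMMA, e.g. ADD(COMMA(.., ADD(x, 2)), 3), the result is COMMA(.., ADD(x, 5)).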
// // Arguments: // tree - node to fold // // return value: // A folded GenTree* instance or nullptr if something prevents folding. // GenTreeOp* Compiler::fgMorphCommutative(GenTreeOp* tree) { assert(varTypeIsIntegralOrI(tree->TypeGet())); assert(tree->OperIs(GT_ADD, GT_MUL, GT_OR, GT_AND, GT_XOR)); // op1 can be GT_COMMA, in this case we're going to fold // "(op (COMMA(... (op X C1))) C2)" to "(COMMA(... (op X C3)))" GenTree* op1 = tree->gtGetOp1()->gtEffectiveVal(true); genTreeOps oper = tree->OperGet(); if (!op1->OperIs(oper) || !tree->gtGetOp2()->IsCnsIntOrI() || !op1->gtGetOp2()->IsCnsIntOrI() || op1->gtGetOp1()->IsCnsIntOrI()) { return nullptr; } if (!fgGlobalMorph && (op1 != tree->gtGetOp1())) { // Since 'tree->gtGetOp1()' can have complex structure (e.g. COMMA(..(COMMA(..,op1))) // don't run the optimization for such trees outside of global morph. // Otherwise, there is a chance of violating VNs invariants and/or modifying a tree // that is an active CSE candidate. return nullptr; } if (gtIsActiveCSE_Candidate(tree) || gtIsActiveCSE_Candidate(op1)) { // The optimization removes 'tree' from IR and changes the value of 'op1'. return nullptr; } if (tree->OperMayOverflow() && (tree->gtOverflow() || op1->gtOverflow())) { return nullptr; } GenTreeIntCon* cns1 = op1->gtGetOp2()->AsIntCon(); GenTreeIntCon* cns2 = tree->gtGetOp2()->AsIntCon(); if (!varTypeIsIntegralOrI(tree->TypeGet()) || cns1->TypeIs(TYP_REF) || !cns1->TypeIs(cns2->TypeGet())) { return nullptr; } if (gtIsActiveCSE_Candidate(cns1) || gtIsActiveCSE_Candidate(cns2)) { // The optimization removes 'cns2' from IR and changes the value of 'cns1'. return nullptr; } GenTree* folded = gtFoldExprConst(gtNewOperNode(oper, cns1->TypeGet(), cns1, cns2)); if (!folded->IsCnsIntOrI()) { // Give up if we can't fold "C1 op C2" return nullptr; } auto foldedCns = folded->AsIntCon(); cns1->SetIconValue(foldedCns->IconValue()); cns1->SetVNsFromNode(foldedCns); cns1->gtFieldSeq = foldedCns->gtFieldSeq; op1 = tree->gtGetOp1(); op1->SetVNsFromNode(tree); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(cns2); DEBUG_DESTROY_NODE(foldedCns); INDEBUG(cns1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return op1->AsOp(); } //------------------------------------------------------------------------------ // fgMorphCastedBitwiseOp : Try to simplify "(T)x op (T)y" to "(T)(x op y)". // // Arguments: // tree - node to fold // // Return Value: // A folded GenTree* instance, or nullptr if it couldn't be folded GenTree* Compiler::fgMorphCastedBitwiseOp(GenTreeOp* tree) { // This transform does not preserve VNs and deletes a node. assert(fgGlobalMorph); assert(varTypeIsIntegralOrI(tree)); assert(tree->OperIs(GT_OR, GT_AND, GT_XOR)); GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); genTreeOps oper = tree->OperGet(); // see whether both ops are casts, with matching to and from types. if (op1->OperIs(GT_CAST) && op2->OperIs(GT_CAST)) { // bail if either operand is a checked cast if (op1->gtOverflow() || op2->gtOverflow()) { return nullptr; } var_types fromType = op1->AsCast()->CastOp()->TypeGet(); var_types toType = op1->AsCast()->CastToType(); bool isUnsigned = op1->IsUnsigned(); if (varTypeIsFloating(fromType) || (op2->CastFromType() != fromType) || (op2->CastToType() != toType) || (op2->IsUnsigned() != isUnsigned)) { return nullptr; } /* // Reuse gentree nodes: // // tree op1 // / \ | // op1 op2 ==> tree // | | / \. 
// x y x y // // (op2 becomes garbage) */ tree->gtOp1 = op1->AsCast()->CastOp(); tree->gtOp2 = op2->AsCast()->CastOp(); tree->gtType = genActualType(fromType); op1->gtType = genActualType(toType); op1->AsCast()->gtOp1 = tree; op1->AsCast()->CastToType() = toType; op1->SetAllEffectsFlags(tree); // no need to update isUnsigned DEBUG_DESTROY_NODE(op2); INDEBUG(op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return op1; } return nullptr; } /***************************************************************************** * * Transform the given GTK_SMPOP tree for code generation. */ #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) { ALLOCA_CHECK(); assert(tree->OperKind() & GTK_SMPOP); /* The steps in this function are : o Perform required preorder processing o Process the first, then second operand, if any o Perform required postorder morphing o Perform optional postorder morphing if optimizing */ bool isQmarkColon = false; AssertionIndex origAssertionCount = DUMMY_INIT(0); AssertionDsc* origAssertionTab = DUMMY_INIT(NULL); AssertionIndex thenAssertionCount = DUMMY_INIT(0); AssertionDsc* thenAssertionTab = DUMMY_INIT(NULL); if (fgGlobalMorph) { tree = fgMorphForRegisterFP(tree); } genTreeOps oper = tree->OperGet(); var_types typ = tree->TypeGet(); GenTree* op1 = tree->AsOp()->gtOp1; GenTree* op2 = tree->gtGetOp2IfPresent(); /*------------------------------------------------------------------------- * First do any PRE-ORDER processing */ switch (oper) { // Some arithmetic operators need to use a helper call to the EE int helper; case GT_ASG: tree = fgDoNormalizeOnStore(tree); /* fgDoNormalizeOnStore can change op2 */ noway_assert(op1 == tree->AsOp()->gtOp1); op2 = tree->AsOp()->gtOp2; #ifdef FEATURE_SIMD if (IsBaselineSimdIsaSupported()) { // We should check whether op2 should be assigned to a SIMD field or not. // If it is, we should tranlate the tree to simd intrinsic. assert(!fgGlobalMorph || ((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0)); GenTree* newTree = fgMorphFieldAssignToSimdSetElement(tree); typ = tree->TypeGet(); op1 = tree->gtGetOp1(); op2 = tree->gtGetOp2(); #ifdef DEBUG assert((tree == newTree) && (tree->OperGet() == oper)); if ((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) != 0) { tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; } #endif // DEBUG } #endif // We can't CSE the LHS of an assignment. Only r-values can be CSEed. // Previously, the "lhs" (addr) of a block op was CSE'd. So, to duplicate the former // behavior, allow CSE'ing if is a struct type (or a TYP_REF transformed from a struct type) // TODO-1stClassStructs: improve this. if (op1->IsLocal() || (op1->TypeGet() != TYP_STRUCT)) { op1->gtFlags |= GTF_DONT_CSE; } break; case GT_ADDR: /* op1 of a GT_ADDR is an l-value. Only r-values can be CSEed */ op1->gtFlags |= GTF_DONT_CSE; break; case GT_QMARK: case GT_JTRUE: noway_assert(op1); if (op1->OperIsCompare()) { /* Mark the comparison node with GTF_RELOP_JMP_USED so it knows that it does not need to materialize the result as a 0 or 1. */ /* We also mark it as DONT_CSE, as we don't handle QMARKs with nonRELOP op1s */ op1->gtFlags |= (GTF_RELOP_JMP_USED | GTF_DONT_CSE); // Request that the codegen for op1 sets the condition flags // when it generates the code for op1. // // Codegen for op1 must set the condition flags if // this method returns true. 
// op1->gtRequestSetFlags(); } else { GenTree* effOp1 = op1->gtEffectiveVal(); noway_assert((effOp1->gtOper == GT_CNS_INT) && (effOp1->IsIntegralConst(0) || effOp1->IsIntegralConst(1))); } break; case GT_COLON: if (optLocalAssertionProp) { isQmarkColon = true; } break; case GT_FIELD: return fgMorphField(tree, mac); case GT_INDEX: return fgMorphArrayIndex(tree); case GT_CAST: { GenTree* morphedCast = fgMorphExpandCast(tree->AsCast()); if (morphedCast != nullptr) { return morphedCast; } op1 = tree->AsCast()->CastOp(); } break; case GT_MUL: noway_assert(op2 != nullptr); if (opts.OptimizationEnabled() && !optValnumCSE_phase && !tree->gtOverflow()) { // MUL(NEG(a), C) => MUL(a, NEG(C)) if (op1->OperIs(GT_NEG) && !op1->gtGetOp1()->IsCnsIntOrI() && op2->IsCnsIntOrI() && !op2->IsIconHandle()) { GenTree* newOp1 = op1->gtGetOp1(); GenTree* newConst = gtNewIconNode(-op2->AsIntCon()->IconValue(), op2->TypeGet()); DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(op2); tree->AsOp()->gtOp1 = newOp1; tree->AsOp()->gtOp2 = newConst; return fgMorphSmpOp(tree, mac); } } #ifndef TARGET_64BIT if (typ == TYP_LONG) { // For (long)int1 * (long)int2, we dont actually do the // casts, and just multiply the 32 bit values, which will // give us the 64 bit result in edx:eax. if (tree->Is64RsltMul()) { // We are seeing this node again. // Morph only the children of casts, // so as to avoid losing them. tree = fgMorphLongMul(tree->AsOp()); goto DONE_MORPHING_CHILDREN; } tree = fgRecognizeAndMorphLongMul(tree->AsOp()); op1 = tree->AsOp()->gtGetOp1(); op2 = tree->AsOp()->gtGetOp2(); if (tree->Is64RsltMul()) { goto DONE_MORPHING_CHILDREN; } else { if (tree->gtOverflow()) helper = tree->IsUnsigned() ? CORINFO_HELP_ULMUL_OVF : CORINFO_HELP_LMUL_OVF; else helper = CORINFO_HELP_LMUL; goto USE_HELPER_FOR_ARITH; } } #endif // !TARGET_64BIT break; case GT_ARR_LENGTH: if (op1->OperIs(GT_CNS_STR)) { // Optimize `ldstr + String::get_Length()` to CNS_INT // e.g. "Hello".Length => 5 GenTreeIntCon* iconNode = gtNewStringLiteralLength(op1->AsStrCon()); if (iconNode != nullptr) { INDEBUG(iconNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return iconNode; } } break; case GT_DIV: // Replace "val / dcon" with "val * (1.0 / dcon)" if dcon is a power of two. 
// Powers of two within range are always exactly represented, // so multiplication by the reciprocal is safe in this scenario if (fgGlobalMorph && op2->IsCnsFltOrDbl()) { double divisor = op2->AsDblCon()->gtDconVal; if (((typ == TYP_DOUBLE) && FloatingPointUtils::hasPreciseReciprocal(divisor)) || ((typ == TYP_FLOAT) && FloatingPointUtils::hasPreciseReciprocal(forceCastToFloat(divisor)))) { oper = GT_MUL; tree->ChangeOper(oper); op2->AsDblCon()->gtDconVal = 1.0 / divisor; } } // Convert DIV to UDIV if boths op1 and op2 are known to be never negative if (!gtIsActiveCSE_Candidate(tree) && varTypeIsIntegral(tree) && op1->IsNeverNegative(this) && op2->IsNeverNegative(this)) { assert(tree->OperIs(GT_DIV)); tree->ChangeOper(GT_UDIV, GenTree::PRESERVE_VN); return fgMorphSmpOp(tree, mac); } #ifndef TARGET_64BIT if (typ == TYP_LONG) { helper = CORINFO_HELP_LDIV; goto USE_HELPER_FOR_ARITH; } #if USE_HELPERS_FOR_INT_DIV if (typ == TYP_INT) { helper = CORINFO_HELP_DIV; goto USE_HELPER_FOR_ARITH; } #endif #endif // !TARGET_64BIT break; case GT_UDIV: #ifndef TARGET_64BIT if (typ == TYP_LONG) { helper = CORINFO_HELP_ULDIV; goto USE_HELPER_FOR_ARITH; } #if USE_HELPERS_FOR_INT_DIV if (typ == TYP_INT) { helper = CORINFO_HELP_UDIV; goto USE_HELPER_FOR_ARITH; } #endif #endif // TARGET_64BIT break; case GT_MOD: if (varTypeIsFloating(typ)) { helper = CORINFO_HELP_DBLREM; noway_assert(op2); if (op1->TypeGet() == TYP_FLOAT) { if (op2->TypeGet() == TYP_FLOAT) { helper = CORINFO_HELP_FLTREM; } else { tree->AsOp()->gtOp1 = op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE); } } else if (op2->TypeGet() == TYP_FLOAT) { tree->AsOp()->gtOp2 = op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE); } goto USE_HELPER_FOR_ARITH; } // Convert MOD to UMOD if boths op1 and op2 are known to be never negative if (!gtIsActiveCSE_Candidate(tree) && varTypeIsIntegral(tree) && op1->IsNeverNegative(this) && op2->IsNeverNegative(this)) { assert(tree->OperIs(GT_MOD)); tree->ChangeOper(GT_UMOD, GenTree::PRESERVE_VN); return fgMorphSmpOp(tree, mac); } // Do not use optimizations (unlike UMOD's idiv optimizing during codegen) for signed mod. // A similar optimization for signed mod will not work for a negative perfectly divisible // HI-word. To make it correct, we would need to divide without the sign and then flip the // result sign after mod. This requires 18 opcodes + flow making it not worthy to inline. goto ASSIGN_HELPER_FOR_MOD; case GT_UMOD: #ifdef TARGET_ARMARCH // // Note for TARGET_ARMARCH we don't have a remainder instruction, so we don't do this optimization // #else // TARGET_XARCH // If this is an unsigned long mod with a constant divisor, // then don't morph to a helper call - it can be done faster inline using idiv. noway_assert(op2); if ((typ == TYP_LONG) && opts.OptEnabled(CLFLG_CONSTANTFOLD)) { if (op2->OperIs(GT_CNS_NATIVELONG) && op2->AsIntConCommon()->LngValue() >= 2 && op2->AsIntConCommon()->LngValue() <= 0x3fffffff) { tree->AsOp()->gtOp1 = op1 = fgMorphTree(op1); noway_assert(op1->TypeIs(TYP_LONG)); // Update flags for op1 morph. tree->gtFlags &= ~GTF_ALL_EFFECT; // Only update with op1 as op2 is a constant. tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT); // If op1 is a constant, then do constant folding of the division operator. 
if (op1->OperIs(GT_CNS_NATIVELONG)) { tree = gtFoldExpr(tree); } if (!tree->OperIsConst()) { tree->AsOp()->CheckDivideByConstOptimized(this); } return tree; } } #endif // TARGET_XARCH ASSIGN_HELPER_FOR_MOD: // For "val % 1", return 0 if op1 doesn't have any side effects // and we are not in the CSE phase, we cannot discard 'tree' // because it may contain CSE expressions that we haven't yet examined. // if (((op1->gtFlags & GTF_SIDE_EFFECT) == 0) && !optValnumCSE_phase) { if (op2->IsIntegralConst(1)) { GenTree* zeroNode = gtNewZeroConNode(typ); #ifdef DEBUG zeroNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif DEBUG_DESTROY_NODE(tree); return zeroNode; } } #ifndef TARGET_64BIT if (typ == TYP_LONG) { helper = (oper == GT_UMOD) ? CORINFO_HELP_ULMOD : CORINFO_HELP_LMOD; goto USE_HELPER_FOR_ARITH; } #if USE_HELPERS_FOR_INT_DIV if (typ == TYP_INT) { if (oper == GT_UMOD) { helper = CORINFO_HELP_UMOD; goto USE_HELPER_FOR_ARITH; } else if (oper == GT_MOD) { helper = CORINFO_HELP_MOD; goto USE_HELPER_FOR_ARITH; } } #endif #endif // !TARGET_64BIT #ifdef TARGET_ARM64 // For ARM64 we don't have a remainder instruction, // The architecture manual suggests the following transformation to // generate code for such operator: // // a % b = a - (a / b) * b; // // TODO: there are special cases where it can be done better, for example // when the modulo operation is unsigned and the divisor is a // integer constant power of two. In this case, we can make the transform: // // a % b = a & (b - 1); // // Lower supports it for all cases except when `a` is constant, but // in Morph we can't guarantee that `a` won't be transformed into a constant, // so can't guarantee that lower will be able to do this optimization. { // Do "a % b = a - (a / b) * b" morph always, see TODO before this block. bool doMorphModToSubMulDiv = true; if (doMorphModToSubMulDiv) { assert(!optValnumCSE_phase); tree = fgMorphModToSubMulDiv(tree->AsOp()); op1 = tree->AsOp()->gtOp1; op2 = tree->AsOp()->gtOp2; } } #else // !TARGET_ARM64 // If b is not a power of 2 constant then lowering replaces a % b // with a - (a / b) * b and applies magic division optimization to // a / b. The code may already contain an a / b expression (e.g. // x = a / 10; y = a % 10;) and then we end up with redundant code. // If we convert % to / here we give CSE the opportunity to eliminate // the redundant division. If there's no redundant division then // nothing is lost, lowering would have done this transform anyway. if (!optValnumCSE_phase && ((tree->OperGet() == GT_MOD) && op2->IsIntegralConst())) { ssize_t divisorValue = op2->AsIntCon()->IconValue(); size_t absDivisorValue = (divisorValue == SSIZE_T_MIN) ? static_cast<size_t>(divisorValue) : static_cast<size_t>(abs(divisorValue)); if (!isPow2(absDivisorValue)) { tree = fgMorphModToSubMulDiv(tree->AsOp()); op1 = tree->AsOp()->gtOp1; op2 = tree->AsOp()->gtOp2; } } #endif // !TARGET_ARM64 break; USE_HELPER_FOR_ARITH: { // TODO: this comment is wrong now, do an appropriate fix. /* We have to morph these arithmetic operations into helper calls before morphing the arguments (preorder), else the arguments won't get correct values of fgPtrArgCntCur. However, try to fold the tree first in case we end up with a simple node which won't need a helper call at all */ noway_assert(tree->OperIsBinary()); GenTree* oldTree = tree; tree = gtFoldExpr(tree); // Were we able to fold it ? // Note that gtFoldExpr may return a non-leaf even if successful // e.g. 
for something like "expr / 1" - see also bug #290853 if (tree->OperIsLeaf() || (oldTree != tree)) { return (oldTree != tree) ? fgMorphTree(tree) : fgMorphLeaf(tree); } // Did we fold it into a comma node with throw? if (tree->gtOper == GT_COMMA) { noway_assert(fgIsCommaThrow(tree)); return fgMorphTree(tree); } } return fgMorphIntoHelperCall(tree, helper, gtNewCallArgs(op1, op2)); case GT_RETURN: if (!tree->TypeIs(TYP_VOID)) { if (op1->OperIs(GT_OBJ, GT_BLK, GT_IND)) { op1 = fgMorphRetInd(tree->AsUnOp()); } if (op1->OperIs(GT_LCL_VAR)) { // With a `genReturnBB` this `RETURN(src)` tree will be replaced by a `ASG(genReturnLocal, src)` // and `ASG` will be tranformed into field by field copy without parent local referencing if // possible. GenTreeLclVar* lclVar = op1->AsLclVar(); unsigned lclNum = lclVar->GetLclNum(); if ((genReturnLocal == BAD_VAR_NUM) || (genReturnLocal == lclNum)) { LclVarDsc* varDsc = lvaGetDesc(lclVar); if (varDsc->CanBeReplacedWithItsField(this)) { // We can replace the struct with its only field and allow copy propagation to replace // return value that was written as a field. unsigned fieldLclNum = varDsc->lvFieldLclStart; LclVarDsc* fieldDsc = lvaGetDesc(fieldLclNum); JITDUMP("Replacing an independently promoted local var V%02u with its only field " "V%02u for " "the return [%06u]\n", lclVar->GetLclNum(), fieldLclNum, dspTreeID(tree)); lclVar->SetLclNum(fieldLclNum); lclVar->ChangeType(fieldDsc->lvType); } } } } // normalize small integer return values if (fgGlobalMorph && varTypeIsSmall(info.compRetType) && (op1 != nullptr) && !op1->TypeIs(TYP_VOID) && fgCastNeeded(op1, info.compRetType)) { // Small-typed return values are normalized by the callee op1 = gtNewCastNode(TYP_INT, op1, false, info.compRetType); // Propagate GTF_COLON_COND op1->gtFlags |= (tree->gtFlags & GTF_COLON_COND); tree->AsOp()->gtOp1 = fgMorphTree(op1); // Propagate side effect flags tree->SetAllEffectsFlags(tree->AsOp()->gtGetOp1()); return tree; } break; case GT_EQ: case GT_NE: { GenTree* optimizedTree = gtFoldTypeCompare(tree); if (optimizedTree != tree) { return fgMorphTree(optimizedTree); } // Pattern-matching optimization: // (a % c) ==/!= 0 // for power-of-2 constant `c` // => // a & (c - 1) ==/!= 0 // For integer `a`, even if negative. 
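// For example, "(a % 8) == 0" becomes "(a & 7) == 0": in two's complement the low
// three bits of `a` decide divisibility by 8 whether `a` is positive or negative.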
if (opts.OptimizationEnabled() && !optValnumCSE_phase) { assert(tree->OperIs(GT_EQ, GT_NE)); if (op1->OperIs(GT_MOD) && varTypeIsIntegral(op1) && op2->IsIntegralConst(0)) { GenTree* op1op2 = op1->AsOp()->gtOp2; if (op1op2->IsCnsIntOrI()) { const ssize_t modValue = op1op2->AsIntCon()->IconValue(); if (isPow2(modValue)) { JITDUMP("\nTransforming:\n"); DISPTREE(tree); op1->SetOper(GT_AND); // Change % => & op1op2->AsIntConCommon()->SetIconValue(modValue - 1); // Change c => c - 1 fgUpdateConstTreeValueNumber(op1op2); JITDUMP("\ninto:\n"); DISPTREE(tree); } } } } } FALLTHROUGH; case GT_GT: { // Try and optimize nullable boxes feeding compares GenTree* optimizedTree = gtFoldBoxNullable(tree); if (optimizedTree->OperGet() != tree->OperGet()) { return optimizedTree; } else { tree = optimizedTree; } op1 = tree->AsOp()->gtOp1; op2 = tree->gtGetOp2IfPresent(); break; } case GT_RUNTIMELOOKUP: return fgMorphTree(op1); #ifdef TARGET_ARM case GT_INTRINSIC: if (tree->AsIntrinsic()->gtIntrinsicName == NI_System_Math_Round) { switch (tree->TypeGet()) { case TYP_DOUBLE: return fgMorphIntoHelperCall(tree, CORINFO_HELP_DBLROUND, gtNewCallArgs(op1)); case TYP_FLOAT: return fgMorphIntoHelperCall(tree, CORINFO_HELP_FLTROUND, gtNewCallArgs(op1)); default: unreached(); } } break; #endif case GT_PUTARG_TYPE: return fgMorphTree(tree->AsUnOp()->gtGetOp1()); case GT_NULLCHECK: { op1 = tree->AsUnOp()->gtGetOp1(); if (op1->IsCall()) { GenTreeCall* const call = op1->AsCall(); if (call->IsHelperCall() && s_helperCallProperties.NonNullReturn(eeGetHelperNum(call->gtCallMethHnd))) { JITDUMP("\nNULLCHECK on [%06u] will always succeed\n", dspTreeID(call)); // TODO: Can we also remove the call? // return fgMorphTree(call); } } } break; default: break; } if (opts.OptimizationEnabled() && fgGlobalMorph) { GenTree* morphed = fgMorphReduceAddOps(tree); if (morphed != tree) return fgMorphTree(morphed); } /*------------------------------------------------------------------------- * Process the first operand, if any */ if (op1) { // If we are entering the "then" part of a Qmark-Colon we must // save the state of the current copy assignment table // so that we can restore this state when entering the "else" part if (isQmarkColon) { noway_assert(optLocalAssertionProp); if (optAssertionCount) { noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea unsigned tabSize = optAssertionCount * sizeof(AssertionDsc); origAssertionTab = (AssertionDsc*)ALLOCA(tabSize); origAssertionCount = optAssertionCount; memcpy(origAssertionTab, optAssertionTabPrivate, tabSize); } else { origAssertionCount = 0; origAssertionTab = nullptr; } } // We might need a new MorphAddressContext context. (These are used to convey // parent context about how addresses being calculated will be used; see the // specification comment for MorphAddrContext for full details.) // Assume it's an Ind context to start. MorphAddrContext subIndMac1(MACK_Ind); MorphAddrContext* subMac1 = mac; if (subMac1 == nullptr || subMac1->m_kind == MACK_Ind) { switch (tree->gtOper) { case GT_ADDR: // A non-null mac here implies this node is part of an address computation. // If so, we need to pass the existing mac down to the child node. // // Otherwise, use a new mac. if (subMac1 == nullptr) { subMac1 = &subIndMac1; subMac1->m_kind = MACK_Addr; } break; case GT_COMMA: // In a comma, the incoming context only applies to the rightmost arg of the // comma list. The left arg (op1) gets a fresh context. 
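// For example (illustrative), in IND(COMMA(sideEffect, addr)) only "addr" feeds the
// address computation; "sideEffect" is evaluated on its own and gets a fresh context.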
subMac1 = nullptr; break; case GT_OBJ: case GT_BLK: case GT_IND: // A non-null mac here implies this node is part of an address computation (the tree parent is // GT_ADDR). // If so, we need to pass the existing mac down to the child node. // // Otherwise, use a new mac. if (subMac1 == nullptr) { subMac1 = &subIndMac1; } break; default: break; } } // For additions, if we're in an IND context keep track of whether // all offsets added to the address are constant, and their sum. if (tree->gtOper == GT_ADD && subMac1 != nullptr) { assert(subMac1->m_kind == MACK_Ind || subMac1->m_kind == MACK_Addr); // Can't be a CopyBlock. GenTree* otherOp = tree->AsOp()->gtOp2; // Is the other operator a constant? if (otherOp->IsCnsIntOrI()) { ClrSafeInt<size_t> totalOffset(subMac1->m_totalOffset); totalOffset += otherOp->AsIntConCommon()->IconValue(); if (totalOffset.IsOverflow()) { // We will consider an offset so large as to overflow as "not a constant" -- // we will do a null check. subMac1->m_allConstantOffsets = false; } else { subMac1->m_totalOffset += otherOp->AsIntConCommon()->IconValue(); } } else { subMac1->m_allConstantOffsets = false; } } // If op1 is a GT_FIELD or indir, we need to pass down the mac if // its parent is GT_ADDR, since the address of op1 // is part of an ongoing address computation. Otherwise // op1 represents the value of the field and so any address // calculations it does are in a new context. if (((op1->gtOper == GT_FIELD) || op1->OperIsIndir()) && (tree->gtOper != GT_ADDR)) { subMac1 = nullptr; // The impact of op1's value to any ongoing // address computation is handled below when looking // at op2. } tree->AsOp()->gtOp1 = op1 = fgMorphTree(op1, subMac1); // If we are exiting the "then" part of a Qmark-Colon we must // save the state of the current copy assignment table // so that we can merge this state with the "else" part exit if (isQmarkColon) { noway_assert(optLocalAssertionProp); if (optAssertionCount) { noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea unsigned tabSize = optAssertionCount * sizeof(AssertionDsc); thenAssertionTab = (AssertionDsc*)ALLOCA(tabSize); thenAssertionCount = optAssertionCount; memcpy(thenAssertionTab, optAssertionTabPrivate, tabSize); } else { thenAssertionCount = 0; thenAssertionTab = nullptr; } } /* Morphing along with folding and inlining may have changed the * side effect flags, so we have to reset them * * NOTE: Don't reset the exception flags on nodes that may throw */ assert(tree->gtOper != GT_CALL); if (!tree->OperRequiresCallFlag(this)) { tree->gtFlags &= ~GTF_CALL; } /* Propagate the new flags */ tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT); // &aliasedVar doesn't need GTF_GLOB_REF, though alisasedVar does // Similarly for clsVar if (oper == GT_ADDR && (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CLS_VAR)) { tree->gtFlags &= ~GTF_GLOB_REF; } } // if (op1) /*------------------------------------------------------------------------- * Process the second operand, if any */ if (op2) { // If we are entering the "else" part of a Qmark-Colon we must // reset the state of the current copy assignment table if (isQmarkColon) { noway_assert(optLocalAssertionProp); optAssertionReset(0); if (origAssertionCount) { size_t tabSize = origAssertionCount * sizeof(AssertionDsc); memcpy(optAssertionTabPrivate, origAssertionTab, tabSize); optAssertionReset(origAssertionCount); } } // We might need a new MorphAddressContext context to use in evaluating op2. 
// (These are used to convey parent context about how addresses being calculated // will be used; see the specification comment for MorphAddrContext for full details.) // Assume it's an Ind context to start. switch (tree->gtOper) { case GT_ADD: if (mac != nullptr && mac->m_kind == MACK_Ind) { GenTree* otherOp = tree->AsOp()->gtOp1; // Is the other operator a constant? if (otherOp->IsCnsIntOrI()) { mac->m_totalOffset += otherOp->AsIntConCommon()->IconValue(); } else { mac->m_allConstantOffsets = false; } } break; default: break; } // If op2 is a GT_FIELD or indir, we must be taking its value, // so it should evaluate its address in a new context. if ((op2->gtOper == GT_FIELD) || op2->OperIsIndir()) { // The impact of op2's value to any ongoing // address computation is handled above when looking // at op1. mac = nullptr; } tree->AsOp()->gtOp2 = op2 = fgMorphTree(op2, mac); /* Propagate the side effect flags from op2 */ tree->gtFlags |= (op2->gtFlags & GTF_ALL_EFFECT); // If we are exiting the "else" part of a Qmark-Colon we must // merge the state of the current copy assignment table with // that of the exit of the "then" part. if (isQmarkColon) { noway_assert(optLocalAssertionProp); // If either exit table has zero entries then // the merged table also has zero entries if (optAssertionCount == 0 || thenAssertionCount == 0) { optAssertionReset(0); } else { size_t tabSize = optAssertionCount * sizeof(AssertionDsc); if ((optAssertionCount != thenAssertionCount) || (memcmp(thenAssertionTab, optAssertionTabPrivate, tabSize) != 0)) { // Yes they are different so we have to find the merged set // Iterate over the copy asgn table removing any entries // that do not have an exact match in the thenAssertionTab AssertionIndex index = 1; while (index <= optAssertionCount) { AssertionDsc* curAssertion = optGetAssertion(index); for (unsigned j = 0; j < thenAssertionCount; j++) { AssertionDsc* thenAssertion = &thenAssertionTab[j]; // Do the left sides match? if ((curAssertion->op1.lcl.lclNum == thenAssertion->op1.lcl.lclNum) && (curAssertion->assertionKind == thenAssertion->assertionKind)) { // Do the right sides match? 
if ((curAssertion->op2.kind == thenAssertion->op2.kind) && (curAssertion->op2.lconVal == thenAssertion->op2.lconVal)) { goto KEEP; } else { goto REMOVE; } } } // // If we fall out of the loop above then we didn't find // any matching entry in the thenAssertionTab so it must // have been killed on that path so we remove it here // REMOVE: // The data at optAssertionTabPrivate[i] is to be removed CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) { printf("The QMARK-COLON "); printTreeID(tree); printf(" removes assertion candidate #%d\n", index); } #endif optAssertionRemove(index); continue; KEEP: // The data at optAssertionTabPrivate[i] is to be kept index++; } } } } } // if (op2) #ifndef TARGET_64BIT DONE_MORPHING_CHILDREN: #endif // !TARGET_64BIT if (tree->OperIsIndirOrArrLength()) { tree->SetIndirExceptionFlags(this); } else { if (tree->OperMayThrow(this)) { // Mark the tree node as potentially throwing an exception tree->gtFlags |= GTF_EXCEPT; } else { if (((op1 == nullptr) || ((op1->gtFlags & GTF_EXCEPT) == 0)) && ((op2 == nullptr) || ((op2->gtFlags & GTF_EXCEPT) == 0))) { tree->gtFlags &= ~GTF_EXCEPT; } } } if (tree->OperRequiresAsgFlag()) { tree->gtFlags |= GTF_ASG; } else { if (((op1 == nullptr) || ((op1->gtFlags & GTF_ASG) == 0)) && ((op2 == nullptr) || ((op2->gtFlags & GTF_ASG) == 0))) { tree->gtFlags &= ~GTF_ASG; } } if (tree->OperRequiresCallFlag(this)) { tree->gtFlags |= GTF_CALL; } else { if (((op1 == nullptr) || ((op1->gtFlags & GTF_CALL) == 0)) && ((op2 == nullptr) || ((op2->gtFlags & GTF_CALL) == 0))) { tree->gtFlags &= ~GTF_CALL; } } /*------------------------------------------------------------------------- * Now do POST-ORDER processing */ if (varTypeIsGC(tree->TypeGet()) && (op1 && !varTypeIsGC(op1->TypeGet())) && (op2 && !varTypeIsGC(op2->TypeGet()))) { // The tree is really not GC but was marked as such. Now that the // children have been unmarked, unmark the tree too. // Remember that GT_COMMA inherits it's type only from op2 if (tree->gtOper == GT_COMMA) { tree->gtType = genActualType(op2->TypeGet()); } else { tree->gtType = genActualType(op1->TypeGet()); } } GenTree* oldTree = tree; GenTree* qmarkOp1 = nullptr; GenTree* qmarkOp2 = nullptr; if ((tree->OperGet() == GT_QMARK) && (tree->AsOp()->gtOp2->OperGet() == GT_COLON)) { qmarkOp1 = oldTree->AsOp()->gtOp2->AsOp()->gtOp1; qmarkOp2 = oldTree->AsOp()->gtOp2->AsOp()->gtOp2; } // Try to fold it, maybe we get lucky, tree = gtFoldExpr(tree); if (oldTree != tree) { /* if gtFoldExpr returned op1 or op2 then we are done */ if ((tree == op1) || (tree == op2) || (tree == qmarkOp1) || (tree == qmarkOp2)) { return tree; } /* If we created a comma-throw tree then we need to morph op1 */ if (fgIsCommaThrow(tree)) { tree->AsOp()->gtOp1 = fgMorphTree(tree->AsOp()->gtOp1); fgMorphTreeDone(tree); return tree; } return tree; } else if (tree->OperIsConst()) { return tree; } /* gtFoldExpr could have used setOper to change the oper */ oper = tree->OperGet(); typ = tree->TypeGet(); /* gtFoldExpr could have changed op1 and op2 */ op1 = tree->AsOp()->gtOp1; op2 = tree->gtGetOp2IfPresent(); // Do we have an integer compare operation? // if (tree->OperIsCompare() && varTypeIsIntegralOrI(tree->TypeGet())) { // Are we comparing against zero? // if (op2->IsIntegralConst(0)) { // Request that the codegen for op1 sets the condition flags // when it generates the code for op1. // // Codegen for op1 must set the condition flags if // this method returns true. 
// op1->gtRequestSetFlags(); } } /*------------------------------------------------------------------------- * Perform the required oper-specific postorder morphing */ GenTree* temp; size_t ival1; GenTree* lclVarTree; GenTree* effectiveOp1; FieldSeqNode* fieldSeq = nullptr; switch (oper) { case GT_ASG: if (op1->OperIs(GT_LCL_VAR) && ((op1->gtFlags & GTF_VAR_FOLDED_IND) != 0)) { op1->gtFlags &= ~GTF_VAR_FOLDED_IND; tree = fgDoNormalizeOnStore(tree); op2 = tree->gtGetOp2(); } lclVarTree = fgIsIndirOfAddrOfLocal(op1); if (lclVarTree != nullptr) { lclVarTree->gtFlags |= GTF_VAR_DEF; } effectiveOp1 = op1->gtEffectiveVal(); // If we are storing a small type, we might be able to omit a cast. if (effectiveOp1->OperIs(GT_IND, GT_CLS_VAR) && varTypeIsSmall(effectiveOp1)) { if (!gtIsActiveCSE_Candidate(op2) && op2->OperIs(GT_CAST) && varTypeIsIntegral(op2->AsCast()->CastOp()) && !op2->gtOverflow()) { var_types castType = op2->CastToType(); // If we are performing a narrowing cast and // castType is larger or the same as op1's type // then we can discard the cast. if (varTypeIsSmall(castType) && (genTypeSize(castType) >= genTypeSize(effectiveOp1))) { tree->AsOp()->gtOp2 = op2 = op2->AsCast()->CastOp(); } } } fgAssignSetVarDef(tree); /* We can't CSE the LHS of an assignment */ /* We also must set in the pre-morphing phase, otherwise assertionProp doesn't see it */ if (op1->IsLocal() || (op1->TypeGet() != TYP_STRUCT)) { op1->gtFlags |= GTF_DONT_CSE; } break; case GT_CAST: tree = fgOptimizeCast(tree->AsCast()); if (!tree->OperIsSimple()) { return tree; } if (tree->OperIs(GT_CAST) && tree->gtOverflow()) { fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW); } typ = tree->TypeGet(); oper = tree->OperGet(); op1 = tree->AsOp()->gtGetOp1(); op2 = tree->gtGetOp2IfPresent(); break; case GT_EQ: case GT_NE: // It is not safe to reorder/delete CSE's if (!optValnumCSE_phase && op2->IsIntegralConst()) { tree = fgOptimizeEqualityComparisonWithConst(tree->AsOp()); assert(tree->OperIsCompare()); oper = tree->OperGet(); op1 = tree->gtGetOp1(); op2 = tree->gtGetOp2(); } goto COMPARE; case GT_LT: case GT_LE: case GT_GE: case GT_GT: if (!optValnumCSE_phase && (op1->OperIs(GT_CAST) || op2->OperIs(GT_CAST))) { tree = fgOptimizeRelationalComparisonWithCasts(tree->AsOp()); oper = tree->OperGet(); op1 = tree->gtGetOp1(); op2 = tree->gtGetOp2(); } // op2's value may be changed, so it cannot be a CSE candidate. if (op2->IsIntegralConst() && !gtIsActiveCSE_Candidate(op2)) { tree = fgOptimizeRelationalComparisonWithConst(tree->AsOp()); oper = tree->OperGet(); assert(op1 == tree->AsOp()->gtGetOp1()); assert(op2 == tree->AsOp()->gtGetOp2()); } COMPARE: noway_assert(tree->OperIsCompare()); break; case GT_MUL: #ifndef TARGET_64BIT if (typ == TYP_LONG) { // This must be GTF_MUL_64RSLT INDEBUG(tree->AsOp()->DebugCheckLongMul()); return tree; } #endif // TARGET_64BIT goto CM_OVF_OP; case GT_SUB: if (tree->gtOverflow()) { goto CM_OVF_OP; } // TODO #4104: there are a lot of other places where // this condition is not checked before transformations. if (fgGlobalMorph) { /* Check for "op1 - cns2" , we change it to "op1 + (-cns2)" */ noway_assert(op2); if (op2->IsCnsIntOrI() && !op2->IsIconHandle()) { // Negate the constant and change the node to be "+", // except when `op2` is a const byref. 
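// For example, "x - 5" becomes "x + (-5)", so the commutative ADD morphing at
// CM_ADD_OP below can treat it like any other addition.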
op2->AsIntConCommon()->SetIconValue(-op2->AsIntConCommon()->IconValue()); op2->AsIntConRef().gtFieldSeq = FieldSeqStore::NotAField(); oper = GT_ADD; tree->ChangeOper(oper); goto CM_ADD_OP; } /* Check for "cns1 - op2" , we change it to "(cns1 + (-op2))" */ noway_assert(op1); if (op1->IsCnsIntOrI()) { noway_assert(varTypeIsIntOrI(tree)); // The type of the new GT_NEG node cannot just be op2->TypeGet(). // Otherwise we may sign-extend incorrectly in cases where the GT_NEG // node ends up feeding directly into a cast, for example in // GT_CAST<ubyte>(GT_SUB(0, s_1.ubyte)) tree->AsOp()->gtOp2 = op2 = gtNewOperNode(GT_NEG, genActualType(op2->TypeGet()), op2); fgMorphTreeDone(op2); oper = GT_ADD; tree->ChangeOper(oper); goto CM_ADD_OP; } /* No match - exit */ } // Skip optimization if non-NEG operand is constant. // Both op1 and op2 are not constant because it was already checked above. if (opts.OptimizationEnabled() && fgGlobalMorph) { // a - -b = > a + b // SUB(a, (NEG(b)) => ADD(a, b) if (!op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG)) { // tree: SUB // op1: a // op2: NEG // op2Child: b GenTree* op2Child = op2->AsOp()->gtOp1; // b oper = GT_ADD; tree->SetOper(oper, GenTree::PRESERVE_VN); tree->AsOp()->gtOp2 = op2Child; DEBUG_DESTROY_NODE(op2); op2 = op2Child; } // -a - -b = > b - a // SUB(NEG(a), (NEG(b)) => SUB(b, a) else if (op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG) && gtCanSwapOrder(op1, op2)) { // tree: SUB // op1: NEG // op1Child: a // op2: NEG // op2Child: b GenTree* op1Child = op1->AsOp()->gtOp1; // a GenTree* op2Child = op2->AsOp()->gtOp1; // b tree->AsOp()->gtOp1 = op2Child; tree->AsOp()->gtOp2 = op1Child; DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(op2); op1 = op2Child; op2 = op1Child; } } break; #ifdef TARGET_ARM64 case GT_DIV: if (!varTypeIsFloating(tree->gtType)) { // Codegen for this instruction needs to be able to throw two exceptions: fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW); fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO); } break; case GT_UDIV: // Codegen for this instruction needs to be able to throw one exception: fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO); break; #endif case GT_ADD: CM_OVF_OP: if (tree->gtOverflow()) { tree->gtRequestSetFlags(); // Add the excptn-throwing basic block to jump to on overflow fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW); // We can't do any commutative morphing for overflow instructions break; } CM_ADD_OP: FALLTHROUGH; case GT_OR: case GT_XOR: case GT_AND: tree = fgOptimizeCommutativeArithmetic(tree->AsOp()); if (!tree->OperIsSimple()) { return tree; } typ = tree->TypeGet(); oper = tree->OperGet(); op1 = tree->gtGetOp1(); op2 = tree->gtGetOp2IfPresent(); break; case GT_NOT: case GT_NEG: // Remove double negation/not. // Note: this is not a safe tranformation if "tree" is a CSE candidate. // Consider for example the following expression: NEG(NEG(OP)), where any // NEG is a CSE candidate. Were we to morph this to just OP, CSE would fail to find // the original NEG in the statement. 
if (op1->OperIs(oper) && opts.OptimizationEnabled() && !gtIsActiveCSE_Candidate(tree) && !gtIsActiveCSE_Candidate(op1)) { JITDUMP("Remove double negation/not\n") GenTree* op1op1 = op1->gtGetOp1(); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(op1); return op1op1; } // Distribute negation over simple multiplication/division expressions if (opts.OptimizationEnabled() && !optValnumCSE_phase && tree->OperIs(GT_NEG) && op1->OperIs(GT_MUL, GT_DIV)) { GenTreeOp* mulOrDiv = op1->AsOp(); GenTree* op1op1 = mulOrDiv->gtGetOp1(); GenTree* op1op2 = mulOrDiv->gtGetOp2(); if (!op1op1->IsCnsIntOrI() && op1op2->IsCnsIntOrI() && !op1op2->IsIconHandle()) { // NEG(MUL(a, C)) => MUL(a, -C) // NEG(DIV(a, C)) => DIV(a, -C), except when C = {-1, 1} ssize_t constVal = op1op2->AsIntCon()->IconValue(); if ((mulOrDiv->OperIs(GT_DIV) && (constVal != -1) && (constVal != 1)) || (mulOrDiv->OperIs(GT_MUL) && !mulOrDiv->gtOverflow())) { GenTree* newOp1 = op1op1; // a GenTree* newOp2 = gtNewIconNode(-constVal, op1op2->TypeGet()); // -C mulOrDiv->gtOp1 = newOp1; mulOrDiv->gtOp2 = newOp2; mulOrDiv->SetVNsFromNode(tree); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(op1op2); return mulOrDiv; } } } /* Any constant cases should have been folded earlier */ noway_assert(!op1->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD) || optValnumCSE_phase); break; case GT_CKFINITE: noway_assert(varTypeIsFloating(op1->TypeGet())); fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_ARITH_EXCPN); break; case GT_BOUNDS_CHECK: fgSetRngChkTarget(tree); break; case GT_OBJ: case GT_BLK: case GT_IND: { // If we have IND(ADDR(X)) and X has GTF_GLOB_REF, we must set GTF_GLOB_REF on // the OBJ. Note that the GTF_GLOB_REF will have been cleared on ADDR(X) where X // is a local or CLS_VAR, even if it has been address-exposed. if (op1->OperIs(GT_ADDR)) { tree->gtFlags |= (op1->AsUnOp()->gtGetOp1()->gtFlags & GTF_GLOB_REF); } if (!tree->OperIs(GT_IND)) { break; } // Can not remove a GT_IND if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(tree)) { break; } bool foldAndReturnTemp = false; temp = nullptr; ival1 = 0; // Don't remove a volatile GT_IND, even if the address points to a local variable. if ((tree->gtFlags & GTF_IND_VOLATILE) == 0) { /* Try to Fold *(&X) into X */ if (op1->gtOper == GT_ADDR) { // Can not remove a GT_ADDR if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(op1)) { break; } temp = op1->AsOp()->gtOp1; // X // In the test below, if they're both TYP_STRUCT, this of course does *not* mean that // they are the *same* struct type. In fact, they almost certainly aren't. If the // address has an associated field sequence, that identifies this case; go through // the "lcl_fld" path rather than this one. FieldSeqNode* addrFieldSeq = nullptr; // This is an unused out parameter below. 
if (typ == temp->TypeGet() && !GetZeroOffsetFieldMap()->Lookup(op1, &addrFieldSeq)) { foldAndReturnTemp = true; } else if (temp->OperIsLocal()) { unsigned lclNum = temp->AsLclVarCommon()->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(lclNum); // We will try to optimize when we have a promoted struct promoted with a zero lvFldOffset if (varDsc->lvPromoted && (varDsc->lvFldOffset == 0)) { noway_assert(varTypeIsStruct(varDsc)); // We will try to optimize when we have a single field struct that is being struct promoted if (varDsc->lvFieldCnt == 1) { unsigned lclNumFld = varDsc->lvFieldLclStart; // just grab the promoted field LclVarDsc* fieldVarDsc = lvaGetDesc(lclNumFld); // Also make sure that the tree type matches the fieldVarType and that it's lvFldOffset // is zero if (fieldVarDsc->TypeGet() == typ && (fieldVarDsc->lvFldOffset == 0)) { // We can just use the existing promoted field LclNum temp->AsLclVarCommon()->SetLclNum(lclNumFld); temp->gtType = fieldVarDsc->TypeGet(); foldAndReturnTemp = true; } } } // If the type of the IND (typ) is a "small int", and the type of the local has the // same width, then we can reduce to just the local variable -- it will be // correctly normalized. // // The below transformation cannot be applied if the local var needs to be normalized on load. else if (varTypeIsSmall(typ) && (genTypeSize(varDsc) == genTypeSize(typ)) && !lvaTable[lclNum].lvNormalizeOnLoad()) { const bool definitelyLoad = (tree->gtFlags & GTF_DONT_CSE) == 0; const bool possiblyStore = !definitelyLoad; if (possiblyStore || (varTypeIsUnsigned(varDsc) == varTypeIsUnsigned(typ))) { typ = temp->TypeGet(); tree->gtType = typ; foldAndReturnTemp = true; if (possiblyStore) { // This node can be on the left-hand-side of an assignment node. // Mark this node with GTF_VAR_FOLDED_IND to make sure that fgDoNormalizeOnStore() // is called on its parent in post-order morph. temp->gtFlags |= GTF_VAR_FOLDED_IND; } } } // For matching types we can fold else if (!varTypeIsStruct(typ) && (lvaTable[lclNum].lvType == typ) && !lvaTable[lclNum].lvNormalizeOnLoad()) { tree->gtType = typ = temp->TypeGet(); foldAndReturnTemp = true; } else { // Assumes that when Lookup returns "false" it will leave "fieldSeq" unmodified (i.e. // nullptr) assert(fieldSeq == nullptr); bool b = GetZeroOffsetFieldMap()->Lookup(op1, &fieldSeq); assert(b || fieldSeq == nullptr); if ((fieldSeq != nullptr) && (temp->OperGet() == GT_LCL_FLD)) { // Append the field sequence, change the type. temp->AsLclFld()->SetFieldSeq( GetFieldSeqStore()->Append(temp->AsLclFld()->GetFieldSeq(), fieldSeq)); temp->gtType = typ; foldAndReturnTemp = true; } } // Otherwise will will fold this into a GT_LCL_FLD below // where we check (temp != nullptr) } else // !temp->OperIsLocal() { // We don't try to fold away the GT_IND/GT_ADDR for this case temp = nullptr; } } else if (op1->OperGet() == GT_ADD) { #ifdef TARGET_ARM // Check for a misalignment floating point indirection. 
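// That is (illustrative), a floating point load such as IND(ADD(p, 2)), whose constant
// offset is not a multiple of the float size, must be marked GTF_IND_UNALIGNED on ARM.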
if (varTypeIsFloating(typ)) { GenTree* addOp2 = op1->AsOp()->gtGetOp2(); if (addOp2->IsCnsIntOrI()) { ssize_t offset = addOp2->AsIntCon()->gtIconVal; if ((offset % emitTypeSize(TYP_FLOAT)) != 0) { tree->gtFlags |= GTF_IND_UNALIGNED; } } } #endif // TARGET_ARM /* Try to change *(&lcl + cns) into lcl[cns] to prevent materialization of &lcl */ if (op1->AsOp()->gtOp1->OperGet() == GT_ADDR && op1->AsOp()->gtOp2->OperGet() == GT_CNS_INT && opts.OptimizationEnabled()) { // No overflow arithmetic with pointers noway_assert(!op1->gtOverflow()); temp = op1->AsOp()->gtOp1->AsOp()->gtOp1; if (!temp->OperIsLocal()) { temp = nullptr; break; } // Can not remove the GT_ADDR if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(op1->AsOp()->gtOp1)) { break; } ival1 = op1->AsOp()->gtOp2->AsIntCon()->gtIconVal; fieldSeq = op1->AsOp()->gtOp2->AsIntCon()->gtFieldSeq; // Does the address have an associated zero-offset field sequence? FieldSeqNode* addrFieldSeq = nullptr; if (GetZeroOffsetFieldMap()->Lookup(op1->AsOp()->gtOp1, &addrFieldSeq)) { fieldSeq = GetFieldSeqStore()->Append(addrFieldSeq, fieldSeq); } if (ival1 == 0 && typ == temp->TypeGet() && temp->TypeGet() != TYP_STRUCT) { noway_assert(!varTypeIsGC(temp->TypeGet())); foldAndReturnTemp = true; } else { // The emitter can't handle large offsets if (ival1 != (unsigned short)ival1) { break; } // The emitter can get confused by invalid offsets if (ival1 >= Compiler::lvaLclSize(temp->AsLclVarCommon()->GetLclNum())) { break; } } // Now we can fold this into a GT_LCL_FLD below // where we check (temp != nullptr) } } } // At this point we may have a lclVar or lclFld that might be foldable with a bit of extra massaging: // - We may have a load of a local where the load has a different type than the local // - We may have a load of a local plus an offset // // In these cases, we will change the lclVar or lclFld into a lclFld of the appropriate type and // offset if doing so is legal. The only cases in which this transformation is illegal are if the load // begins before the local or if the load extends beyond the end of the local (i.e. if the load is // out-of-bounds w.r.t. the local). if ((temp != nullptr) && !foldAndReturnTemp) { assert(temp->OperIsLocal()); const unsigned lclNum = temp->AsLclVarCommon()->GetLclNum(); LclVarDsc* const varDsc = lvaGetDesc(lclNum); const var_types tempTyp = temp->TypeGet(); const bool useExactSize = varTypeIsStruct(tempTyp) || (tempTyp == TYP_BLK) || (tempTyp == TYP_LCLBLK); const unsigned varSize = useExactSize ? varDsc->lvExactSize : genTypeSize(temp); // Make sure we do not enregister this lclVar. lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LocalField)); // If the size of the load is greater than the size of the lclVar, we cannot fold this access into // a lclFld: the access represented by an lclFld node must begin at or after the start of the // lclVar and must not extend beyond the end of the lclVar. if ((ival1 >= 0) && ((ival1 + genTypeSize(typ)) <= varSize)) { GenTreeLclFld* lclFld; // We will turn a GT_LCL_VAR into a GT_LCL_FLD with an gtLclOffs of 'ival' // or if we already have a GT_LCL_FLD we will adjust the gtLclOffs by adding 'ival' // Then we change the type of the GT_LCL_FLD to match the orginal GT_IND type. // if (temp->OperGet() == GT_LCL_FLD) { lclFld = temp->AsLclFld(); lclFld->SetLclOffs(lclFld->GetLclOffs() + static_cast<unsigned>(ival1)); lclFld->SetFieldSeq(GetFieldSeqStore()->Append(lclFld->GetFieldSeq(), fieldSeq)); } else // We have a GT_LCL_VAR. 
{ assert(temp->OperGet() == GT_LCL_VAR); temp->ChangeOper(GT_LCL_FLD); // Note that this makes the gtFieldSeq "NotAField". lclFld = temp->AsLclFld(); lclFld->SetLclOffs(static_cast<unsigned>(ival1)); if (fieldSeq != nullptr) { // If it does represent a field, note that. lclFld->SetFieldSeq(fieldSeq); } } temp->gtType = tree->gtType; foldAndReturnTemp = true; } } if (foldAndReturnTemp) { assert(temp != nullptr); assert(temp->TypeGet() == typ); assert((op1->OperGet() == GT_ADD) || (op1->OperGet() == GT_ADDR)); // Copy the value of GTF_DONT_CSE from the original tree to `temp`: it can be set for // 'temp' because a GT_ADDR always marks it for its operand. temp->gtFlags &= ~GTF_DONT_CSE; temp->gtFlags |= (tree->gtFlags & GTF_DONT_CSE); if (op1->OperGet() == GT_ADD) { DEBUG_DESTROY_NODE(op1->AsOp()->gtOp1); // GT_ADDR DEBUG_DESTROY_NODE(op1->AsOp()->gtOp2); // GT_CNS_INT } DEBUG_DESTROY_NODE(op1); // GT_ADD or GT_ADDR DEBUG_DESTROY_NODE(tree); // GT_IND // If the result of the fold is a local var, we may need to perform further adjustments e.g. for // normalization. if (temp->OperIs(GT_LCL_VAR)) { #ifdef DEBUG // We clear this flag on `temp` because `fgMorphLocalVar` may assert that this bit is clear // and the node in question must have this bit set (as it has already been morphed). temp->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; #endif // DEBUG const bool forceRemorph = true; temp = fgMorphLocalVar(temp, forceRemorph); #ifdef DEBUG // We then set this flag on `temp` because `fgMorhpLocalVar` may not set it itself, and the // caller of `fgMorphSmpOp` may assert that this flag is set on `temp` once this function // returns. temp->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif // DEBUG } return temp; } // Only do this optimization when we are in the global optimizer. Doing this after value numbering // could result in an invalid value number for the newly generated GT_IND node. if ((op1->OperGet() == GT_COMMA) && fgGlobalMorph) { // Perform the transform IND(COMMA(x, ..., z)) == COMMA(x, ..., IND(z)). // TBD: this transformation is currently necessary for correctness -- it might // be good to analyze the failures that result if we don't do this, and fix them // in other ways. Ideally, this should be optional. GenTree* commaNode = op1; GenTreeFlags treeFlags = tree->gtFlags; commaNode->gtType = typ; commaNode->gtFlags = (treeFlags & ~GTF_REVERSE_OPS); // Bashing the GT_COMMA flags here is // dangerous, clear the GTF_REVERSE_OPS at // least. #ifdef DEBUG commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif while (commaNode->AsOp()->gtOp2->gtOper == GT_COMMA) { commaNode = commaNode->AsOp()->gtOp2; commaNode->gtType = typ; commaNode->gtFlags = (treeFlags & ~GTF_REVERSE_OPS & ~GTF_ASG & ~GTF_CALL); // Bashing the GT_COMMA flags here is // dangerous, clear the GTF_REVERSE_OPS, GT_ASG, and GT_CALL at // least. commaNode->gtFlags |= ((commaNode->AsOp()->gtOp1->gtFlags | commaNode->AsOp()->gtOp2->gtFlags) & (GTF_ASG | GTF_CALL)); #ifdef DEBUG commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif } bool wasArrIndex = (tree->gtFlags & GTF_IND_ARR_INDEX) != 0; ArrayInfo arrInfo; if (wasArrIndex) { bool b = GetArrayInfoMap()->Lookup(tree, &arrInfo); assert(b); GetArrayInfoMap()->Remove(tree); } tree = op1; GenTree* addr = commaNode->AsOp()->gtOp2; // TODO-1stClassStructs: we often create a struct IND without a handle, fix it. 
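// Build the new indirection over the comma's last operand; e.g. (illustrative)
// IND(COMMA(asg, addr)) becomes COMMA(asg, IND(addr)). The flags for the new IND
// are reconstructed from the original tree below.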
op1 = gtNewIndir(typ, addr); // This is very conservative op1->gtFlags |= treeFlags & ~GTF_ALL_EFFECT & ~GTF_IND_NONFAULTING; op1->gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT); if (wasArrIndex) { GetArrayInfoMap()->Set(op1, arrInfo); } #ifdef DEBUG op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif commaNode->AsOp()->gtOp2 = op1; commaNode->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT); return tree; } break; } case GT_ADDR: // Can not remove op1 if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(op1)) { break; } if (op1->OperGet() == GT_IND) { if ((op1->gtFlags & GTF_IND_ARR_INDEX) == 0) { // Can not remove a GT_ADDR if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(tree)) { break; } // Perform the transform ADDR(IND(...)) == (...). GenTree* addr = op1->AsOp()->gtOp1; // If tree has a zero field sequence annotation, update the annotation // on addr node. FieldSeqNode* zeroFieldSeq = nullptr; if (GetZeroOffsetFieldMap()->Lookup(tree, &zeroFieldSeq)) { fgAddFieldSeqForZeroOffset(addr, zeroFieldSeq); } noway_assert(varTypeIsGC(addr->gtType) || addr->gtType == TYP_I_IMPL); DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(tree); return addr; } } else if (op1->OperGet() == GT_OBJ) { // Can not remove a GT_ADDR if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(tree)) { break; } // Perform the transform ADDR(OBJ(...)) == (...). GenTree* addr = op1->AsObj()->Addr(); noway_assert(varTypeIsGC(addr->gtType) || addr->gtType == TYP_I_IMPL); DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(tree); return addr; } else if ((op1->gtOper == GT_COMMA) && !optValnumCSE_phase) { // Perform the transform ADDR(COMMA(x, ..., z)) == COMMA(x, ..., ADDR(z)). // (Be sure to mark "z" as an l-value...) ArrayStack<GenTree*> commas(getAllocator(CMK_ArrayStack)); for (GenTree* comma = op1; comma != nullptr && comma->gtOper == GT_COMMA; comma = comma->gtGetOp2()) { commas.Push(comma); } GenTree* commaNode = commas.Top(); // The top-level addr might be annotated with a zeroOffset field. FieldSeqNode* zeroFieldSeq = nullptr; bool isZeroOffset = GetZeroOffsetFieldMap()->Lookup(tree, &zeroFieldSeq); tree = op1; commaNode->AsOp()->gtOp2->gtFlags |= GTF_DONT_CSE; // If the node we're about to put under a GT_ADDR is an indirection, it // doesn't need to be materialized, since we only want the addressing mode. Because // of this, this GT_IND is not a faulting indirection and we don't have to extract it // as a side effect. GenTree* commaOp2 = commaNode->AsOp()->gtOp2; if (commaOp2->OperIsBlk()) { commaOp2->SetOper(GT_IND); } if (commaOp2->gtOper == GT_IND) { commaOp2->gtFlags |= GTF_IND_NONFAULTING; commaOp2->gtFlags &= ~GTF_EXCEPT; commaOp2->gtFlags |= (commaOp2->AsOp()->gtOp1->gtFlags & GTF_EXCEPT); } op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, commaOp2); if (isZeroOffset) { // Transfer the annotation to the new GT_ADDR node. fgAddFieldSeqForZeroOffset(op1, zeroFieldSeq); } commaNode->AsOp()->gtOp2 = op1; // Originally, I gave all the comma nodes type "byref". But the ADDR(IND(x)) == x transform // might give op1 a type different from byref (like, say, native int). So now go back and give // all the comma nodes the type of op1. // TODO: the comma flag update below is conservative and can be improved. // For example, if we made the ADDR(IND(x)) == x transformation, we may be able to // get rid of some of the IND flags on the COMMA nodes (e.g., GTF_GLOB_REF). 
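// The loop below retypes each saved comma node to op1's type (innermost comma
// first) and refreshes its side-effect flags.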
while (!commas.Empty()) { GenTree* comma = commas.Pop(); comma->gtType = op1->gtType; comma->gtFlags |= op1->gtFlags; #ifdef DEBUG comma->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif gtUpdateNodeSideEffects(comma); } return tree; } break; case GT_COLON: if (fgGlobalMorph) { /* Mark the nodes that are conditionally executed */ fgWalkTreePre(&tree, gtMarkColonCond); } /* Since we're doing this postorder we clear this if it got set by a child */ fgRemoveRestOfBlock = false; break; case GT_COMMA: /* Special case: trees that don't produce a value */ if (op2->OperIs(GT_ASG) || (op2->OperGet() == GT_COMMA && op2->TypeGet() == TYP_VOID) || fgIsThrow(op2)) { typ = tree->gtType = TYP_VOID; } // If we are in the Valuenum CSE phase then don't morph away anything as these // nodes may have CSE defs/uses in them. // if (!optValnumCSE_phase) { // Extract the side effects from the left side of the comma. Since they don't "go" anywhere, this // is all we need. GenTree* op1SideEffects = nullptr; // The addition of "GTF_MAKE_CSE" below prevents us from throwing away (for example) // hoisted expressions in loops. gtExtractSideEffList(op1, &op1SideEffects, (GTF_SIDE_EFFECT | GTF_MAKE_CSE)); if (op1SideEffects) { // Replace the left hand side with the side effect list. op1 = op1SideEffects; tree->AsOp()->gtOp1 = op1SideEffects; gtUpdateNodeSideEffects(tree); } else { op2->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG)); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(op1); return op2; } // If the right operand is just a void nop node, throw it away. Unless this is a // comma throw, in which case we want the top-level morphing loop to recognize it. if (op2->IsNothingNode() && op1->TypeIs(TYP_VOID) && !fgIsCommaThrow(tree)) { op1->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG)); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(op2); return op1; } } break; case GT_JTRUE: /* Special case if fgRemoveRestOfBlock is set to true */ if (fgRemoveRestOfBlock) { if (fgIsCommaThrow(op1, true)) { GenTree* throwNode = op1->AsOp()->gtOp1; JITDUMP("Removing [%06d] GT_JTRUE as the block now unconditionally throws an exception.\n", dspTreeID(tree)); DEBUG_DESTROY_NODE(tree); return throwNode; } noway_assert(op1->OperIsCompare()); noway_assert(op1->gtFlags & GTF_EXCEPT); // We need to keep op1 for the side-effects. Hang it off // a GT_COMMA node JITDUMP("Keeping side-effects by bashing [%06d] GT_JTRUE into a GT_COMMA.\n", dspTreeID(tree)); tree->ChangeOper(GT_COMMA); tree->AsOp()->gtOp2 = op2 = gtNewNothingNode(); // Additionally since we're eliminating the JTRUE // codegen won't like it if op1 is a RELOP of longs, floats or doubles. // So we change it into a GT_COMMA as well. 
JITDUMP("Also bashing [%06d] (a relop) into a GT_COMMA.\n", dspTreeID(op1)); op1->ChangeOper(GT_COMMA); op1->gtFlags &= ~GTF_UNSIGNED; // Clear the unsigned flag if it was set on the relop op1->gtType = op1->AsOp()->gtOp1->gtType; return tree; } break; case GT_INTRINSIC: if (tree->AsIntrinsic()->gtIntrinsicName == NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant) { // Should be expanded by the time it reaches CSE phase assert(!optValnumCSE_phase); JITDUMP("\nExpanding RuntimeHelpers.IsKnownConstant to "); if (op1->OperIsConst()) { // We're lucky to catch a constant here while importer was not JITDUMP("true\n"); DEBUG_DESTROY_NODE(tree, op1); tree = gtNewIconNode(1); } else { GenTree* op1SideEffects = nullptr; gtExtractSideEffList(op1, &op1SideEffects, GTF_ALL_EFFECT); if (op1SideEffects != nullptr) { DEBUG_DESTROY_NODE(tree); // Keep side-effects of op1 tree = gtNewOperNode(GT_COMMA, TYP_INT, op1SideEffects, gtNewIconNode(0)); JITDUMP("false with side effects:\n") DISPTREE(tree); } else { JITDUMP("false\n"); DEBUG_DESTROY_NODE(tree, op1); tree = gtNewIconNode(0); } } INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return tree; } break; default: break; } assert(oper == tree->gtOper); // Propagate comma throws. // If we are in the Valuenum CSE phase then don't morph away anything as these // nodes may have CSE defs/uses in them. if (fgGlobalMorph && (oper != GT_ASG) && (oper != GT_COLON)) { if ((op1 != nullptr) && fgIsCommaThrow(op1, true)) { GenTree* propagatedThrow = fgPropagateCommaThrow(tree, op1->AsOp(), GTF_EMPTY); if (propagatedThrow != nullptr) { return propagatedThrow; } } if ((op2 != nullptr) && fgIsCommaThrow(op2, true)) { GenTree* propagatedThrow = fgPropagateCommaThrow(tree, op2->AsOp(), op1->gtFlags & GTF_ALL_EFFECT); if (propagatedThrow != nullptr) { return propagatedThrow; } } } /*------------------------------------------------------------------------- * Optional morphing is done if tree transformations is permitted */ if ((opts.compFlags & CLFLG_TREETRANS) == 0) { return tree; } tree = fgMorphSmpOpOptional(tree->AsOp()); return tree; } //------------------------------------------------------------------------ // fgOptimizeCast: Optimizes the supplied GT_CAST tree. // // Tries to get rid of the cast, its operand, the GTF_OVERFLOW flag, calls // calls "optNarrowTree". Called in post-order by "fgMorphSmpOp". // // Arguments: // tree - the cast tree to optimize // // Return Value: // The optimized tree (that can have any shape). // GenTree* Compiler::fgOptimizeCast(GenTreeCast* cast) { GenTree* src = cast->CastOp(); if (gtIsActiveCSE_Candidate(cast) || gtIsActiveCSE_Candidate(src)) { return cast; } // See if we can discard the cast. if (varTypeIsIntegral(cast) && varTypeIsIntegral(src)) { IntegralRange srcRange = IntegralRange::ForNode(src, this); IntegralRange noOvfRange = IntegralRange::ForCastInput(cast); if (noOvfRange.Contains(srcRange)) { // Casting between same-sized types is a no-op, // given we have proven this cast cannot overflow. if (genActualType(cast) == genActualType(src)) { return src; } cast->ClearOverflow(); cast->SetAllEffectsFlags(src); // Try and see if we can make this cast into a cheaper zero-extending version. if (genActualTypeIsInt(src) && cast->TypeIs(TYP_LONG) && srcRange.IsPositive()) { cast->SetUnsigned(); } } // For checked casts, we're done. if (cast->gtOverflow()) { return cast; } var_types castToType = cast->CastToType(); // For indir-like nodes, we may be able to change their type to satisfy (and discard) the cast. 
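// For example, CAST(ushort <- IND(short)) can become IND(ushort): the load keeps
// its width and only changes type, making the cast redundant.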
if (varTypeIsSmall(castToType) && (genTypeSize(castToType) == genTypeSize(src)) && src->OperIs(GT_IND, GT_CLS_VAR, GT_LCL_FLD)) { // We're changing the type here so we need to update the VN; // in other cases we discard the cast without modifying src // so the VN doesn't change. src->ChangeType(castToType); src->SetVNsFromNode(cast); return src; } // Try to narrow the operand of the cast and discard the cast. if (opts.OptEnabled(CLFLG_TREETRANS) && (genTypeSize(src) > genTypeSize(castToType)) && optNarrowTree(src, src->TypeGet(), castToType, cast->gtVNPair, false)) { optNarrowTree(src, src->TypeGet(), castToType, cast->gtVNPair, true); // "optNarrowTree" may leave a dead cast behind. if (src->OperIs(GT_CAST) && (src->AsCast()->CastToType() == genActualType(src->AsCast()->CastOp()))) { src = src->AsCast()->CastOp(); } return src; } // Check for two consecutive casts, we may be able to discard the intermediate one. if (opts.OptimizationEnabled() && src->OperIs(GT_CAST) && !src->gtOverflow()) { var_types dstCastToType = castToType; var_types srcCastToType = src->AsCast()->CastToType(); // CAST(ubyte <- CAST(short <- X)): CAST(ubyte <- X). // CAST(ushort <- CAST(short <- X)): CAST(ushort <- X). if (varTypeIsSmall(srcCastToType) && (genTypeSize(dstCastToType) <= genTypeSize(srcCastToType))) { cast->CastOp() = src->AsCast()->CastOp(); DEBUG_DESTROY_NODE(src); } } } return cast; } //------------------------------------------------------------------------ // fgOptimizeEqualityComparisonWithConst: optimizes various EQ/NE(OP, CONST) patterns. // // Arguments: // cmp - The GT_NE/GT_EQ tree the second operand of which is an integral constant // // Return Value: // The optimized tree, "cmp" in case no optimizations were done. // Currently only returns relop trees. // GenTree* Compiler::fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp) { assert(cmp->OperIs(GT_EQ, GT_NE)); assert(cmp->gtGetOp2()->IsIntegralConst()); assert(!optValnumCSE_phase); GenTree* op1 = cmp->gtGetOp1(); GenTreeIntConCommon* op2 = cmp->gtGetOp2()->AsIntConCommon(); // Check for "(expr +/- icon1) ==/!= (non-zero-icon2)". if (op2->IsCnsIntOrI() && (op2->IconValue() != 0)) { // Since this can occur repeatedly we use a while loop. while (op1->OperIs(GT_ADD, GT_SUB) && op1->AsOp()->gtGetOp2()->IsCnsIntOrI() && op1->TypeIs(TYP_INT) && !op1->gtOverflow()) { // Got it; change "x + icon1 == icon2" to "x == icon2 - icon1". ssize_t op1Value = op1->AsOp()->gtGetOp2()->AsIntCon()->IconValue(); ssize_t op2Value = op2->IconValue(); if (op1->OperIs(GT_ADD)) { op2Value -= op1Value; } else { op2Value += op1Value; } op1 = op1->AsOp()->gtGetOp1(); op2->SetIconValue(static_cast<int32_t>(op2Value)); } cmp->gtOp1 = op1; fgUpdateConstTreeValueNumber(op2); } // Here we look for the following tree // // EQ/NE // / \. // op1 CNS 0/1 // if (op2->IsIntegralConst(0) || op2->IsIntegralConst(1)) { ssize_t op2Value = static_cast<ssize_t>(op2->IntegralValue()); if (op1->OperIsCompare()) { // Here we look for the following tree // // EQ/NE -> RELOP/!RELOP // / \ / \. // RELOP CNS 0/1 // / \. // // Note that we will remove/destroy the EQ/NE node and move // the RELOP up into it's location. // Here we reverse the RELOP if necessary. 
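// For example, with an integer relop, NE(LT(a, b), 0) is simply LT(a, b), while
// EQ(LT(a, b), 0) becomes the reversed compare GE(a, b).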
bool reverse = ((op2Value == 0) == (cmp->OperIs(GT_EQ))); if (reverse) { gtReverseCond(op1); } noway_assert((op1->gtFlags & GTF_RELOP_JMP_USED) == 0); op1->gtFlags |= cmp->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE); op1->SetVNsFromNode(cmp); DEBUG_DESTROY_NODE(cmp); return op1; } // // Now we check for a compare with the result of an '&' operator // // Here we look for the following transformation: // // EQ/NE EQ/NE // / \ / \. // AND CNS 0/1 -> AND CNS 0 // / \ / \. // RSZ/RSH CNS 1 x CNS (1 << y) // / \. // x CNS_INT +y if (fgGlobalMorph && op1->OperIs(GT_AND) && op1->AsOp()->gtGetOp1()->OperIs(GT_RSZ, GT_RSH)) { GenTreeOp* andOp = op1->AsOp(); GenTreeOp* rshiftOp = andOp->gtGetOp1()->AsOp(); if (!rshiftOp->gtGetOp2()->IsCnsIntOrI()) { goto SKIP; } ssize_t shiftAmount = rshiftOp->gtGetOp2()->AsIntCon()->IconValue(); if (shiftAmount < 0) { goto SKIP; } if (!andOp->gtGetOp2()->IsIntegralConst(1)) { goto SKIP; } GenTreeIntConCommon* andMask = andOp->gtGetOp2()->AsIntConCommon(); if (andOp->TypeIs(TYP_INT)) { if (shiftAmount > 31) { goto SKIP; } andMask->SetIconValue(static_cast<int32_t>(1 << shiftAmount)); // Reverse the condition if necessary. if (op2Value == 1) { gtReverseCond(cmp); op2->SetIconValue(0); } } else if (andOp->TypeIs(TYP_LONG)) { if (shiftAmount > 63) { goto SKIP; } andMask->SetLngValue(1ll << shiftAmount); // Reverse the cond if necessary if (op2Value == 1) { gtReverseCond(cmp); op2->SetLngValue(0); } } andOp->gtOp1 = rshiftOp->gtGetOp1(); DEBUG_DESTROY_NODE(rshiftOp->gtGetOp2()); DEBUG_DESTROY_NODE(rshiftOp); } } SKIP: // Now check for compares with small constant longs that can be cast to int. // Note that we filter out negative values here so that the transformations // below are correct. E. g. "EQ(-1L, CAST_UN(int))" is always "false", but were // we to make it into "EQ(-1, int)", "true" becomes possible for negative inputs. if (!op2->TypeIs(TYP_LONG) || ((op2->LngValue() >> 31) != 0)) { return cmp; } if (!op1->OperIs(GT_AND)) { // Another interesting case: cast from int. if (op1->OperIs(GT_CAST) && op1->AsCast()->CastOp()->TypeIs(TYP_INT) && !op1->gtOverflow()) { // Simply make this into an integer comparison. cmp->gtOp1 = op1->AsCast()->CastOp(); op2->BashToConst(static_cast<int32_t>(op2->LngValue())); fgUpdateConstTreeValueNumber(op2); } return cmp; } // Now we perform the following optimization: // EQ/NE(AND(OP long, CNS_LNG), CNS_LNG) => // EQ/NE(AND(CAST(int <- OP), CNS_INT), CNS_INT) // when the constants are sufficiently small. // This transform cannot preserve VNs. if (fgGlobalMorph) { assert(op1->TypeIs(TYP_LONG) && op1->OperIs(GT_AND)); // Is the result of the mask effectively an INT? GenTreeOp* andOp = op1->AsOp(); if (!andOp->gtGetOp2()->OperIs(GT_CNS_NATIVELONG)) { return cmp; } GenTreeIntConCommon* andMask = andOp->gtGetOp2()->AsIntConCommon(); if ((andMask->LngValue() >> 32) != 0) { return cmp; } // Now we narrow the first operand of AND to int. if (optNarrowTree(andOp->gtGetOp1(), TYP_LONG, TYP_INT, ValueNumPair(), false)) { optNarrowTree(andOp->gtGetOp1(), TYP_LONG, TYP_INT, ValueNumPair(), true); } else { andOp->gtOp1 = gtNewCastNode(TYP_INT, andOp->gtGetOp1(), false, TYP_INT); } assert(andMask == andOp->gtGetOp2()); // Now replace the mask node. andMask->BashToConst(static_cast<int32_t>(andMask->LngValue())); // Now change the type of the AND node. andOp->ChangeType(TYP_INT); // Finally we replace the comparand. 
op2->BashToConst(static_cast<int32_t>(op2->LngValue())); } return cmp; } //------------------------------------------------------------------------ // fgOptimizeRelationalComparisonWithConst: optimizes a comparison operation. // // Recognizes comparisons against various constant operands and morphs // them, if possible, into comparisons against zero. // // Arguments: // cmp - the GT_LE/GT_LT/GT_GE/GT_GT tree to morph. // // Return Value: // The "cmp" tree, possibly with a modified oper. // The second operand's constant value may be modified as well. // // Assumptions: // The operands have been swapped so that any constants are on the right. // The second operand is an integral constant. // GenTree* Compiler::fgOptimizeRelationalComparisonWithConst(GenTreeOp* cmp) { assert(cmp->OperIs(GT_LE, GT_LT, GT_GE, GT_GT)); assert(cmp->gtGetOp2()->IsIntegralConst()); assert(!gtIsActiveCSE_Candidate(cmp->gtGetOp2())); GenTree* op1 = cmp->gtGetOp1(); GenTreeIntConCommon* op2 = cmp->gtGetOp2()->AsIntConCommon(); assert(genActualType(op1) == genActualType(op2)); genTreeOps oper = cmp->OperGet(); int64_t op2Value = op2->IntegralValue(); if (op2Value == 1) { // Check for "expr >= 1". if (oper == GT_GE) { // Change to "expr != 0" for unsigned and "expr > 0" for signed. oper = cmp->IsUnsigned() ? GT_NE : GT_GT; } // Check for "expr < 1". else if (oper == GT_LT) { // Change to "expr == 0" for unsigned and "expr <= 0". oper = cmp->IsUnsigned() ? GT_EQ : GT_LE; } } // Check for "expr relop -1". else if (!cmp->IsUnsigned() && (op2Value == -1)) { // Check for "expr <= -1". if (oper == GT_LE) { // Change to "expr < 0". oper = GT_LT; } // Check for "expr > -1". else if (oper == GT_GT) { // Change to "expr >= 0". oper = GT_GE; } } else if (cmp->IsUnsigned()) { if ((oper == GT_LE) || (oper == GT_GT)) { if (op2Value == 0) { // IL doesn't have a cne instruction so compilers use cgt.un instead. The JIT // recognizes certain patterns that involve GT_NE (e.g (x & 4) != 0) and fails // if GT_GT is used instead. Transform (x GT_GT.unsigned 0) into (x GT_NE 0) // and (x GT_LE.unsigned 0) into (x GT_EQ 0). The later case is rare, it sometimes // occurs as a result of branch inversion. oper = (oper == GT_LE) ? GT_EQ : GT_NE; cmp->gtFlags &= ~GTF_UNSIGNED; } // LE_UN/GT_UN(expr, int/long.MaxValue) => GE/LT(expr, 0). else if (((op1->TypeIs(TYP_LONG) && (op2Value == INT64_MAX))) || ((genActualType(op1) == TYP_INT) && (op2Value == INT32_MAX))) { oper = (oper == GT_LE) ? GT_GE : GT_LT; cmp->gtFlags &= ~GTF_UNSIGNED; } } } if (!cmp->OperIs(oper)) { // Keep the old ValueNumber for 'tree' as the new expr // will still compute the same value as before. cmp->SetOper(oper, GenTree::PRESERVE_VN); op2->SetIntegralValue(0); fgUpdateConstTreeValueNumber(op2); } return cmp; } #ifdef FEATURE_HW_INTRINSICS //------------------------------------------------------------------------ // fgOptimizeHWIntrinsic: optimize a HW intrinsic node // // Arguments: // node - HWIntrinsic node to examine // // Returns: // The original node if no optimization happened or if tree bashing occured. // An alternative tree if an optimization happened. // // Notes: // Checks for HWIntrinsic nodes: Vector64.Create/Vector128.Create/Vector256.Create, // and if the call is one of these, attempt to optimize. // This is post-order, meaning that it will not morph the children. 
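// For example, Vector128.Create(0, 0, 0, 0) (all constant-zero arguments) is
// rewritten to Vector128.get_Zero.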
// GenTree* Compiler::fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node) { assert(!optValnumCSE_phase); if (opts.OptimizationDisabled()) { return node; } switch (node->GetHWIntrinsicId()) { case NI_Vector128_Create: #if defined(TARGET_XARCH) case NI_Vector256_Create: #elif defined(TARGET_ARM64) case NI_Vector64_Create: #endif { bool hwAllArgsAreConstZero = true; for (GenTree* arg : node->Operands()) { if (!arg->IsIntegralConst(0) && !arg->IsFloatPositiveZero()) { hwAllArgsAreConstZero = false; break; } } if (hwAllArgsAreConstZero) { switch (node->GetHWIntrinsicId()) { case NI_Vector128_Create: { node->ResetHWIntrinsicId(NI_Vector128_get_Zero); break; } #if defined(TARGET_XARCH) case NI_Vector256_Create: { node->ResetHWIntrinsicId(NI_Vector256_get_Zero); break; } #elif defined(TARGET_ARM64) case NI_Vector64_Create: { node->ResetHWIntrinsicId(NI_Vector64_get_Zero); break; } #endif default: unreached(); } } break; } default: break; } return node; } #endif //------------------------------------------------------------------------ // fgOptimizeCommutativeArithmetic: Optimizes commutative operations. // // Arguments: // tree - the unchecked GT_ADD/GT_MUL/GT_OR/GT_XOR/GT_AND tree to optimize. // // Return Value: // The optimized tree that can have any shape. // GenTree* Compiler::fgOptimizeCommutativeArithmetic(GenTreeOp* tree) { assert(tree->OperIs(GT_ADD, GT_MUL, GT_OR, GT_XOR, GT_AND)); assert(!tree->gtOverflowEx()); // Commute constants to the right. if (tree->gtGetOp1()->OperIsConst() && !tree->gtGetOp1()->TypeIs(TYP_REF)) { // TODO-Review: We used to assert here that "(!op2->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD))". // This may indicate a missed "remorph". Task is to re-enable this assertion and investigate. std::swap(tree->gtOp1, tree->gtOp2); } if (fgOperIsBitwiseRotationRoot(tree->OperGet())) { GenTree* rotationTree = fgRecognizeAndMorphBitwiseRotation(tree); if (rotationTree != nullptr) { return rotationTree; } } if (fgGlobalMorph && tree->OperIs(GT_AND, GT_OR, GT_XOR)) { GenTree* castTree = fgMorphCastedBitwiseOp(tree->AsOp()); if (castTree != nullptr) { return castTree; } } if (varTypeIsIntegralOrI(tree)) { genTreeOps oldTreeOper = tree->OperGet(); GenTreeOp* optimizedTree = fgMorphCommutative(tree->AsOp()); if (optimizedTree != nullptr) { if (!optimizedTree->OperIs(oldTreeOper)) { // "optimizedTree" could end up being a COMMA. return optimizedTree; } tree = optimizedTree; } } if (!optValnumCSE_phase) { GenTree* optimizedTree = nullptr; if (tree->OperIs(GT_ADD)) { optimizedTree = fgOptimizeAddition(tree); } else if (tree->OperIs(GT_MUL)) { optimizedTree = fgOptimizeMultiply(tree); } else if (tree->OperIs(GT_AND)) { optimizedTree = fgOptimizeBitwiseAnd(tree); } if (optimizedTree != nullptr) { return optimizedTree; } } return tree; } //------------------------------------------------------------------------ // fgOptimizeAddition: optimizes addition. // // Arguments: // add - the unchecked GT_ADD tree to optimize. // // Return Value: // The optimized tree, that can have any shape, in case any transformations // were performed. Otherwise, "nullptr", guaranteeing no state change. // GenTree* Compiler::fgOptimizeAddition(GenTreeOp* add) { assert(add->OperIs(GT_ADD) && !add->gtOverflow()); assert(!optValnumCSE_phase); GenTree* op1 = add->gtGetOp1(); GenTree* op2 = add->gtGetOp2(); // Fold "((x + icon1) + (y + icon2))" to ((x + y) + (icon1 + icon2))". // Be careful not to create a byref pointer that may point outside of the ref object. 
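// ----------------------------------------------------------------------------
// Illustrative aside (not part of the original sources): a standalone sketch of
// the re-association performed by fgOptimizeAddition for the fold described in
// the comment above. Unsigned arithmetic models the JIT's truncating
// (wraparound) constant folding; the helper name is hypothetical.
static bool AddReassociationHolds(uint32_t x, uint32_t y, uint32_t c1, uint32_t c2)
{
    bool ok = true;
    ok &= ((x + c1) + (y + c2)) == ((x + y) + (c1 + c2)); // merge the two constants
    ok &= (x + 0u) == x;                                  // ADD(x, 0)       => x
    ok &= ((0u - x) + y) == (y - x);                      // ADD(NEG(a), b)  => SUB(b, a)
    ok &= (x + (0u - y)) == (x - y);                      // ADD(a, NEG(b))  => SUB(a, b)
    return ok;
}
// ----------------------------------------------------------------------------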
// Only do this in global morph as we don't recompute the VN for "(x + y)", the new "op2". if (op1->OperIs(GT_ADD) && op2->OperIs(GT_ADD) && !op1->gtOverflow() && !op2->gtOverflow() && op1->AsOp()->gtGetOp2()->IsCnsIntOrI() && op2->AsOp()->gtGetOp2()->IsCnsIntOrI() && !varTypeIsGC(op1->AsOp()->gtGetOp1()) && !varTypeIsGC(op2->AsOp()->gtGetOp1()) && fgGlobalMorph) { GenTreeOp* addOne = op1->AsOp(); GenTreeOp* addTwo = op2->AsOp(); GenTreeIntCon* constOne = addOne->gtGetOp2()->AsIntCon(); GenTreeIntCon* constTwo = addTwo->gtGetOp2()->AsIntCon(); addOne->gtOp2 = addTwo->gtGetOp1(); addOne->SetAllEffectsFlags(addOne->gtGetOp1(), addOne->gtGetOp2()); DEBUG_DESTROY_NODE(addTwo); constOne->SetValueTruncating(constOne->IconValue() + constTwo->IconValue()); op2 = constOne; add->gtOp2 = constOne; DEBUG_DESTROY_NODE(constTwo); } // Fold (x + 0) - given it won't change the tree type to TYP_REF. // TODO-Bug: this code will lose the GC-ness of a tree like "native int + byref(0)". if (op2->IsIntegralConst(0) && ((add->TypeGet() == op1->TypeGet()) || !op1->TypeIs(TYP_REF))) { if (op2->IsCnsIntOrI() && varTypeIsI(op1)) { fgAddFieldSeqForZeroOffset(op1, op2->AsIntCon()->gtFieldSeq); } DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(add); return op1; } // Note that these transformations are legal for floating-point ADDs as well. if (opts.OptimizationEnabled()) { // - a + b = > b - a // ADD((NEG(a), b) => SUB(b, a) // Do not do this if "op2" is constant for canonicalization purposes. if (op1->OperIs(GT_NEG) && !op2->OperIs(GT_NEG) && !op2->IsIntegralConst() && gtCanSwapOrder(op1, op2)) { add->SetOper(GT_SUB); add->gtOp1 = op2; add->gtOp2 = op1->AsOp()->gtGetOp1(); DEBUG_DESTROY_NODE(op1); return add; } // a + -b = > a - b // ADD(a, (NEG(b)) => SUB(a, b) if (!op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG)) { add->SetOper(GT_SUB); add->gtOp2 = op2->AsOp()->gtGetOp1(); DEBUG_DESTROY_NODE(op2); return add; } } return nullptr; } //------------------------------------------------------------------------ // fgOptimizeMultiply: optimizes multiplication. // // Arguments: // mul - the unchecked TYP_I_IMPL/TYP_INT GT_MUL tree to optimize. // // Return Value: // The optimized tree, that can have any shape, in case any transformations // were performed. Otherwise, "nullptr", guaranteeing no state change. // GenTree* Compiler::fgOptimizeMultiply(GenTreeOp* mul) { assert(mul->OperIs(GT_MUL)); assert(varTypeIsIntOrI(mul) || varTypeIsFloating(mul)); assert(!mul->gtOverflow()); assert(!optValnumCSE_phase); GenTree* op1 = mul->gtGetOp1(); GenTree* op2 = mul->gtGetOp2(); assert(mul->TypeGet() == genActualType(op1)); assert(mul->TypeGet() == genActualType(op2)); if (opts.OptimizationEnabled() && op2->IsCnsFltOrDbl()) { double multiplierValue = op2->AsDblCon()->gtDconVal; if (multiplierValue == 1.0) { // Fold "x * 1.0" to "x". DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(mul); return op1; } // Fold "x * 2.0" to "x + x". // If op1 is not a local we will have to introduce a temporary via GT_COMMA. // Unfortunately, it's not optHoistLoopCode-friendly (yet), so we'll only do // this for locals / after hoisting has run (when rationalization remorphs // math INTRINSICSs into calls...). 
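// ----------------------------------------------------------------------------
// Illustrative aside (not part of the original sources): a standalone sketch of
// the strength-reduction identities used by fgOptimizeMultiply. Whether the
// shift forms are actually emitted depends on the target and code-size
// heuristics checked below; the helper name is hypothetical.
static bool MultiplyFoldsHold(double d, uint32_t x)
{
    bool ok = true;
    if (d == d) // skip NaN, where '==' cannot be used to compare the results
    {
        ok &= (d * 1.0) == d;       // MUL(x, 1.0) => x
        ok &= (d * 2.0) == (d + d); // MUL(x, 2.0) => ADD(x, x), exact in IEEE arithmetic
    }
    ok &= (x * 8u) == (x << 3);         // power-of-two multiplier => LSH
    ok &= (x * 5u) == ((x << 2) + x);   // 3/5/9 multipliers map to a single LEA on xarch
    ok &= (x * 40u) == ((x * 5u) << 3); // 40 == 5 * 8 => MUL by 5 followed by LSH by 3
    return ok;
}
// ----------------------------------------------------------------------------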
if ((multiplierValue == 2.0) && (op1->IsLocal() || (fgOrder == FGOrderLinear))) { op2 = fgMakeMultiUse(&op1); GenTree* add = gtNewOperNode(GT_ADD, mul->TypeGet(), op1, op2); INDEBUG(add->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return add; } } if (op2->IsIntegralConst()) { ssize_t mult = op2->AsIntConCommon()->IconValue(); bool op2IsConstIndex = op2->OperGet() == GT_CNS_INT && op2->AsIntCon()->gtFieldSeq != nullptr && op2->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq(); assert(!op2IsConstIndex || op2->AsIntCon()->gtFieldSeq->m_next == nullptr); if (mult == 0) { // We may be able to throw away op1 (unless it has side-effects) if ((op1->gtFlags & GTF_SIDE_EFFECT) == 0) { DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(mul); return op2; // Just return the "0" node } // We need to keep op1 for the side-effects. Hang it off a GT_COMMA node. mul->ChangeOper(GT_COMMA, GenTree::PRESERVE_VN); return mul; } #ifdef TARGET_XARCH // Should we try to replace integer multiplication with lea/add/shift sequences? bool mulShiftOpt = compCodeOpt() != SMALL_CODE; #else // !TARGET_XARCH bool mulShiftOpt = false; #endif // !TARGET_XARCH size_t abs_mult = (mult >= 0) ? mult : -mult; size_t lowestBit = genFindLowestBit(abs_mult); bool changeToShift = false; // is it a power of two? (positive or negative) if (abs_mult == lowestBit) { // if negative negate (min-int does not need negation) if (mult < 0 && mult != SSIZE_T_MIN) { op1 = gtNewOperNode(GT_NEG, genActualType(op1), op1); mul->gtOp1 = op1; fgMorphTreeDone(op1); } // If "op2" is a constant array index, the other multiplicand must be a constant. // Transfer the annotation to the other one. if (op2->OperGet() == GT_CNS_INT && op2->AsIntCon()->gtFieldSeq != nullptr && op2->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq()) { assert(op2->AsIntCon()->gtFieldSeq->m_next == nullptr); GenTree* otherOp = op1; if (otherOp->OperGet() == GT_NEG) { otherOp = otherOp->AsOp()->gtOp1; } assert(otherOp->OperGet() == GT_CNS_INT); assert(otherOp->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField()); otherOp->AsIntCon()->gtFieldSeq = op2->AsIntCon()->gtFieldSeq; } if (abs_mult == 1) { DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(mul); return op1; } // Change the multiplication into a shift by log2(val) bits. op2->AsIntConCommon()->SetIconValue(genLog2(abs_mult)); changeToShift = true; } else if (mulShiftOpt && (lowestBit > 1) && jitIsScaleIndexMul(lowestBit)) { int shift = genLog2(lowestBit); ssize_t factor = abs_mult >> shift; if (factor == 3 || factor == 5 || factor == 9) { // if negative negate (min-int does not need negation) if (mult < 0 && mult != SSIZE_T_MIN) { op1 = gtNewOperNode(GT_NEG, genActualType(op1), op1); mul->gtOp1 = op1; fgMorphTreeDone(op1); } GenTree* factorIcon = gtNewIconNode(factor, mul->TypeGet()); if (op2IsConstIndex) { factorIcon->AsIntCon()->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField); } // change the multiplication into a smaller multiplication (by 3, 5 or 9) and a shift op1 = gtNewOperNode(GT_MUL, mul->TypeGet(), op1, factorIcon); mul->gtOp1 = op1; fgMorphTreeDone(op1); op2->AsIntConCommon()->SetIconValue(shift); changeToShift = true; } } if (changeToShift) { fgUpdateConstTreeValueNumber(op2); mul->ChangeOper(GT_LSH, GenTree::PRESERVE_VN); return mul; } } return nullptr; } //------------------------------------------------------------------------ // fgOptimizeBitwiseAnd: optimizes the "and" operation. // // Arguments: // andOp - the GT_AND tree to optimize. 
// // Return Value: // The optimized tree, currently always a relop, in case any transformations // were performed. Otherwise, "nullptr", guaranteeing no state change. // GenTree* Compiler::fgOptimizeBitwiseAnd(GenTreeOp* andOp) { assert(andOp->OperIs(GT_AND)); assert(!optValnumCSE_phase); GenTree* op1 = andOp->gtGetOp1(); GenTree* op2 = andOp->gtGetOp2(); // Fold "cmp & 1" to just "cmp". if (andOp->TypeIs(TYP_INT) && op1->OperIsCompare() && op2->IsIntegralConst(1)) { DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(andOp); return op1; } return nullptr; } //------------------------------------------------------------------------ // fgOptimizeRelationalComparisonWithCasts: Recognizes comparisons against // various cast operands and tries to remove them. E.g.: // // * GE int // +--* CAST long <- ulong <- uint // | \--* X int // \--* CNS_INT long // // to: // // * GE_un int // +--* X int // \--* CNS_INT int // // same for: // // * GE int // +--* CAST long <- ulong <- uint // | \--* X int // \--* CAST long <- [u]long <- int // \--* ARR_LEN int // // These patterns quite often show up along with index checks // // Arguments: // cmp - the GT_LE/GT_LT/GT_GE/GT_GT tree to morph. // // Return Value: // Returns the same tree where operands might have narrower types // // Notes: // TODO-Casts: consider unifying this function with "optNarrowTree" // GenTree* Compiler::fgOptimizeRelationalComparisonWithCasts(GenTreeOp* cmp) { assert(cmp->OperIs(GT_LE, GT_LT, GT_GE, GT_GT)); assert(!optValnumCSE_phase); GenTree* op1 = cmp->gtGetOp1(); GenTree* op2 = cmp->gtGetOp2(); // Caller is expected to call this function only if we have CAST nodes assert(op1->OperIs(GT_CAST) || op2->OperIs(GT_CAST)); if (!op1->TypeIs(TYP_LONG)) { // We can extend this logic to handle small types as well, but currently it's done mostly to // assist range check elimination return cmp; } GenTree* castOp; GenTree* knownPositiveOp; bool knownPositiveIsOp2; if (op2->IsIntegralConst() || ((op2->OperIs(GT_CAST) && op2->AsCast()->CastOp()->OperIs(GT_ARR_LENGTH)))) { // op2 is either a LONG constant or (T)ARR_LENGTH knownPositiveIsOp2 = true; castOp = cmp->gtGetOp1(); knownPositiveOp = cmp->gtGetOp2(); } else { // op1 is either a LONG constant (yes, it's pretty normal for relops) // or (T)ARR_LENGTH castOp = cmp->gtGetOp2(); knownPositiveOp = cmp->gtGetOp1(); knownPositiveIsOp2 = false; } if (castOp->OperIs(GT_CAST) && varTypeIsLong(castOp->CastToType()) && castOp->AsCast()->CastOp()->TypeIs(TYP_INT) && castOp->IsUnsigned() && !castOp->gtOverflow()) { bool knownPositiveFitsIntoU32 = false; if (knownPositiveOp->IsIntegralConst() && FitsIn<UINT32>(knownPositiveOp->AsIntConCommon()->IntegralValue())) { // BTW, we can fold the whole condition if op2 doesn't fit into UINT_MAX. knownPositiveFitsIntoU32 = true; } else if (knownPositiveOp->OperIs(GT_CAST) && varTypeIsLong(knownPositiveOp->CastToType()) && knownPositiveOp->AsCast()->CastOp()->OperIs(GT_ARR_LENGTH)) { knownPositiveFitsIntoU32 = true; // TODO-Casts: recognize Span.Length here as well. 
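// ----------------------------------------------------------------------------
// Illustrative aside (not part of the original sources): a standalone sketch of
// why the zero-extending cast can be dropped by this function. When x is a
// zero-extended 32-bit value and the other operand fits in 32 bits, the 64-bit
// compare and the unsigned 32-bit compare agree. The helper name is hypothetical.
static bool CompareCastRemovalHolds(uint32_t x, uint64_t cns)
{
    if (cns > 0xFFFFFFFFull)
    {
        return true; // the rewrite is only attempted when the constant fits into UINT32
    }
    // GE(CAST(ulong <- uint x), cns) behaves exactly like GE_un(x, (uint)cns).
    return (static_cast<uint64_t>(x) >= cns) == (x >= static_cast<uint32_t>(cns));
}
// ----------------------------------------------------------------------------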
} if (!knownPositiveFitsIntoU32) { return cmp; } JITDUMP("Removing redundant cast(s) for:\n") DISPTREE(cmp) JITDUMP("\n\nto:\n\n") cmp->SetUnsigned(); // Drop cast from castOp if (knownPositiveIsOp2) { cmp->gtOp1 = castOp->AsCast()->CastOp(); } else { cmp->gtOp2 = castOp->AsCast()->CastOp(); } DEBUG_DESTROY_NODE(castOp); if (knownPositiveOp->OperIs(GT_CAST)) { // Drop cast from knownPositiveOp too if (knownPositiveIsOp2) { cmp->gtOp2 = knownPositiveOp->AsCast()->CastOp(); } else { cmp->gtOp1 = knownPositiveOp->AsCast()->CastOp(); } DEBUG_DESTROY_NODE(knownPositiveOp); } else { // Change type for constant from LONG to INT knownPositiveOp->ChangeType(TYP_INT); #ifndef TARGET_64BIT assert(knownPositiveOp->OperIs(GT_CNS_LNG)); knownPositiveOp->BashToConst(static_cast<int>(knownPositiveOp->AsIntConCommon()->IntegralValue())); #endif fgUpdateConstTreeValueNumber(knownPositiveOp); } DISPTREE(cmp) JITDUMP("\n") } return cmp; } //------------------------------------------------------------------------ // fgPropagateCommaThrow: propagate a "comma throw" up the tree. // // "Comma throws" in the compiler represent the canonical form of an always // throwing expression. They have the shape of COMMA(THROW, ZERO), to satisfy // the semantic that the original expression produced some value and are // generated by "gtFoldExprConst" when it encounters checked arithmetic that // will determinably overflow. // // In the global morphing phase, "comma throws" are "propagated" up the tree, // in post-order, to eliminate nodes that will never execute. This method, // called by "fgMorphSmpOp", encapsulates this optimization. // // Arguments: // parent - the node currently being processed. // commaThrow - the comma throw in question, "parent"'s operand. // precedingSideEffects - side effects of nodes preceding "comma" in execution order. // // Return Value: // If "parent" is to be replaced with a comma throw, i. e. the propagation was successful, // the new "parent", otherwise "nullptr", guaranteeing no state change, with one exception: // the "fgRemoveRestOfBlock" "global" may be set. Note that the new returned tree does not // have to be a "comma throw", it can be "bare" throw call if the "parent" node did not // produce any value. // // Notes: // "Comma throws" are very rare. // GenTree* Compiler::fgPropagateCommaThrow(GenTree* parent, GenTreeOp* commaThrow, GenTreeFlags precedingSideEffects) { // Comma throw propagation does not preserve VNs, and deletes nodes. assert(fgGlobalMorph); assert(fgIsCommaThrow(commaThrow)); if ((commaThrow->gtFlags & GTF_COLON_COND) == 0) { fgRemoveRestOfBlock = true; } if ((precedingSideEffects & GTF_ALL_EFFECT) == 0) { if (parent->TypeIs(TYP_VOID)) { // Return the throw node as the new tree. return commaThrow->gtGetOp1(); } // Fix up the COMMA's type if needed. if (genActualType(parent) != genActualType(commaThrow)) { commaThrow->gtGetOp2()->BashToZeroConst(genActualType(parent)); commaThrow->ChangeType(genActualType(parent)); } return commaThrow; } return nullptr; } //---------------------------------------------------------------------------------------------- // fgMorphRetInd: Try to get rid of extra IND(ADDR()) pairs in a return tree. // // Arguments: // node - The return node that uses an indirection. // // Return Value: // the original op1 of the ret if there was no optimization or an optimized new op1. 
// GenTree* Compiler::fgMorphRetInd(GenTreeUnOp* ret) { assert(ret->OperIs(GT_RETURN)); assert(ret->gtGetOp1()->OperIs(GT_IND, GT_BLK, GT_OBJ)); GenTreeIndir* ind = ret->gtGetOp1()->AsIndir(); GenTree* addr = ind->Addr(); if (addr->OperIs(GT_ADDR) && addr->gtGetOp1()->OperIs(GT_LCL_VAR)) { // If struct promotion was undone, adjust the annotations if (fgGlobalMorph && fgMorphImplicitByRefArgs(addr)) { return ind; } // If `return` retypes LCL_VAR as a smaller struct it should not set `doNotEnregister` on that // LclVar. // Example: in `Vector128:AsVector2` we have RETURN SIMD8(OBJ SIMD8(ADDR byref(LCL_VAR SIMD16))). GenTreeLclVar* lclVar = addr->gtGetOp1()->AsLclVar(); if (!lvaIsImplicitByRefLocal(lclVar->GetLclNum())) { assert(!gtIsActiveCSE_Candidate(addr) && !gtIsActiveCSE_Candidate(ind)); unsigned indSize; if (ind->OperIs(GT_IND)) { indSize = genTypeSize(ind); } else { indSize = ind->AsBlk()->GetLayout()->GetSize(); } LclVarDsc* varDsc = lvaGetDesc(lclVar); unsigned lclVarSize; if (!lclVar->TypeIs(TYP_STRUCT)) { lclVarSize = genTypeSize(varDsc->TypeGet()); } else { lclVarSize = varDsc->lvExactSize; } // TODO: change conditions in `canFold` to `indSize <= lclVarSize`, but currently do not support `BITCAST // int<-SIMD16` etc. assert((indSize <= lclVarSize) || varDsc->lvDoNotEnregister); #if defined(TARGET_64BIT) bool canFold = (indSize == lclVarSize); #else // !TARGET_64BIT // TODO: improve 32 bit targets handling for LONG returns if necessary, nowadays we do not support `BITCAST // long<->double` there. bool canFold = (indSize == lclVarSize) && (lclVarSize <= REGSIZE_BYTES); #endif // TODO: support `genReturnBB != nullptr`, it requires #11413 to avoid `Incompatible types for // gtNewTempAssign`. if (canFold && (genReturnBB == nullptr)) { // Fold (TYPE1)*(&(TYPE2)x) even if types do not match, lowering will handle it. // Getting rid of this IND(ADDR()) pair allows to keep lclVar as not address taken // and enregister it. DEBUG_DESTROY_NODE(ind); DEBUG_DESTROY_NODE(addr); ret->gtOp1 = lclVar; // We use GTF_DONT_CSE as an "is under GT_ADDR" check. We can // get rid of it now since the GT_RETURN node should never have // its address taken. assert((ret->gtFlags & GTF_DONT_CSE) == 0); lclVar->gtFlags &= ~GTF_DONT_CSE; return lclVar; } else if (!varDsc->lvDoNotEnregister) { lvaSetVarDoNotEnregister(lclVar->GetLclNum() DEBUGARG(DoNotEnregisterReason::BlockOpRet)); } } } return ind; } #ifdef _PREFAST_ #pragma warning(pop) #endif GenTree* Compiler::fgMorphSmpOpOptional(GenTreeOp* tree) { genTreeOps oper = tree->gtOper; GenTree* op1 = tree->gtOp1; GenTree* op2 = tree->gtOp2; var_types typ = tree->TypeGet(); if (fgGlobalMorph && GenTree::OperIsCommutative(oper)) { /* Swap the operands so that the more expensive one is 'op1' */ if (tree->gtFlags & GTF_REVERSE_OPS) { tree->gtOp1 = op2; tree->gtOp2 = op1; op2 = op1; op1 = tree->gtOp1; tree->gtFlags &= ~GTF_REVERSE_OPS; } if (oper == op2->gtOper) { /* Reorder nested operators at the same precedence level to be left-recursive. For example, change "(a+(b+c))" to the equivalent expression "((a+b)+c)". 
*/ /* Things are handled differently for floating-point operators */ if (!varTypeIsFloating(tree->TypeGet())) { fgMoveOpsLeft(tree); op1 = tree->gtOp1; op2 = tree->gtOp2; } } } #if REARRANGE_ADDS /* Change "((x+icon)+y)" to "((x+y)+icon)" Don't reorder floating-point operations */ if (fgGlobalMorph && (oper == GT_ADD) && !tree->gtOverflow() && (op1->gtOper == GT_ADD) && !op1->gtOverflow() && varTypeIsIntegralOrI(typ)) { GenTree* ad1 = op1->AsOp()->gtOp1; GenTree* ad2 = op1->AsOp()->gtOp2; if (!op2->OperIsConst() && ad2->OperIsConst()) { // This takes // + (tree) // / \. // / \. // / \. // + (op1) op2 // / \. // / \. // ad1 ad2 // // and it swaps ad2 and op2. // Don't create a byref pointer that may point outside of the ref object. // If a GC happens, the byref won't get updated. This can happen if one // of the int components is negative. It also requires the address generation // be in a fully-interruptible code region. if (!varTypeIsGC(ad1->TypeGet()) && !varTypeIsGC(op2->TypeGet())) { tree->gtOp2 = ad2; op1->AsOp()->gtOp2 = op2; op1->gtFlags |= op2->gtFlags & GTF_ALL_EFFECT; op2 = tree->gtOp2; } } } #endif /*------------------------------------------------------------------------- * Perform optional oper-specific postorder morphing */ switch (oper) { case GT_ASG: // Make sure we're allowed to do this. if (optValnumCSE_phase) { // It is not safe to reorder/delete CSE's break; } if (varTypeIsStruct(typ) && !tree->IsPhiDefn()) { if (tree->OperIsCopyBlkOp()) { return fgMorphCopyBlock(tree); } else { return fgMorphInitBlock(tree); } } if (typ == TYP_LONG) { break; } if (op2->gtFlags & GTF_ASG) { break; } if ((op2->gtFlags & GTF_CALL) && (op1->gtFlags & GTF_ALL_EFFECT)) { break; } /* Special case: a cast that can be thrown away */ // TODO-Cleanup: fgMorphSmp does a similar optimization. However, it removes only // one cast and sometimes there is another one after it that gets removed by this // code. fgMorphSmp should be improved to remove all redundant casts so this code // can be removed. 
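// ----------------------------------------------------------------------------
// Illustrative aside (not part of the original sources): a standalone sketch of
// the "throw-away cast" case handled just below. When the store destination is
// no wider than the cast target, the intermediate narrowing cast is redundant
// because the store truncates anyway. The helper name is hypothetical.
static bool NarrowingStoreMakesCastRedundant(int32_t x)
{
    // Storing CAST(short <- x) into a byte-sized location is the same as
    // storing x directly: both end up truncated to 8 bits.
    return static_cast<int8_t>(static_cast<int16_t>(x)) == static_cast<int8_t>(x);
}
// ----------------------------------------------------------------------------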
if (op1->gtOper == GT_IND && op2->gtOper == GT_CAST && !op2->gtOverflow()) { var_types srct; var_types cast; var_types dstt; srct = op2->AsCast()->CastOp()->TypeGet(); cast = (var_types)op2->CastToType(); dstt = op1->TypeGet(); /* Make sure these are all ints and precision is not lost */ if (genTypeSize(cast) >= genTypeSize(dstt) && dstt <= TYP_INT && srct <= TYP_INT) { op2 = tree->gtOp2 = op2->AsCast()->CastOp(); } } break; case GT_MUL: /* Check for the case "(val + icon) * icon" */ if (op2->gtOper == GT_CNS_INT && op1->gtOper == GT_ADD) { GenTree* add = op1->AsOp()->gtOp2; if (add->IsCnsIntOrI() && (op2->GetScaleIndexMul() != 0)) { if (tree->gtOverflow() || op1->gtOverflow()) { break; } ssize_t imul = op2->AsIntCon()->gtIconVal; ssize_t iadd = add->AsIntCon()->gtIconVal; /* Change '(val + iadd) * imul' -> '(val * imul) + (iadd * imul)' */ oper = GT_ADD; tree->ChangeOper(oper); op2->AsIntCon()->SetValueTruncating(iadd * imul); op1->ChangeOper(GT_MUL); add->AsIntCon()->SetIconValue(imul); } } break; case GT_DIV: /* For "val / 1", just return "val" */ if (op2->IsIntegralConst(1)) { DEBUG_DESTROY_NODE(tree); return op1; } break; case GT_UDIV: case GT_UMOD: tree->CheckDivideByConstOptimized(this); break; case GT_LSH: /* Check for the case "(val + icon) << icon" */ if (!optValnumCSE_phase && op2->IsCnsIntOrI() && op1->gtOper == GT_ADD && !op1->gtOverflow()) { GenTree* cns = op1->AsOp()->gtOp2; if (cns->IsCnsIntOrI() && (op2->GetScaleIndexShf() != 0)) { ssize_t ishf = op2->AsIntConCommon()->IconValue(); ssize_t iadd = cns->AsIntConCommon()->IconValue(); // printf("Changing '(val+icon1)<<icon2' into '(val<<icon2+icon1<<icon2)'\n"); /* Change "(val + iadd) << ishf" into "(val<<ishf + iadd<<ishf)" */ tree->ChangeOper(GT_ADD); // we are reusing the shift amount node here, but the type we want is that of the shift result op2->gtType = op1->gtType; op2->AsIntConCommon()->SetValueTruncating(iadd << ishf); if (cns->gtOper == GT_CNS_INT && cns->AsIntCon()->gtFieldSeq != nullptr && cns->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq()) { assert(cns->AsIntCon()->gtFieldSeq->m_next == nullptr); op2->AsIntCon()->gtFieldSeq = cns->AsIntCon()->gtFieldSeq; } op1->ChangeOper(GT_LSH); cns->AsIntConCommon()->SetIconValue(ishf); } } break; case GT_XOR: if (!optValnumCSE_phase) { /* "x ^ -1" is "~x" */ if (op2->IsIntegralConst(-1)) { tree->ChangeOper(GT_NOT); tree->gtOp2 = nullptr; DEBUG_DESTROY_NODE(op2); } else if (op2->IsIntegralConst(1) && op1->OperIsCompare()) { /* "binaryVal ^ 1" is "!binaryVal" */ gtReverseCond(op1); DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(tree); return op1; } } break; case GT_INIT_VAL: // Initialization values for initBlk have special semantics - their lower // byte is used to fill the struct. However, we allow 0 as a "bare" value, // which enables them to get a VNForZero, and be propagated. if (op1->IsIntegralConst(0)) { return op1; } break; default: break; } return tree; } #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) //------------------------------------------------------------------------ // fgMorphMultiOp: Morph a GenTreeMultiOp (SIMD/HWINTRINSIC) tree. // // Arguments: // multiOp - The tree to morph // // Return Value: // The fully morphed tree. 
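// ----------------------------------------------------------------------------
// Illustrative aside (not part of the original sources): a standalone sketch of
// the GT_MUL/GT_LSH distribution and GT_XOR folds handled above. Unsigned
// arithmetic models the JIT's truncating constant folding; the helper name is
// hypothetical.
static bool DistributeAndXorFoldsHold(uint32_t x, uint32_t a, uint32_t b)
{
    bool ok = true;
    ok &= ((x + 3u) * 5u) == ((x * 5u) + (3u * 5u));        // (val + c1) * c2  => val * c2 + c1 * c2
    ok &= ((x + 3u) << 4) == ((x << 4) + (3u << 4));        // (val + c1) << c2 => (val << c2) + (c1 << c2)
    ok &= (x ^ 0xFFFFFFFFu) == ~x;                          // x ^ -1           => ~x
    ok &= ((static_cast<int>(a < b) ^ 1) != 0) == !(a < b); // relop ^ 1        => reversed relop
    return ok;
}
// ----------------------------------------------------------------------------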
// GenTree* Compiler::fgMorphMultiOp(GenTreeMultiOp* multiOp) { gtUpdateNodeOperSideEffects(multiOp); bool dontCseConstArguments = false; #if defined(FEATURE_HW_INTRINSICS) // Opportunistically, avoid unexpected CSE for hw intrinsics with IMM arguments if (multiOp->OperIs(GT_HWINTRINSIC)) { NamedIntrinsic hwIntrinsic = multiOp->AsHWIntrinsic()->GetHWIntrinsicId(); #if defined(TARGET_XARCH) if (HWIntrinsicInfo::lookupCategory(hwIntrinsic) == HW_Category_IMM) { dontCseConstArguments = true; } #elif defined(TARGET_ARMARCH) if (HWIntrinsicInfo::HasImmediateOperand(hwIntrinsic)) { dontCseConstArguments = true; } #endif } #endif for (GenTree** use : multiOp->UseEdges()) { *use = fgMorphTree(*use); GenTree* operand = *use; multiOp->gtFlags |= (operand->gtFlags & GTF_ALL_EFFECT); if (dontCseConstArguments && operand->OperIsConst()) { operand->SetDoNotCSE(); } // Promoted structs after morph must be in one of two states: // a) Fully eliminated from the IR (independent promotion) OR only be // used by "special" nodes (e. g. LHS of ASGs for multi-reg structs). // b) Marked as do-not-enregister (dependent promotion). // // So here we preserve this invariant and mark any promoted structs as do-not-enreg. // if (operand->OperIs(GT_LCL_VAR) && lvaGetDesc(operand->AsLclVar())->lvPromoted) { lvaSetVarDoNotEnregister(operand->AsLclVar()->GetLclNum() DEBUGARG(DoNotEnregisterReason::SimdUserForcesDep)); } } #if defined(FEATURE_HW_INTRINSICS) if (opts.OptimizationEnabled() && multiOp->OperIs(GT_HWINTRINSIC)) { GenTreeHWIntrinsic* hw = multiOp->AsHWIntrinsic(); switch (hw->GetHWIntrinsicId()) { #if defined(TARGET_XARCH) case NI_SSE_Xor: case NI_SSE2_Xor: case NI_AVX_Xor: case NI_AVX2_Xor: { // Transform XOR(X, 0) to X for vectors GenTree* op1 = hw->Op(1); GenTree* op2 = hw->Op(2); if (!gtIsActiveCSE_Candidate(hw)) { if (op1->IsIntegralConstVector(0) && !gtIsActiveCSE_Candidate(op1)) { DEBUG_DESTROY_NODE(hw); DEBUG_DESTROY_NODE(op1); return op2; } if (op2->IsIntegralConstVector(0) && !gtIsActiveCSE_Candidate(op2)) { DEBUG_DESTROY_NODE(hw); DEBUG_DESTROY_NODE(op2); return op1; } } break; } #endif case NI_Vector128_Create: #if defined(TARGET_XARCH) case NI_Vector256_Create: #elif defined(TARGET_ARMARCH) case NI_Vector64_Create: #endif { bool hwAllArgsAreConst = true; for (GenTree** use : multiOp->UseEdges()) { if (!(*use)->OperIsConst()) { hwAllArgsAreConst = false; break; } } // Avoid unexpected CSE for constant arguments for Vector_.Create // but only if all arguments are constants. if (hwAllArgsAreConst) { for (GenTree** use : multiOp->UseEdges()) { (*use)->SetDoNotCSE(); } } } break; default: break; } } #endif // defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH) #ifdef FEATURE_HW_INTRINSICS if (multiOp->OperIsHWIntrinsic() && !optValnumCSE_phase) { return fgOptimizeHWIntrinsic(multiOp->AsHWIntrinsic()); } #endif return multiOp; } #endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) //------------------------------------------------------------------------ // fgMorphModToSubMulDiv: Transform a % b into the equivalent a - (a / b) * b // (see ECMA III 3.55 and III.3.56). // // Arguments: // tree - The GT_MOD/GT_UMOD tree to morph // // Returns: // The morphed tree // // Notes: // For ARM64 we don't have a remainder instruction so this transform is // always done. For XARCH this transform is done if we know that magic // division will be used, in that case this transform allows CSE to // eliminate the redundant div from code like "x = a / 3; y = a % 3;". 
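// ----------------------------------------------------------------------------
// Illustrative aside (not part of the original sources): a standalone sketch of
// the identity fgMorphModToSubMulDiv relies on (ECMA-335 III.3.55/III.3.56).
// Unsigned operands are used to sidestep the INT_MIN / -1 corner case; the
// helper name is hypothetical.
static bool ModToSubMulDivHolds(uint32_t a, uint32_t b)
{
    if (b == 0)
    {
        return true; // both forms raise the same divide-by-zero exception
    }
    // a % b == a - (a / b) * b, which lets CSE share the division with a nearby a / b.
    return (a % b) == (a - (a / b) * b);
}
// ----------------------------------------------------------------------------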
// GenTree* Compiler::fgMorphModToSubMulDiv(GenTreeOp* tree) { JITDUMP("\nMorphing MOD/UMOD [%06u] to Sub/Mul/Div\n", dspTreeID(tree)); if (tree->OperGet() == GT_MOD) { tree->SetOper(GT_DIV); } else if (tree->OperGet() == GT_UMOD) { tree->SetOper(GT_UDIV); } else { noway_assert(!"Illegal gtOper in fgMorphModToSubMulDiv"); } var_types type = tree->gtType; GenTree* const copyOfNumeratorValue = fgMakeMultiUse(&tree->gtOp1); GenTree* const copyOfDenominatorValue = fgMakeMultiUse(&tree->gtOp2); GenTree* const mul = gtNewOperNode(GT_MUL, type, tree, copyOfDenominatorValue); GenTree* const sub = gtNewOperNode(GT_SUB, type, copyOfNumeratorValue, mul); // Ensure "sub" does not evaluate "copyOfNumeratorValue" before it is defined by "mul". // sub->gtFlags |= GTF_REVERSE_OPS; #ifdef DEBUG sub->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif tree->CheckDivideByConstOptimized(this); return sub; } //------------------------------------------------------------------------------ // fgOperIsBitwiseRotationRoot : Check if the operation can be a root of a bitwise rotation tree. // // // Arguments: // oper - Operation to check // // Return Value: // True if the operation can be a root of a bitwise rotation tree; false otherwise. bool Compiler::fgOperIsBitwiseRotationRoot(genTreeOps oper) { return (oper == GT_OR) || (oper == GT_XOR); } //------------------------------------------------------------------------------ // fgRecognizeAndMorphBitwiseRotation : Check if the tree represents a left or right rotation. If so, return // an equivalent GT_ROL or GT_ROR tree; otherwise, return the original tree. // // Arguments: // tree - tree to check for a rotation pattern // // Return Value: // An equivalent GT_ROL or GT_ROR tree if a pattern is found; "nullptr" otherwise. // // Assumption: // The input is a GT_OR or a GT_XOR tree. GenTree* Compiler::fgRecognizeAndMorphBitwiseRotation(GenTree* tree) { // // Check for a rotation pattern, e.g., // // OR ROL // / \ / \. // LSH RSZ -> x y // / \ / \. // x AND x AND // / \ / \. // y 31 ADD 31 // / \. // NEG 32 // | // y // The patterns recognized: // (x << (y & M)) op (x >>> ((-y + N) & M)) // (x >>> ((-y + N) & M)) op (x << (y & M)) // // (x << y) op (x >>> (-y + N)) // (x >> > (-y + N)) op (x << y) // // (x >>> (y & M)) op (x << ((-y + N) & M)) // (x << ((-y + N) & M)) op (x >>> (y & M)) // // (x >>> y) op (x << (-y + N)) // (x << (-y + N)) op (x >>> y) // // (x << c1) op (x >>> c2) // (x >>> c1) op (x << c2) // // where // c1 and c2 are const // c1 + c2 == bitsize(x) // N == bitsize(x) // M is const // M & (N - 1) == N - 1 // op is either | or ^ if (((tree->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) != 0) || ((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0)) { // We can't do anything if the tree has assignments, calls, or volatile // reads. Note that we allow GTF_EXCEPT side effect since any exceptions // thrown by the original tree will be thrown by the transformed tree as well. return nullptr; } genTreeOps oper = tree->OperGet(); assert(fgOperIsBitwiseRotationRoot(oper)); // Check if we have an LSH on one side of the OR and an RSZ on the other side. 
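// ----------------------------------------------------------------------------
// Illustrative aside (not part of the original sources): a standalone check
// that the masked shift/or pattern described above really is a rotate. The
// helper names are hypothetical; <cstdint> types are assumed to be visible.
static uint32_t RotateLeftReference(uint32_t x, unsigned r)
{
    // Bit-by-bit reference rotate: bit 'i' of x moves to bit (i + r) % 32.
    uint32_t result = 0;
    for (unsigned bit = 0; bit < 32; bit++)
    {
        result |= ((x >> bit) & 1u) << ((bit + r) % 32);
    }
    return result;
}
static bool ShiftOrPatternIsARotate(uint32_t x, uint32_t y)
{
    // (x << (y & M)) | (x >>> ((-y + N) & M)) with N == 32 and M == 31 is ROL(x, y & 31),
    // including the y % 32 == 0 case, where both shift amounts collapse to zero.
    uint32_t pattern = (x << (y & 31)) | (x >> ((0u - y) & 31));
    return pattern == RotateLeftReference(x, y & 31);
}
// ----------------------------------------------------------------------------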
GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); GenTree* leftShiftTree = nullptr; GenTree* rightShiftTree = nullptr; if ((op1->OperGet() == GT_LSH) && (op2->OperGet() == GT_RSZ)) { leftShiftTree = op1; rightShiftTree = op2; } else if ((op1->OperGet() == GT_RSZ) && (op2->OperGet() == GT_LSH)) { leftShiftTree = op2; rightShiftTree = op1; } else { return nullptr; } // Check if the trees representing the value to shift are identical. // We already checked that there are no side effects above. if (GenTree::Compare(leftShiftTree->gtGetOp1(), rightShiftTree->gtGetOp1())) { GenTree* rotatedValue = leftShiftTree->gtGetOp1(); var_types rotatedValueActualType = genActualType(rotatedValue->gtType); ssize_t rotatedValueBitSize = genTypeSize(rotatedValueActualType) * 8; noway_assert((rotatedValueBitSize == 32) || (rotatedValueBitSize == 64)); GenTree* leftShiftIndex = leftShiftTree->gtGetOp2(); GenTree* rightShiftIndex = rightShiftTree->gtGetOp2(); // The shift index may be masked. At least (rotatedValueBitSize - 1) lower bits // shouldn't be masked for the transformation to be valid. If additional // higher bits are not masked, the transformation is still valid since the result // of MSIL shift instructions is unspecified if the shift amount is greater or equal // than the width of the value being shifted. ssize_t minimalMask = rotatedValueBitSize - 1; ssize_t leftShiftMask = -1; ssize_t rightShiftMask = -1; if ((leftShiftIndex->OperGet() == GT_AND)) { if (leftShiftIndex->gtGetOp2()->IsCnsIntOrI()) { leftShiftMask = leftShiftIndex->gtGetOp2()->AsIntCon()->gtIconVal; leftShiftIndex = leftShiftIndex->gtGetOp1(); } else { return nullptr; } } if ((rightShiftIndex->OperGet() == GT_AND)) { if (rightShiftIndex->gtGetOp2()->IsCnsIntOrI()) { rightShiftMask = rightShiftIndex->gtGetOp2()->AsIntCon()->gtIconVal; rightShiftIndex = rightShiftIndex->gtGetOp1(); } else { return nullptr; } } if (((minimalMask & leftShiftMask) != minimalMask) || ((minimalMask & rightShiftMask) != minimalMask)) { // The shift index is overmasked, e.g., we have // something like (x << y & 15) or // (x >> (32 - y) & 15 with 32 bit x. // The transformation is not valid. return nullptr; } GenTree* shiftIndexWithAdd = nullptr; GenTree* shiftIndexWithoutAdd = nullptr; genTreeOps rotateOp = GT_NONE; GenTree* rotateIndex = nullptr; if (leftShiftIndex->OperGet() == GT_ADD) { shiftIndexWithAdd = leftShiftIndex; shiftIndexWithoutAdd = rightShiftIndex; rotateOp = GT_ROR; } else if (rightShiftIndex->OperGet() == GT_ADD) { shiftIndexWithAdd = rightShiftIndex; shiftIndexWithoutAdd = leftShiftIndex; rotateOp = GT_ROL; } if (shiftIndexWithAdd != nullptr) { if (shiftIndexWithAdd->gtGetOp2()->IsCnsIntOrI()) { if (shiftIndexWithAdd->gtGetOp2()->AsIntCon()->gtIconVal == rotatedValueBitSize) { if (shiftIndexWithAdd->gtGetOp1()->OperGet() == GT_NEG) { if (GenTree::Compare(shiftIndexWithAdd->gtGetOp1()->gtGetOp1(), shiftIndexWithoutAdd)) { // We found one of these patterns: // (x << (y & M)) | (x >>> ((-y + N) & M)) // (x << y) | (x >>> (-y + N)) // (x >>> (y & M)) | (x << ((-y + N) & M)) // (x >>> y) | (x << (-y + N)) // where N == bitsize(x), M is const, and // M & (N - 1) == N - 1 CLANG_FORMAT_COMMENT_ANCHOR; #ifndef TARGET_64BIT if (!shiftIndexWithoutAdd->IsCnsIntOrI() && (rotatedValueBitSize == 64)) { // TODO-X86-CQ: we need to handle variable-sized long shifts specially on x86. // GT_LSH, GT_RSH, and GT_RSZ have helpers for this case. We may need // to add helpers for GT_ROL and GT_ROR. 
return nullptr; } #endif rotateIndex = shiftIndexWithoutAdd; } } } } } else if ((leftShiftIndex->IsCnsIntOrI() && rightShiftIndex->IsCnsIntOrI())) { if (leftShiftIndex->AsIntCon()->gtIconVal + rightShiftIndex->AsIntCon()->gtIconVal == rotatedValueBitSize) { // We found this pattern: // (x << c1) | (x >>> c2) // where c1 and c2 are const and c1 + c2 == bitsize(x) rotateOp = GT_ROL; rotateIndex = leftShiftIndex; } } if (rotateIndex != nullptr) { noway_assert(GenTree::OperIsRotate(rotateOp)); GenTreeFlags inputTreeEffects = tree->gtFlags & GTF_ALL_EFFECT; // We can use the same tree only during global morph; reusing the tree in a later morph // may invalidate value numbers. if (fgGlobalMorph) { tree->AsOp()->gtOp1 = rotatedValue; tree->AsOp()->gtOp2 = rotateIndex; tree->ChangeOper(rotateOp); unsigned childFlags = 0; for (GenTree* op : tree->Operands()) { childFlags |= (op->gtFlags & GTF_ALL_EFFECT); } // The parent's flags should be a superset of its operands' flags noway_assert((inputTreeEffects & childFlags) == childFlags); } else { tree = gtNewOperNode(rotateOp, rotatedValueActualType, rotatedValue, rotateIndex); noway_assert(inputTreeEffects == (tree->gtFlags & GTF_ALL_EFFECT)); } return tree; } } return nullptr; } #if !defined(TARGET_64BIT) //------------------------------------------------------------------------------ // fgRecognizeAndMorphLongMul : Check for and morph long multiplication with 32 bit operands. // // Uses "GenTree::IsValidLongMul" to check for the long multiplication pattern. Will swap // operands if the first one is a constant and the second one is not, even for trees which // end up not being eligibile for long multiplication. // // Arguments: // mul - GT_MUL tree to check for a long multiplication opportunity // // Return Value: // The original tree, with operands possibly swapped, if it is not eligible for long multiplication. // Tree with GTF_MUL_64RSLT set, side effect flags propagated, and children morphed if it is. // GenTreeOp* Compiler::fgRecognizeAndMorphLongMul(GenTreeOp* mul) { assert(mul->OperIs(GT_MUL)); assert(mul->TypeIs(TYP_LONG)); GenTree* op1 = mul->gtGetOp1(); GenTree* op2 = mul->gtGetOp2(); // "IsValidLongMul" and decomposition do not handle constant op1. if (op1->IsIntegralConst()) { std::swap(op1, op2); mul->gtOp1 = op1; mul->gtOp2 = op2; } if (!mul->IsValidLongMul()) { return mul; } // MUL_LONG needs to do the work the casts would have done. mul->ClearUnsigned(); if (op1->IsUnsigned()) { mul->SetUnsigned(); } // "IsValidLongMul" returned "true", so this GT_MUL cannot overflow. mul->ClearOverflow(); mul->Set64RsltMul(); return fgMorphLongMul(mul); } //------------------------------------------------------------------------------ // fgMorphLongMul : Morphs GT_MUL nodes marked with GTF_MUL_64RSLT. // // Morphs *only* the operands of casts that compose the long mul to // avoid them being folded aways. // // Arguments: // mul - GT_MUL tree to morph operands of // // Return Value: // The original tree, with operands morphed and flags propagated. // GenTreeOp* Compiler::fgMorphLongMul(GenTreeOp* mul) { INDEBUG(mul->DebugCheckLongMul()); GenTree* op1 = mul->gtGetOp1(); GenTree* op2 = mul->gtGetOp2(); // Morph the operands. We cannot allow the casts to go away, so we morph their operands directly. 
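// ----------------------------------------------------------------------------
// Illustrative aside (not part of the original sources): a standalone sketch of
// why fgRecognizeAndMorphLongMul above can clear the overflow flag on the MUL.
// A full 32x32->64 multiply always fits in 64 bits. The helper name is
// hypothetical.
static bool LongMulOfIntsCannotOverflow(uint32_t a, uint32_t b)
{
    uint64_t product = static_cast<uint64_t>(a) * static_cast<uint64_t>(b);
    // The largest possible product is 0xFFFFFFFF * 0xFFFFFFFF == 0xFFFFFFFE00000001.
    return product <= 0xFFFFFFFE00000001ull;
}
// ----------------------------------------------------------------------------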
op1->AsCast()->CastOp() = fgMorphTree(op1->AsCast()->CastOp()); op1->SetAllEffectsFlags(op1->AsCast()->CastOp()); if (op2->OperIs(GT_CAST)) { op2->AsCast()->CastOp() = fgMorphTree(op2->AsCast()->CastOp()); op2->SetAllEffectsFlags(op2->AsCast()->CastOp()); } mul->SetAllEffectsFlags(op1, op2); op1->SetDoNotCSE(); op2->SetDoNotCSE(); return mul; } #endif // !defined(TARGET_64BIT) /***************************************************************************** * * Transform the given tree for code generation and return an equivalent tree. */ GenTree* Compiler::fgMorphTree(GenTree* tree, MorphAddrContext* mac) { assert(tree); #ifdef DEBUG if (verbose) { if ((unsigned)JitConfig.JitBreakMorphTree() == tree->gtTreeID) { noway_assert(!"JitBreakMorphTree hit"); } } #endif #ifdef DEBUG int thisMorphNum = 0; if (verbose && treesBeforeAfterMorph) { thisMorphNum = morphNum++; printf("\nfgMorphTree (before %d):\n", thisMorphNum); gtDispTree(tree); } #endif if (fgGlobalMorph) { // Apply any rewrites for implicit byref arguments before morphing the // tree. if (fgMorphImplicitByRefArgs(tree)) { #ifdef DEBUG if (verbose && treesBeforeAfterMorph) { printf("\nfgMorphTree (%d), after implicit-byref rewrite:\n", thisMorphNum); gtDispTree(tree); } #endif } } /*------------------------------------------------------------------------- * fgMorphTree() can potentially replace a tree with another, and the * caller has to store the return value correctly. * Turn this on to always make copy of "tree" here to shake out * hidden/unupdated references. */ #ifdef DEBUG if (compStressCompile(STRESS_GENERIC_CHECK, 0)) { GenTree* copy; if (GenTree::s_gtNodeSizes[tree->gtOper] == TREE_NODE_SZ_SMALL) { copy = gtNewLargeOperNode(GT_ADD, TYP_INT); } else { copy = new (this, GT_CALL) GenTreeCall(TYP_INT); } copy->ReplaceWith(tree, this); #if defined(LATE_DISASM) // GT_CNS_INT is considered small, so ReplaceWith() won't copy all fields if ((tree->gtOper == GT_CNS_INT) && tree->IsIconHandle()) { copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle; } #endif DEBUG_DESTROY_NODE(tree); tree = copy; } #endif // DEBUG if (fgGlobalMorph) { /* Ensure that we haven't morphed this node already */ assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0) && "ERROR: Already morphed this node!"); /* Before morphing the tree, we try to propagate any active assertions */ if (optLocalAssertionProp) { /* Do we have any active assertions? */ if (optAssertionCount > 0) { GenTree* newTree = tree; while (newTree != nullptr) { tree = newTree; /* newTree is non-Null if we propagated an assertion */ newTree = optAssertionProp(apFull, tree, nullptr, nullptr); } assert(tree != nullptr); } } PREFAST_ASSUME(tree != nullptr); } /* Save the original un-morphed tree for fgMorphTreeDone */ GenTree* oldTree = tree; /* Figure out what kind of a node we have */ unsigned kind = tree->OperKind(); /* Is this a constant node? */ if (tree->OperIsConst()) { tree = fgMorphConst(tree); goto DONE; } /* Is this a leaf node? */ if (kind & GTK_LEAF) { tree = fgMorphLeaf(tree); goto DONE; } /* Is it a 'simple' unary/binary operator? 
*/ if (kind & GTK_SMPOP) { tree = fgMorphSmpOp(tree, mac); goto DONE; } /* See what kind of a special operator we have here */ switch (tree->OperGet()) { case GT_CALL: if (tree->OperMayThrow(this)) { tree->gtFlags |= GTF_EXCEPT; } else { tree->gtFlags &= ~GTF_EXCEPT; } tree = fgMorphCall(tree->AsCall()); break; #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) #if defined(FEATURE_SIMD) case GT_SIMD: #endif #if defined(FEATURE_HW_INTRINSICS) case GT_HWINTRINSIC: #endif tree = fgMorphMultiOp(tree->AsMultiOp()); break; #endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) case GT_ARR_ELEM: tree->AsArrElem()->gtArrObj = fgMorphTree(tree->AsArrElem()->gtArrObj); unsigned dim; for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++) { tree->AsArrElem()->gtArrInds[dim] = fgMorphTree(tree->AsArrElem()->gtArrInds[dim]); } tree->gtFlags &= ~GTF_CALL; tree->gtFlags |= tree->AsArrElem()->gtArrObj->gtFlags & GTF_ALL_EFFECT; for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++) { tree->gtFlags |= tree->AsArrElem()->gtArrInds[dim]->gtFlags & GTF_ALL_EFFECT; } if (fgGlobalMorph) { fgSetRngChkTarget(tree, false); } break; case GT_ARR_OFFSET: tree->AsArrOffs()->gtOffset = fgMorphTree(tree->AsArrOffs()->gtOffset); tree->AsArrOffs()->gtIndex = fgMorphTree(tree->AsArrOffs()->gtIndex); tree->AsArrOffs()->gtArrObj = fgMorphTree(tree->AsArrOffs()->gtArrObj); tree->gtFlags &= ~GTF_CALL; tree->gtFlags |= tree->AsArrOffs()->gtOffset->gtFlags & GTF_ALL_EFFECT; tree->gtFlags |= tree->AsArrOffs()->gtIndex->gtFlags & GTF_ALL_EFFECT; tree->gtFlags |= tree->AsArrOffs()->gtArrObj->gtFlags & GTF_ALL_EFFECT; if (fgGlobalMorph) { fgSetRngChkTarget(tree, false); } break; case GT_PHI: tree->gtFlags &= ~GTF_ALL_EFFECT; for (GenTreePhi::Use& use : tree->AsPhi()->Uses()) { use.SetNode(fgMorphTree(use.GetNode())); tree->gtFlags |= use.GetNode()->gtFlags & GTF_ALL_EFFECT; } break; case GT_FIELD_LIST: tree->gtFlags &= ~GTF_ALL_EFFECT; for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses()) { use.SetNode(fgMorphTree(use.GetNode())); tree->gtFlags |= (use.GetNode()->gtFlags & GTF_ALL_EFFECT); } break; case GT_CMPXCHG: tree->AsCmpXchg()->gtOpLocation = fgMorphTree(tree->AsCmpXchg()->gtOpLocation); tree->AsCmpXchg()->gtOpValue = fgMorphTree(tree->AsCmpXchg()->gtOpValue); tree->AsCmpXchg()->gtOpComparand = fgMorphTree(tree->AsCmpXchg()->gtOpComparand); tree->gtFlags &= (~GTF_EXCEPT & ~GTF_CALL); tree->gtFlags |= tree->AsCmpXchg()->gtOpLocation->gtFlags & GTF_ALL_EFFECT; tree->gtFlags |= tree->AsCmpXchg()->gtOpValue->gtFlags & GTF_ALL_EFFECT; tree->gtFlags |= tree->AsCmpXchg()->gtOpComparand->gtFlags & GTF_ALL_EFFECT; break; case GT_STORE_DYN_BLK: tree = fgMorphStoreDynBlock(tree->AsStoreDynBlk()); break; default: #ifdef DEBUG gtDispTree(tree); #endif noway_assert(!"unexpected operator"); } DONE: fgMorphTreeDone(tree, oldTree DEBUGARG(thisMorphNum)); return tree; } //------------------------------------------------------------------------ // fgKillDependentAssertionsSingle: Kill all assertions specific to lclNum // // Arguments: // lclNum - The varNum of the lclVar for which we're killing assertions. // tree - (DEBUG only) the tree responsible for killing its assertions. 
// void Compiler::fgKillDependentAssertionsSingle(unsigned lclNum DEBUGARG(GenTree* tree)) { /* All dependent assertions are killed here */ ASSERT_TP killed = BitVecOps::MakeCopy(apTraits, GetAssertionDep(lclNum)); if (killed) { AssertionIndex index = optAssertionCount; while (killed && (index > 0)) { if (BitVecOps::IsMember(apTraits, killed, index - 1)) { #ifdef DEBUG AssertionDsc* curAssertion = optGetAssertion(index); noway_assert((curAssertion->op1.lcl.lclNum == lclNum) || ((curAssertion->op2.kind == O2K_LCLVAR_COPY) && (curAssertion->op2.lcl.lclNum == lclNum))); if (verbose) { printf("\nThe assignment "); printTreeID(tree); printf(" using V%02u removes: ", curAssertion->op1.lcl.lclNum); optPrintAssertion(curAssertion); } #endif // Remove this bit from the killed mask BitVecOps::RemoveElemD(apTraits, killed, index - 1); optAssertionRemove(index); } index--; } // killed mask should now be zero noway_assert(BitVecOps::IsEmpty(apTraits, killed)); } } //------------------------------------------------------------------------ // fgKillDependentAssertions: Kill all dependent assertions with regard to lclNum. // // Arguments: // lclNum - The varNum of the lclVar for which we're killing assertions. // tree - (DEBUG only) the tree responsible for killing its assertions. // // Notes: // For structs and struct fields, it will invalidate the children and parent // respectively. // Calls fgKillDependentAssertionsSingle to kill the assertions for a single lclVar. // void Compiler::fgKillDependentAssertions(unsigned lclNum DEBUGARG(GenTree* tree)) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->lvPromoted) { noway_assert(varTypeIsStruct(varDsc)); // Kill the field locals. for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i) { fgKillDependentAssertionsSingle(i DEBUGARG(tree)); } // Kill the struct local itself. fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree)); } else if (varDsc->lvIsStructField) { // Kill the field local. fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree)); // Kill the parent struct. fgKillDependentAssertionsSingle(varDsc->lvParentLcl DEBUGARG(tree)); } else { fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree)); } } /***************************************************************************** * * This function is called to complete the morphing of a tree node * It should only be called once for each node. * If DEBUG is defined the flag GTF_DEBUG_NODE_MORPHED is checked and updated, * to enforce the invariant that each node is only morphed once. * If local assertion prop is enabled the result tree may be replaced * by an equivalent tree. * */ void Compiler::fgMorphTreeDone(GenTree* tree, GenTree* oldTree /* == NULL */ DEBUGARG(int morphNum)) { #ifdef DEBUG if (verbose && treesBeforeAfterMorph) { printf("\nfgMorphTree (after %d):\n", morphNum); gtDispTree(tree); printf(""); // in our logic this causes a flush } #endif if (!fgGlobalMorph) { return; } if ((oldTree != nullptr) && (oldTree != tree)) { /* Ensure that we have morphed this node */ assert((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) && "ERROR: Did not morph this node!"); #ifdef DEBUG TransferTestDataToNode(oldTree, tree); #endif } else { // Ensure that we haven't morphed this node already assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0) && "ERROR: Already morphed this node!"); } if (tree->OperIsConst()) { goto DONE; } if (!optLocalAssertionProp) { goto DONE; } /* Do we have any active assertions? 
*/ if (optAssertionCount > 0) { /* Is this an assignment to a local variable */ GenTreeLclVarCommon* lclVarTree = nullptr; // The check below will miss LIR-style assignments. // // But we shouldn't be running local assertion prop on these, // as local prop gets disabled when we run global prop. assert(!tree->OperIs(GT_STORE_LCL_VAR, GT_STORE_LCL_FLD)); // DefinesLocal can return true for some BLK op uses, so // check what gets assigned only when we're at an assignment. if (tree->OperIs(GT_ASG) && tree->DefinesLocal(this, &lclVarTree)) { unsigned lclNum = lclVarTree->GetLclNum(); noway_assert(lclNum < lvaCount); fgKillDependentAssertions(lclNum DEBUGARG(tree)); } } /* If this tree makes a new assertion - make it available */ optAssertionGen(tree); DONE:; #ifdef DEBUG /* Mark this node as being morphed */ tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif } //------------------------------------------------------------------------ // fgFoldConditional: try and fold conditionals and optimize BBJ_COND or // BBJ_SWITCH blocks. // // Argumetns: // block - block to examine // // Returns: // FoldResult indicating what changes were made, if any // Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) { FoldResult result = FoldResult::FOLD_DID_NOTHING; // We don't want to make any code unreachable // if (opts.OptimizationDisabled()) { return result; } if (block->bbJumpKind == BBJ_COND) { noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr); Statement* lastStmt = block->lastStmt(); noway_assert(lastStmt->GetNextStmt() == nullptr); if (lastStmt->GetRootNode()->gtOper == GT_CALL) { noway_assert(fgRemoveRestOfBlock); // Unconditional throw - transform the basic block into a BBJ_THROW // fgConvertBBToThrowBB(block); result = FoldResult::FOLD_CHANGED_CONTROL_FLOW; JITDUMP("\nConditional folded at " FMT_BB "\n", block->bbNum); JITDUMP(FMT_BB " becomes a BBJ_THROW\n", block->bbNum); return result; } noway_assert(lastStmt->GetRootNode()->gtOper == GT_JTRUE); /* Did we fold the conditional */ noway_assert(lastStmt->GetRootNode()->AsOp()->gtOp1); GenTree* condTree; condTree = lastStmt->GetRootNode()->AsOp()->gtOp1; GenTree* cond; cond = condTree->gtEffectiveVal(true); if (cond->OperIsConst()) { /* Yupee - we folded the conditional! * Remove the conditional statement */ noway_assert(cond->gtOper == GT_CNS_INT); noway_assert((block->bbNext->countOfInEdges() > 0) && (block->bbJumpDest->countOfInEdges() > 0)); if (condTree != cond) { // Preserve any side effects assert(condTree->OperIs(GT_COMMA)); lastStmt->SetRootNode(condTree); result = FoldResult::FOLD_ALTERED_LAST_STMT; } else { // no side effects, remove the jump entirely fgRemoveStmt(block, lastStmt); result = FoldResult::FOLD_REMOVED_LAST_STMT; } // block is a BBJ_COND that we are folding the conditional for. // bTaken is the path that will always be taken from block. // bNotTaken is the path that will never be taken from block. 
// BasicBlock* bTaken; BasicBlock* bNotTaken; if (cond->AsIntCon()->gtIconVal != 0) { /* JTRUE 1 - transform the basic block into a BBJ_ALWAYS */ block->bbJumpKind = BBJ_ALWAYS; bTaken = block->bbJumpDest; bNotTaken = block->bbNext; } else { /* Unmark the loop if we are removing a backwards branch */ /* dest block must also be marked as a loop head and */ /* We must be able to reach the backedge block */ if ((block->bbJumpDest->isLoopHead()) && (block->bbJumpDest->bbNum <= block->bbNum) && fgReachable(block->bbJumpDest, block)) { optUnmarkLoopBlocks(block->bbJumpDest, block); } /* JTRUE 0 - transform the basic block into a BBJ_NONE */ block->bbJumpKind = BBJ_NONE; bTaken = block->bbNext; bNotTaken = block->bbJumpDest; } if (fgHaveValidEdgeWeights) { // We are removing an edge from block to bNotTaken // and we have already computed the edge weights, so // we will try to adjust some of the weights // flowList* edgeTaken = fgGetPredForBlock(bTaken, block); BasicBlock* bUpdated = nullptr; // non-NULL if we updated the weight of an internal block // We examine the taken edge (block -> bTaken) // if block has valid profile weight and bTaken does not we try to adjust bTaken's weight // else if bTaken has valid profile weight and block does not we try to adjust block's weight // We can only adjust the block weights when (the edge block -> bTaken) is the only edge into bTaken // if (block->hasProfileWeight()) { // The edge weights for (block -> bTaken) are 100% of block's weight edgeTaken->setEdgeWeights(block->bbWeight, block->bbWeight, bTaken); if (!bTaken->hasProfileWeight()) { if ((bTaken->countOfInEdges() == 1) || (bTaken->bbWeight < block->bbWeight)) { // Update the weight of bTaken bTaken->inheritWeight(block); bUpdated = bTaken; } } } else if (bTaken->hasProfileWeight()) { if (bTaken->countOfInEdges() == 1) { // There is only one in edge to bTaken edgeTaken->setEdgeWeights(bTaken->bbWeight, bTaken->bbWeight, bTaken); // Update the weight of block block->inheritWeight(bTaken); bUpdated = block; } } if (bUpdated != nullptr) { weight_t newMinWeight; weight_t newMaxWeight; flowList* edge; // Now fix the weights of the edges out of 'bUpdated' switch (bUpdated->bbJumpKind) { case BBJ_NONE: edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated); newMaxWeight = bUpdated->bbWeight; newMinWeight = min(edge->edgeWeightMin(), newMaxWeight); edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext); break; case BBJ_COND: edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated); newMaxWeight = bUpdated->bbWeight; newMinWeight = min(edge->edgeWeightMin(), newMaxWeight); edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext); FALLTHROUGH; case BBJ_ALWAYS: edge = fgGetPredForBlock(bUpdated->bbJumpDest, bUpdated); newMaxWeight = bUpdated->bbWeight; newMinWeight = min(edge->edgeWeightMin(), newMaxWeight); edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext); break; default: // We don't handle BBJ_SWITCH break; } } } /* modify the flow graph */ /* Remove 'block' from the predecessor list of 'bNotTaken' */ fgRemoveRefPred(bNotTaken, block); #ifdef DEBUG if (verbose) { printf("\nConditional folded at " FMT_BB "\n", block->bbNum); printf(FMT_BB " becomes a %s", block->bbNum, block->bbJumpKind == BBJ_ALWAYS ? 
"BBJ_ALWAYS" : "BBJ_NONE"); if (block->bbJumpKind == BBJ_ALWAYS) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } printf("\n"); } #endif /* if the block was a loop condition we may have to modify * the loop table */ for (unsigned loopNum = 0; loopNum < optLoopCount; loopNum++) { /* Some loops may have been already removed by * loop unrolling or conditional folding */ if (optLoopTable[loopNum].lpFlags & LPFLG_REMOVED) { continue; } /* We are only interested in the loop bottom */ if (optLoopTable[loopNum].lpBottom == block) { if (cond->AsIntCon()->gtIconVal == 0) { /* This was a bogus loop (condition always false) * Remove the loop from the table */ optMarkLoopRemoved(loopNum); optLoopTable[loopNum].lpTop->unmarkLoopAlign(this DEBUG_ARG("Bogus loop")); #ifdef DEBUG if (verbose) { printf("Removing loop " FMT_LP " (from " FMT_BB " to " FMT_BB ")\n\n", loopNum, optLoopTable[loopNum].lpTop->bbNum, optLoopTable[loopNum].lpBottom->bbNum); } #endif } } } } } else if (block->bbJumpKind == BBJ_SWITCH) { noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr); Statement* lastStmt = block->lastStmt(); noway_assert(lastStmt->GetNextStmt() == nullptr); if (lastStmt->GetRootNode()->gtOper == GT_CALL) { noway_assert(fgRemoveRestOfBlock); // Unconditional throw - transform the basic block into a BBJ_THROW // fgConvertBBToThrowBB(block); result = FoldResult::FOLD_CHANGED_CONTROL_FLOW; JITDUMP("\nConditional folded at " FMT_BB "\n", block->bbNum); JITDUMP(FMT_BB " becomes a BBJ_THROW\n", block->bbNum); return result; } noway_assert(lastStmt->GetRootNode()->gtOper == GT_SWITCH); /* Did we fold the conditional */ noway_assert(lastStmt->GetRootNode()->AsOp()->gtOp1); GenTree* condTree; condTree = lastStmt->GetRootNode()->AsOp()->gtOp1; GenTree* cond; cond = condTree->gtEffectiveVal(true); if (cond->OperIsConst()) { /* Yupee - we folded the conditional! * Remove the conditional statement */ noway_assert(cond->gtOper == GT_CNS_INT); if (condTree != cond) { // Preserve any side effects assert(condTree->OperIs(GT_COMMA)); lastStmt->SetRootNode(condTree); result = FoldResult::FOLD_ALTERED_LAST_STMT; } else { // no side effects, remove the switch entirely fgRemoveStmt(block, lastStmt); result = FoldResult::FOLD_REMOVED_LAST_STMT; } /* modify the flow graph */ /* Find the actual jump target */ unsigned switchVal; switchVal = (unsigned)cond->AsIntCon()->gtIconVal; unsigned jumpCnt; jumpCnt = block->bbJumpSwt->bbsCount; BasicBlock** jumpTab; jumpTab = block->bbJumpSwt->bbsDstTab; bool foundVal; foundVal = false; for (unsigned val = 0; val < jumpCnt; val++, jumpTab++) { BasicBlock* curJump = *jumpTab; assert(curJump->countOfInEdges() > 0); // If val matches switchVal or we are at the last entry and // we never found the switch value then set the new jump dest if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1))) { if (curJump != block->bbNext) { /* transform the basic block into a BBJ_ALWAYS */ block->bbJumpKind = BBJ_ALWAYS; block->bbJumpDest = curJump; } else { /* transform the basic block into a BBJ_NONE */ block->bbJumpKind = BBJ_NONE; } foundVal = true; } else { /* Remove 'block' from the predecessor list of 'curJump' */ fgRemoveRefPred(curJump, block); } } assert(foundVal); #ifdef DEBUG if (verbose) { printf("\nConditional folded at " FMT_BB "\n", block->bbNum); printf(FMT_BB " becomes a %s", block->bbNum, block->bbJumpKind == BBJ_ALWAYS ? 
"BBJ_ALWAYS" : "BBJ_NONE"); if (block->bbJumpKind == BBJ_ALWAYS) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } printf("\n"); } #endif } } return result; } //------------------------------------------------------------------------ // fgMorphBlockStmt: morph a single statement in a block. // // Arguments: // block - block containing the statement // stmt - statement to morph // msg - string to identify caller in a dump // // Returns: // true if 'stmt' was removed from the block. // s false if 'stmt' is still in the block (even if other statements were removed). // // Notes: // Can be called anytime, unlike fgMorphStmts() which should only be called once. // bool Compiler::fgMorphBlockStmt(BasicBlock* block, Statement* stmt DEBUGARG(const char* msg)) { assert(block != nullptr); assert(stmt != nullptr); // Reset some ambient state fgRemoveRestOfBlock = false; compCurBB = block; compCurStmt = stmt; GenTree* morph = fgMorphTree(stmt->GetRootNode()); // Bug 1106830 - During the CSE phase we can't just remove // morph->AsOp()->gtOp2 as it could contain CSE expressions. // This leads to a noway_assert in OptCSE.cpp when // searching for the removed CSE ref. (using gtFindLink) // if (!optValnumCSE_phase) { // Check for morph as a GT_COMMA with an unconditional throw if (fgIsCommaThrow(morph, true)) { #ifdef DEBUG if (verbose) { printf("Folding a top-level fgIsCommaThrow stmt\n"); printf("Removing op2 as unreachable:\n"); gtDispTree(morph->AsOp()->gtOp2); printf("\n"); } #endif // Use the call as the new stmt morph = morph->AsOp()->gtOp1; noway_assert(morph->gtOper == GT_CALL); } // we can get a throw as a statement root if (fgIsThrow(morph)) { #ifdef DEBUG if (verbose) { printf("We have a top-level fgIsThrow stmt\n"); printf("Removing the rest of block as unreachable:\n"); } #endif noway_assert((morph->gtFlags & GTF_COLON_COND) == 0); fgRemoveRestOfBlock = true; } } stmt->SetRootNode(morph); // Can the entire tree be removed? bool removedStmt = false; // Defer removing statements during CSE so we don't inadvertently remove any CSE defs. if (!optValnumCSE_phase) { removedStmt = fgCheckRemoveStmt(block, stmt); } // Or this is the last statement of a conditional branch that was just folded? if (!removedStmt && (stmt->GetNextStmt() == nullptr) && !fgRemoveRestOfBlock) { FoldResult const fr = fgFoldConditional(block); removedStmt = (fr == FoldResult::FOLD_REMOVED_LAST_STMT); } if (!removedStmt) { // Have to re-do the evaluation order since for example some later code does not expect constants as op1 gtSetStmtInfo(stmt); // Have to re-link the nodes for this statement fgSetStmtSeq(stmt); } #ifdef DEBUG if (verbose) { printf("%s %s tree:\n", msg, (removedStmt ? "removed" : "morphed")); gtDispTree(morph); printf("\n"); } #endif if (fgRemoveRestOfBlock) { // Remove the rest of the stmts in the block for (Statement* removeStmt : StatementList(stmt->GetNextStmt())) { fgRemoveStmt(block, removeStmt); } // The rest of block has been removed and we will always throw an exception. // // For compDbgCode, we prepend an empty BB as the firstBB, it is BBJ_NONE. // We should not convert it to a ThrowBB. 
if ((block != fgFirstBB) || ((fgFirstBB->bbFlags & BBF_INTERNAL) == 0)) { // Convert block to a throw bb fgConvertBBToThrowBB(block); } #ifdef DEBUG if (verbose) { printf("\n%s Block " FMT_BB " becomes a throw block.\n", msg, block->bbNum); } #endif fgRemoveRestOfBlock = false; } return removedStmt; } /***************************************************************************** * * Morph the statements of the given block. * This function should be called just once for a block. Use fgMorphBlockStmt() * for reentrant calls. */ void Compiler::fgMorphStmts(BasicBlock* block) { fgRemoveRestOfBlock = false; fgCurrentlyInUseArgTemps = hashBv::Create(this); for (Statement* const stmt : block->Statements()) { if (fgRemoveRestOfBlock) { fgRemoveStmt(block, stmt); continue; } #ifdef FEATURE_SIMD if (opts.OptimizationEnabled() && stmt->GetRootNode()->TypeGet() == TYP_FLOAT && stmt->GetRootNode()->OperGet() == GT_ASG) { fgMorphCombineSIMDFieldAssignments(block, stmt); } #endif fgMorphStmt = stmt; compCurStmt = stmt; GenTree* oldTree = stmt->GetRootNode(); #ifdef DEBUG unsigned oldHash = verbose ? gtHashValue(oldTree) : DUMMY_INIT(~0); if (verbose) { printf("\nfgMorphTree " FMT_BB ", " FMT_STMT " (before)\n", block->bbNum, stmt->GetID()); gtDispTree(oldTree); } #endif /* Morph this statement tree */ GenTree* morphedTree = fgMorphTree(oldTree); // mark any outgoing arg temps as free so we can reuse them in the next statement. fgCurrentlyInUseArgTemps->ZeroAll(); // Has fgMorphStmt been sneakily changed ? if ((stmt->GetRootNode() != oldTree) || (block != compCurBB)) { if (stmt->GetRootNode() != oldTree) { /* This must be tailcall. Ignore 'morphedTree' and carry on with the tail-call node */ morphedTree = stmt->GetRootNode(); } else { /* This must be a tailcall that caused a GCPoll to get injected. We haven't actually morphed the call yet but the flag still got set, clear it here... */ CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG morphedTree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; #endif } noway_assert(compTailCallUsed); noway_assert(morphedTree->gtOper == GT_CALL); GenTreeCall* call = morphedTree->AsCall(); // Could be // - a fast call made as jmp in which case block will be ending with // BBJ_RETURN (as we need epilog) and marked as containing a jmp. // - a tailcall dispatched via JIT helper, on x86, in which case // block will be ending with BBJ_THROW. // - a tail call dispatched via runtime help (IL stubs), in which // case there will not be any tailcall and the block will be ending // with BBJ_RETURN (as normal control flow) noway_assert((call->IsFastTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN) && ((compCurBB->bbFlags & BBF_HAS_JMP)) != 0) || (call->IsTailCallViaJitHelper() && (compCurBB->bbJumpKind == BBJ_THROW)) || (!call->IsTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN))); } #ifdef DEBUG if (compStressCompile(STRESS_CLONE_EXPR, 30)) { // Clone all the trees to stress gtCloneExpr() if (verbose) { printf("\nfgMorphTree (stressClone from):\n"); gtDispTree(morphedTree); } morphedTree = gtCloneExpr(morphedTree); noway_assert(morphedTree != nullptr); if (verbose) { printf("\nfgMorphTree (stressClone to):\n"); gtDispTree(morphedTree); } } /* If the hash value changes. 
we modified the tree during morphing */ if (verbose) { unsigned newHash = gtHashValue(morphedTree); if (newHash != oldHash) { printf("\nfgMorphTree " FMT_BB ", " FMT_STMT " (after)\n", block->bbNum, stmt->GetID()); gtDispTree(morphedTree); } } #endif /* Check for morphedTree as a GT_COMMA with an unconditional throw */ if (!gtIsActiveCSE_Candidate(morphedTree) && fgIsCommaThrow(morphedTree, true)) { /* Use the call as the new stmt */ morphedTree = morphedTree->AsOp()->gtOp1; noway_assert(morphedTree->gtOper == GT_CALL); noway_assert((morphedTree->gtFlags & GTF_COLON_COND) == 0); fgRemoveRestOfBlock = true; } stmt->SetRootNode(morphedTree); if (fgRemoveRestOfBlock) { continue; } /* Has the statement been optimized away */ if (fgCheckRemoveStmt(block, stmt)) { continue; } /* Check if this block ends with a conditional branch that can be folded */ if (fgFoldConditional(block) != FoldResult::FOLD_DID_NOTHING) { continue; } if (ehBlockHasExnFlowDsc(block)) { continue; } } if (fgRemoveRestOfBlock) { if ((block->bbJumpKind == BBJ_COND) || (block->bbJumpKind == BBJ_SWITCH)) { Statement* first = block->firstStmt(); noway_assert(first); Statement* lastStmt = block->lastStmt(); noway_assert(lastStmt && lastStmt->GetNextStmt() == nullptr); GenTree* last = lastStmt->GetRootNode(); if (((block->bbJumpKind == BBJ_COND) && (last->gtOper == GT_JTRUE)) || ((block->bbJumpKind == BBJ_SWITCH) && (last->gtOper == GT_SWITCH))) { GenTree* op1 = last->AsOp()->gtOp1; if (op1->OperIsCompare()) { /* Unmark the comparison node with GTF_RELOP_JMP_USED */ op1->gtFlags &= ~GTF_RELOP_JMP_USED; } lastStmt->SetRootNode(fgMorphTree(op1)); } } /* Mark block as a BBJ_THROW block */ fgConvertBBToThrowBB(block); } #if FEATURE_FASTTAILCALL GenTree* recursiveTailCall = nullptr; if (block->endsWithTailCallConvertibleToLoop(this, &recursiveTailCall)) { fgMorphRecursiveFastTailCallIntoLoop(block, recursiveTailCall->AsCall()); } #endif // Reset this back so that it doesn't leak out impacting other blocks fgRemoveRestOfBlock = false; } /***************************************************************************** * * Morph the blocks of the method. * Returns true if the basic block list is modified. * This function should be called just once. */ void Compiler::fgMorphBlocks() { #ifdef DEBUG if (verbose) { printf("\n*************** In fgMorphBlocks()\n"); } #endif /* Since fgMorphTree can be called after various optimizations to re-arrange * the nodes we need a global flag to signal if we are during the one-pass * global morphing */ fgGlobalMorph = true; // // Local assertion prop is enabled if we are optimized // optLocalAssertionProp = opts.OptimizationEnabled(); if (optLocalAssertionProp) { // // Initialize for local assertion prop // optAssertionInit(true); } if (!compEnregLocals()) { // Morph is checking if lvDoNotEnregister is already set for some optimizations. // If we are running without `CLFLG_REGVAR` flag set (`compEnregLocals() == false`) // then we already know that we won't enregister any locals and it is better to set // this flag before we start reading it. // The main reason why this flag is not set is that we are running in minOpts. 
lvSetMinOptsDoNotEnreg(); } /*------------------------------------------------------------------------- * Process all basic blocks in the function */ BasicBlock* block = fgFirstBB; noway_assert(block); do { #ifdef DEBUG if (verbose) { printf("\nMorphing " FMT_BB " of '%s'\n", block->bbNum, info.compFullName); } #endif if (optLocalAssertionProp) { // // Clear out any currently recorded assertion candidates // before processing each basic block, // also we must handle QMARK-COLON specially // optAssertionReset(0); } // Make the current basic block address available globally. compCurBB = block; // Process all statement trees in the basic block. fgMorphStmts(block); // Do we need to merge the result of this block into a single return block? if ((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) { if ((genReturnBB != nullptr) && (genReturnBB != block)) { fgMergeBlockReturn(block); } } block = block->bbNext; } while (block != nullptr); // We are done with the global morphing phase fgGlobalMorph = false; compCurBB = nullptr; // Under OSR, we no longer need to specially protect the original method entry // if (opts.IsOSR() && (fgEntryBB != nullptr) && (fgEntryBB->bbFlags & BBF_IMPORTED)) { JITDUMP("OSR: un-protecting original method entry " FMT_BB "\n", fgEntryBB->bbNum); assert(fgEntryBB->bbRefs > 0); fgEntryBB->bbRefs--; // We don't need to remember this block anymore. fgEntryBB = nullptr; } #ifdef DEBUG if (verboseTrees) { fgDispBasicBlocks(true); } #endif } //------------------------------------------------------------------------ // fgMergeBlockReturn: assign the block return value (if any) into the single return temp // and branch to the single return block. // // Arguments: // block - the block to process. // // Notes: // A block is not guaranteed to have a last stmt if its jump kind is BBJ_RETURN. // For example a method returning void could have an empty block with jump kind BBJ_RETURN. // Such blocks do materialize as part of in-lining. // // A block with jump kind BBJ_RETURN does not necessarily need to end with GT_RETURN. // It could end with a tail call or rejected tail call or monitor.exit or a GT_INTRINSIC. // For now it is safe to explicitly check whether last stmt is GT_RETURN if genReturnLocal // is BAD_VAR_NUM. // void Compiler::fgMergeBlockReturn(BasicBlock* block) { assert((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)); assert((genReturnBB != nullptr) && (genReturnBB != block)); // TODO: Need to characterize the last top level stmt of a block ending with BBJ_RETURN. Statement* lastStmt = block->lastStmt(); GenTree* ret = (lastStmt != nullptr) ? lastStmt->GetRootNode() : nullptr; if ((ret != nullptr) && (ret->OperGet() == GT_RETURN) && ((ret->gtFlags & GTF_RET_MERGED) != 0)) { // This return was generated during epilog merging, so leave it alone } else { // We'll jump to the genReturnBB. CLANG_FORMAT_COMMENT_ANCHOR; #if !defined(TARGET_X86) if (info.compFlags & CORINFO_FLG_SYNCH) { fgConvertSyncReturnToLeave(block); } else #endif // !TARGET_X86 { block->bbJumpKind = BBJ_ALWAYS; block->bbJumpDest = genReturnBB; fgAddRefPred(genReturnBB, block); fgReturnCount--; } if (genReturnLocal != BAD_VAR_NUM) { // replace the GT_RETURN node to be a GT_ASG that stores the return value into genReturnLocal. // Method must be returning a value other than TYP_VOID. 
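// Illustrative sketch (not from the original sources; names simplified): for a method like
//     int Foo(bool b) { if (b) return 1; return 2; }
// each BBJ_RETURN block is rewritten here roughly as
//     genReturnLocal = <return value>;   // the GT_RETURN becomes a GT_ASG
//     goto genReturnBB;                  // the block becomes BBJ_ALWAYS
// and only the merged return block performs the actual GT_RETURN of genReturnLocal.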
noway_assert(compMethodHasRetVal()); // This block must be ending with a GT_RETURN noway_assert(lastStmt != nullptr); noway_assert(lastStmt->GetNextStmt() == nullptr); noway_assert(ret != nullptr); // GT_RETURN must have non-null operand as the method is returning the value assigned to // genReturnLocal noway_assert(ret->OperGet() == GT_RETURN); noway_assert(ret->gtGetOp1() != nullptr); Statement* pAfterStatement = lastStmt; const DebugInfo& di = lastStmt->GetDebugInfo(); GenTree* tree = gtNewTempAssign(genReturnLocal, ret->gtGetOp1(), &pAfterStatement, di, block); if (tree->OperIsCopyBlkOp()) { tree = fgMorphCopyBlock(tree); } else if (tree->OperIsInitBlkOp()) { tree = fgMorphInitBlock(tree); } if (pAfterStatement == lastStmt) { lastStmt->SetRootNode(tree); } else { // gtNewTempAssign inserted additional statements after last fgRemoveStmt(block, lastStmt); Statement* newStmt = gtNewStmt(tree, di); fgInsertStmtAfter(block, pAfterStatement, newStmt); lastStmt = newStmt; } } else if (ret != nullptr && ret->OperGet() == GT_RETURN) { // This block ends with a GT_RETURN noway_assert(lastStmt != nullptr); noway_assert(lastStmt->GetNextStmt() == nullptr); // Must be a void GT_RETURN with null operand; delete it as this block branches to oneReturn // block noway_assert(ret->TypeGet() == TYP_VOID); noway_assert(ret->gtGetOp1() == nullptr); fgRemoveStmt(block, lastStmt); } JITDUMP("\nUpdate " FMT_BB " to jump to common return block.\n", block->bbNum); DISPBLOCK(block); if (block->hasProfileWeight()) { weight_t const oldWeight = genReturnBB->hasProfileWeight() ? genReturnBB->bbWeight : BB_ZERO_WEIGHT; weight_t const newWeight = oldWeight + block->bbWeight; JITDUMP("merging profile weight " FMT_WT " from " FMT_BB " to common return " FMT_BB "\n", block->bbWeight, block->bbNum, genReturnBB->bbNum); genReturnBB->setBBProfileWeight(newWeight); DISPBLOCK(genReturnBB); } } } /***************************************************************************** * * Make some decisions about the kind of code to generate. */ void Compiler::fgSetOptions() { #ifdef DEBUG /* Should we force fully interruptible code ? */ if (JitConfig.JitFullyInt() || compStressCompile(STRESS_GENERIC_VARN, 30)) { noway_assert(!codeGen->isGCTypeFixed()); SetInterruptible(true); } #endif if (opts.compDbgCode) { assert(!codeGen->isGCTypeFixed()); SetInterruptible(true); // debugging is easier this way ... } /* Assume we won't need an explicit stack frame if this is allowed */ if (compLocallocUsed) { codeGen->setFramePointerRequired(true); } #ifdef TARGET_X86 if (compTailCallUsed) codeGen->setFramePointerRequired(true); #endif // TARGET_X86 if (!opts.genFPopt) { codeGen->setFramePointerRequired(true); } // Assert that the EH table has been initialized by now. Note that // compHndBBtabAllocCount never decreases; it is a high-water mark // of table allocation. In contrast, compHndBBtabCount does shrink // if we delete a dead EH region, and if it shrinks to zero, the // table pointer compHndBBtab is unreliable. assert(compHndBBtabAllocCount >= info.compXcptnsCount); #ifdef TARGET_X86 // Note: this case, and the !X86 case below, should both use the // !X86 path. This would require a few more changes for X86 to use // compHndBBtabCount (the current number of EH clauses) instead of // info.compXcptnsCount (the number of EH clauses in IL), such as // in ehNeedsShadowSPslots(). 
This is because sometimes the IL has // an EH clause that we delete as statically dead code before we // get here, leaving no EH clauses left, and thus no requirement // to use a frame pointer because of EH. But until all the code uses // the same test, leave info.compXcptnsCount here. if (info.compXcptnsCount > 0) { codeGen->setFramePointerRequiredEH(true); } #else // !TARGET_X86 if (compHndBBtabCount > 0) { codeGen->setFramePointerRequiredEH(true); } #endif // TARGET_X86 #ifdef UNIX_X86_ABI if (info.compXcptnsCount > 0) { assert(!codeGen->isGCTypeFixed()); // Enforce fully interruptible codegen for funclet unwinding SetInterruptible(true); } #endif // UNIX_X86_ABI if (compMethodRequiresPInvokeFrame()) { codeGen->setFramePointerRequired(true); // Setup of Pinvoke frame currently requires an EBP style frame } if (info.compPublishStubParam) { codeGen->setFramePointerRequiredGCInfo(true); } if (compIsProfilerHookNeeded()) { codeGen->setFramePointerRequired(true); } if (info.compIsVarArgs) { // Code that initializes lvaVarargsBaseOfStkArgs requires this to be EBP relative. codeGen->setFramePointerRequiredGCInfo(true); } if (lvaReportParamTypeArg()) { codeGen->setFramePointerRequiredGCInfo(true); } // printf("method will %s be fully interruptible\n", GetInterruptible() ? " " : "not"); } /*****************************************************************************/ GenTree* Compiler::fgInitThisClass() { noway_assert(!compIsForInlining()); CORINFO_LOOKUP_KIND kind; info.compCompHnd->getLocationOfThisType(info.compMethodHnd, &kind); if (!kind.needsRuntimeLookup) { return fgGetSharedCCtor(info.compClassHnd); } else { #ifdef FEATURE_READYTORUN // Only CoreRT understands CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE. Don't do this on CoreCLR. if (opts.IsReadyToRun() && IsTargetAbi(CORINFO_CORERT_ABI)) { CORINFO_RESOLVED_TOKEN resolvedToken; memset(&resolvedToken, 0, sizeof(resolvedToken)); // We are in a shared method body, but maybe we don't need a runtime lookup after all. // This covers the case of a generic method on a non-generic type. if (!(info.compClassAttr & CORINFO_FLG_SHAREDINST)) { resolvedToken.hClass = info.compClassHnd; return impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF); } // We need a runtime lookup. GenTree* ctxTree = getRuntimeContextTree(kind.runtimeLookupKind); // CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE with a zeroed out resolvedToken means "get the static // base of the class that owns the method being compiled". If we're in this method, it means we're not // inlining and there's no ambiguity. return impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, TYP_BYREF, gtNewCallArgs(ctxTree), &kind); } #endif // Collectible types require that for shared generic code, if we use the generic context parameter // that we report it. (This is a conservative approach; we could detect some cases particularly when the // context parameter is this that we don't need the eager reporting logic.)
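// For illustration only (hedged summary, not from the original comments), the lookup kinds handled
// by the switch below correspond roughly to:
//     class C<T> { void M()        { ... } }  -> CORINFO_LOOKUP_THISOBJ     (context from 'this')
//     class C<T> { static void S() { ... } }  -> CORINFO_LOOKUP_CLASSPARAM  (hidden class-handle arg)
//     static void M<T>()           { ... }    -> CORINFO_LOOKUP_METHODPARAM (hidden method-desc arg)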
lvaGenericsContextInUse = true; switch (kind.runtimeLookupKind) { case CORINFO_LOOKUP_THISOBJ: { // This code takes a this pointer; but we need to pass the static method desc to get the right point in // the hierarchy GenTree* vtTree = gtNewLclvNode(info.compThisArg, TYP_REF); vtTree->gtFlags |= GTF_VAR_CONTEXT; // Vtable pointer of this object vtTree = gtNewMethodTableLookup(vtTree); GenTree* methodHnd = gtNewIconEmbMethHndNode(info.compMethodHnd); return gtNewHelperCallNode(CORINFO_HELP_INITINSTCLASS, TYP_VOID, gtNewCallArgs(vtTree, methodHnd)); } case CORINFO_LOOKUP_CLASSPARAM: { GenTree* vtTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); vtTree->gtFlags |= GTF_VAR_CONTEXT; return gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewCallArgs(vtTree)); } case CORINFO_LOOKUP_METHODPARAM: { GenTree* methHndTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); methHndTree->gtFlags |= GTF_VAR_CONTEXT; return gtNewHelperCallNode(CORINFO_HELP_INITINSTCLASS, TYP_VOID, gtNewCallArgs(gtNewIconNode(0), methHndTree)); } default: noway_assert(!"Unknown LOOKUP_KIND"); UNREACHABLE(); } } } #ifdef DEBUG /***************************************************************************** * * Tree walk callback to make sure no GT_QMARK nodes are present in the tree, * except for the allowed ? 1 : 0; pattern. */ Compiler::fgWalkResult Compiler::fgAssertNoQmark(GenTree** tree, fgWalkData* data) { if ((*tree)->OperGet() == GT_QMARK) { fgCheckQmarkAllowedForm(*tree); } return WALK_CONTINUE; } void Compiler::fgCheckQmarkAllowedForm(GenTree* tree) { assert(tree->OperGet() == GT_QMARK); assert(!"Qmarks beyond morph disallowed."); } /***************************************************************************** * * Verify that the importer has created GT_QMARK nodes in a way we can * process them. The following is allowed: * * 1. A top level qmark. Top level qmark is of the form: * a) (bool) ? (void) : (void) OR * b) V0N = (bool) ? (type) : (type) * * 2. Recursion is allowed at the top level, i.e., a GT_QMARK can be a child * of either op1 of colon or op2 of colon but not a child of any other * operator. */ void Compiler::fgPreExpandQmarkChecks(GenTree* expr) { GenTree* topQmark = fgGetTopLevelQmark(expr); // If the top level Qmark is null, then scan the tree to make sure // there are no qmarks within it. if (topQmark == nullptr) { fgWalkTreePre(&expr, Compiler::fgAssertNoQmark, nullptr); } else { // We could probably expand the cond node also, but don't think the extra effort is necessary, // so let's just assert the cond node of a top level qmark doesn't have further top level qmarks. fgWalkTreePre(&topQmark->AsOp()->gtOp1, Compiler::fgAssertNoQmark, nullptr); fgPreExpandQmarkChecks(topQmark->AsOp()->gtOp2->AsOp()->gtOp1); fgPreExpandQmarkChecks(topQmark->AsOp()->gtOp2->AsOp()->gtOp2); } } #endif // DEBUG /***************************************************************************** * * Get the top level GT_QMARK node in a given "expr", return NULL if such a * node is not present. If the top level GT_QMARK node is assigned to a * GT_LCL_VAR, then return the lcl node in ppDst. 
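 *
 * For illustration (a simplified sketch, not part of the original comment), the two accepted
 * shapes are:
 *     QMARK(cond, COLON(thenExpr, elseExpr))                       - bare top-level qmark
 *     ASG(LCL_VAR V03, QMARK(cond, COLON(thenExpr, elseExpr)))     - *ppDst receives the LCL_VAR
 * where V03 is just a placeholder local number.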
* */ GenTree* Compiler::fgGetTopLevelQmark(GenTree* expr, GenTree** ppDst /* = NULL */) { if (ppDst != nullptr) { *ppDst = nullptr; } GenTree* topQmark = nullptr; if (expr->gtOper == GT_QMARK) { topQmark = expr; } else if (expr->gtOper == GT_ASG && expr->AsOp()->gtOp2->gtOper == GT_QMARK && expr->AsOp()->gtOp1->gtOper == GT_LCL_VAR) { topQmark = expr->AsOp()->gtOp2; if (ppDst != nullptr) { *ppDst = expr->AsOp()->gtOp1; } } return topQmark; } /********************************************************************************* * * For a castclass helper call, * Importer creates the following tree: * tmp = (op1 == null) ? op1 : ((*op1 == (cse = op2, cse)) ? op1 : helper()); * * This method splits the qmark expression created by the importer into the * following blocks: (block, asg, cond1, cond2, helper, remainder) * Notice that op1 is the result for both the conditions. So we coalesce these * assignments into a single block instead of two blocks resulting a nested diamond. * * +---------->-----------+ * | | | * ^ ^ v * | | | * block-->asg-->cond1--+-->cond2--+-->helper--+-->remainder * * We expect to achieve the following codegen: * mov rsi, rdx tmp = op1 // asgBlock * test rsi, rsi goto skip if tmp == null ? // cond1Block * je SKIP * mov rcx, 0x76543210 cns = op2 // cond2Block * cmp qword ptr [rsi], rcx goto skip if *tmp == op2 * je SKIP * call CORINFO_HELP_CHKCASTCLASS_SPECIAL tmp = helper(cns, tmp) // helperBlock * mov rsi, rax * SKIP: // remainderBlock * tmp has the result. * */ void Compiler::fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt) { #ifdef DEBUG if (verbose) { printf("\nExpanding CastInstOf qmark in " FMT_BB " (before)\n", block->bbNum); fgDispBasicBlocks(block, block, true); } #endif // DEBUG GenTree* expr = stmt->GetRootNode(); GenTree* dst = nullptr; GenTree* qmark = fgGetTopLevelQmark(expr, &dst); noway_assert(dst != nullptr); assert(qmark->gtFlags & GTF_QMARK_CAST_INSTOF); // Get cond, true, false exprs for the qmark. GenTree* condExpr = qmark->gtGetOp1(); GenTree* trueExpr = qmark->gtGetOp2()->AsColon()->ThenNode(); GenTree* falseExpr = qmark->gtGetOp2()->AsColon()->ElseNode(); // Get cond, true, false exprs for the nested qmark. GenTree* nestedQmark = falseExpr; GenTree* cond2Expr; GenTree* true2Expr; GenTree* false2Expr; if (nestedQmark->gtOper == GT_QMARK) { cond2Expr = nestedQmark->gtGetOp1(); true2Expr = nestedQmark->gtGetOp2()->AsColon()->ThenNode(); false2Expr = nestedQmark->gtGetOp2()->AsColon()->ElseNode(); } else { // This is a rare case that arises when we are doing minopts and encounter isinst of null // gtFoldExpr was still is able to optimize away part of the tree (but not all). // That means it does not match our pattern. // Rather than write code to handle this case, just fake up some nodes to make it match the common // case. Synthesize a comparison that is always true, and for the result-on-true, use the // entire subtree we expected to be the nested question op. cond2Expr = gtNewOperNode(GT_EQ, TYP_INT, gtNewIconNode(0, TYP_I_IMPL), gtNewIconNode(0, TYP_I_IMPL)); true2Expr = nestedQmark; false2Expr = gtNewIconNode(0, TYP_I_IMPL); } assert(false2Expr->OperGet() == trueExpr->OperGet()); // Create the chain of blocks. See method header comment. // The order of blocks after this is the following: // block ... asgBlock ... cond1Block ... cond2Block ... helperBlock ... 
remainderBlock // // We need to remember flags that exist on 'block' that we want to propagate to 'remainderBlock', // if they are going to be cleared by fgSplitBlockAfterStatement(). We currently only do this only // for the GC safe point bit, the logic being that if 'block' was marked gcsafe, then surely // remainderBlock will still be GC safe. BasicBlockFlags propagateFlags = block->bbFlags & BBF_GC_SAFE_POINT; BasicBlock* remainderBlock = fgSplitBlockAfterStatement(block, stmt); fgRemoveRefPred(remainderBlock, block); // We're going to put more blocks between block and remainderBlock. BasicBlock* helperBlock = fgNewBBafter(BBJ_NONE, block, true); BasicBlock* cond2Block = fgNewBBafter(BBJ_COND, block, true); BasicBlock* cond1Block = fgNewBBafter(BBJ_COND, block, true); BasicBlock* asgBlock = fgNewBBafter(BBJ_NONE, block, true); remainderBlock->bbFlags |= propagateFlags; // These blocks are only internal if 'block' is (but they've been set as internal by fgNewBBafter). // If they're not internal, mark them as imported to avoid asserts about un-imported blocks. if ((block->bbFlags & BBF_INTERNAL) == 0) { helperBlock->bbFlags &= ~BBF_INTERNAL; cond2Block->bbFlags &= ~BBF_INTERNAL; cond1Block->bbFlags &= ~BBF_INTERNAL; asgBlock->bbFlags &= ~BBF_INTERNAL; helperBlock->bbFlags |= BBF_IMPORTED; cond2Block->bbFlags |= BBF_IMPORTED; cond1Block->bbFlags |= BBF_IMPORTED; asgBlock->bbFlags |= BBF_IMPORTED; } // Chain the flow correctly. fgAddRefPred(asgBlock, block); fgAddRefPred(cond1Block, asgBlock); fgAddRefPred(cond2Block, cond1Block); fgAddRefPred(helperBlock, cond2Block); fgAddRefPred(remainderBlock, helperBlock); fgAddRefPred(remainderBlock, cond1Block); fgAddRefPred(remainderBlock, cond2Block); cond1Block->bbJumpDest = remainderBlock; cond2Block->bbJumpDest = remainderBlock; // Set the weights; some are guesses. asgBlock->inheritWeight(block); cond1Block->inheritWeight(block); cond2Block->inheritWeightPercentage(cond1Block, 50); helperBlock->inheritWeightPercentage(cond2Block, 50); // Append cond1 as JTRUE to cond1Block GenTree* jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, condExpr); Statement* jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo()); fgInsertStmtAtEnd(cond1Block, jmpStmt); // Append cond2 as JTRUE to cond2Block jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, cond2Expr); jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo()); fgInsertStmtAtEnd(cond2Block, jmpStmt); // AsgBlock should get tmp = op1 assignment. trueExpr = gtNewTempAssign(dst->AsLclVarCommon()->GetLclNum(), trueExpr); Statement* trueStmt = fgNewStmtFromTree(trueExpr, stmt->GetDebugInfo()); fgInsertStmtAtEnd(asgBlock, trueStmt); // Since we are adding helper in the JTRUE false path, reverse the cond2 and add the helper. gtReverseCond(cond2Expr); GenTree* helperExpr = gtNewTempAssign(dst->AsLclVarCommon()->GetLclNum(), true2Expr); Statement* helperStmt = fgNewStmtFromTree(helperExpr, stmt->GetDebugInfo()); fgInsertStmtAtEnd(helperBlock, helperStmt); // Finally remove the nested qmark stmt. fgRemoveStmt(block, stmt); if (true2Expr->OperIs(GT_CALL) && (true2Expr->AsCall()->gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN)) { fgConvertBBToThrowBB(helperBlock); } #ifdef DEBUG if (verbose) { printf("\nExpanding CastInstOf qmark in " FMT_BB " (after)\n", block->bbNum); fgDispBasicBlocks(block, remainderBlock, true); } #endif // DEBUG } /***************************************************************************** * * Expand a statement with a top level qmark node. 
There are three cases, based * on whether the qmark has both "true" and "false" arms, or just one of them. * * S0; * C ? T : F; * S1; * * Generates ===> * * bbj_always * +---->------+ * false | | * S0 -->-- ~C -->-- T F -->-- S1 * | | * +--->--------+ * bbj_cond(true) * * ----------------------------------------- * * S0; * C ? T : NOP; * S1; * * Generates ===> * * false * S0 -->-- ~C -->-- T -->-- S1 * | | * +-->-------------+ * bbj_cond(true) * * ----------------------------------------- * * S0; * C ? NOP : F; * S1; * * Generates ===> * * false * S0 -->-- C -->-- F -->-- S1 * | | * +-->------------+ * bbj_cond(true) * * If the qmark assigns to a variable, then create tmps for "then" * and "else" results and assign the temp to the variable as a writeback step. */ void Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt) { GenTree* expr = stmt->GetRootNode(); // Retrieve the Qmark node to be expanded. GenTree* dst = nullptr; GenTree* qmark = fgGetTopLevelQmark(expr, &dst); if (qmark == nullptr) { return; } if (qmark->gtFlags & GTF_QMARK_CAST_INSTOF) { fgExpandQmarkForCastInstOf(block, stmt); return; } #ifdef DEBUG if (verbose) { printf("\nExpanding top-level qmark in " FMT_BB " (before)\n", block->bbNum); fgDispBasicBlocks(block, block, true); } #endif // DEBUG // Retrieve the operands. GenTree* condExpr = qmark->gtGetOp1(); GenTree* trueExpr = qmark->gtGetOp2()->AsColon()->ThenNode(); GenTree* falseExpr = qmark->gtGetOp2()->AsColon()->ElseNode(); assert(!varTypeIsFloating(condExpr->TypeGet())); bool hasTrueExpr = (trueExpr->OperGet() != GT_NOP); bool hasFalseExpr = (falseExpr->OperGet() != GT_NOP); assert(hasTrueExpr || hasFalseExpr); // We expect to have at least one arm of the qmark! // Create remainder, cond and "else" blocks. After this, the blocks are in this order: // block ... condBlock ... elseBlock ... remainderBlock // // We need to remember flags that exist on 'block' that we want to propagate to 'remainderBlock', // if they are going to be cleared by fgSplitBlockAfterStatement(). We currently only do this only // for the GC safe point bit, the logic being that if 'block' was marked gcsafe, then surely // remainderBlock will still be GC safe. BasicBlockFlags propagateFlags = block->bbFlags & BBF_GC_SAFE_POINT; BasicBlock* remainderBlock = fgSplitBlockAfterStatement(block, stmt); fgRemoveRefPred(remainderBlock, block); // We're going to put more blocks between block and remainderBlock. BasicBlock* condBlock = fgNewBBafter(BBJ_COND, block, true); BasicBlock* elseBlock = fgNewBBafter(BBJ_NONE, condBlock, true); // These blocks are only internal if 'block' is (but they've been set as internal by fgNewBBafter). // If they're not internal, mark them as imported to avoid asserts about un-imported blocks. 
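// Hypothetical example (for illustration only): for a statement such as
//     V05 = cond ? Foo() : Bar();
// the split leaves 'condBlock' ending in a JTRUE on the (possibly reversed) condition,
// a then/else block holding the 'V05 = Foo()' / 'V05 = Bar()' assignments, and
// 'remainderBlock' holding whatever followed the qmark statement.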
if ((block->bbFlags & BBF_INTERNAL) == 0) { condBlock->bbFlags &= ~BBF_INTERNAL; elseBlock->bbFlags &= ~BBF_INTERNAL; condBlock->bbFlags |= BBF_IMPORTED; elseBlock->bbFlags |= BBF_IMPORTED; } remainderBlock->bbFlags |= propagateFlags; condBlock->inheritWeight(block); fgAddRefPred(condBlock, block); fgAddRefPred(elseBlock, condBlock); fgAddRefPred(remainderBlock, elseBlock); BasicBlock* thenBlock = nullptr; if (hasTrueExpr && hasFalseExpr) { // bbj_always // +---->------+ // false | | // S0 -->-- ~C -->-- T F -->-- S1 // | | // +--->--------+ // bbj_cond(true) // gtReverseCond(condExpr); condBlock->bbJumpDest = elseBlock; thenBlock = fgNewBBafter(BBJ_ALWAYS, condBlock, true); thenBlock->bbJumpDest = remainderBlock; if ((block->bbFlags & BBF_INTERNAL) == 0) { thenBlock->bbFlags &= ~BBF_INTERNAL; thenBlock->bbFlags |= BBF_IMPORTED; } fgAddRefPred(thenBlock, condBlock); fgAddRefPred(remainderBlock, thenBlock); thenBlock->inheritWeightPercentage(condBlock, 50); elseBlock->inheritWeightPercentage(condBlock, 50); } else if (hasTrueExpr) { // false // S0 -->-- ~C -->-- T -->-- S1 // | | // +-->-------------+ // bbj_cond(true) // gtReverseCond(condExpr); condBlock->bbJumpDest = remainderBlock; fgAddRefPred(remainderBlock, condBlock); // Since we have no false expr, use the one we'd already created. thenBlock = elseBlock; elseBlock = nullptr; thenBlock->inheritWeightPercentage(condBlock, 50); } else if (hasFalseExpr) { // false // S0 -->-- C -->-- F -->-- S1 // | | // +-->------------+ // bbj_cond(true) // condBlock->bbJumpDest = remainderBlock; fgAddRefPred(remainderBlock, condBlock); elseBlock->inheritWeightPercentage(condBlock, 50); } GenTree* jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, qmark->gtGetOp1()); Statement* jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo()); fgInsertStmtAtEnd(condBlock, jmpStmt); // Remove the original qmark statement. fgRemoveStmt(block, stmt); // Since we have top level qmarks, we either have a dst for it in which case // we need to create tmps for true and falseExprs, else just don't bother // assigning. unsigned lclNum = BAD_VAR_NUM; if (dst != nullptr) { assert(dst->gtOper == GT_LCL_VAR); lclNum = dst->AsLclVar()->GetLclNum(); } else { assert(qmark->TypeGet() == TYP_VOID); } if (hasTrueExpr) { if (dst != nullptr) { trueExpr = gtNewTempAssign(lclNum, trueExpr); } Statement* trueStmt = fgNewStmtFromTree(trueExpr, stmt->GetDebugInfo()); fgInsertStmtAtEnd(thenBlock, trueStmt); } // Assign the falseExpr into the dst or tmp, insert in elseBlock if (hasFalseExpr) { if (dst != nullptr) { falseExpr = gtNewTempAssign(lclNum, falseExpr); } Statement* falseStmt = fgNewStmtFromTree(falseExpr, stmt->GetDebugInfo()); fgInsertStmtAtEnd(elseBlock, falseStmt); } #ifdef DEBUG if (verbose) { printf("\nExpanding top-level qmark in " FMT_BB " (after)\n", block->bbNum); fgDispBasicBlocks(block, remainderBlock, true); } #endif // DEBUG } /***************************************************************************** * * Expand GT_QMARK nodes from the flow graph into basic blocks. * */ void Compiler::fgExpandQmarkNodes() { if (compQmarkUsed) { for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { GenTree* expr = stmt->GetRootNode(); #ifdef DEBUG fgPreExpandQmarkChecks(expr); #endif fgExpandQmarkStmt(block, stmt); } } #ifdef DEBUG fgPostExpandQmarkChecks(); #endif } compQmarkRationalized = true; } #ifdef DEBUG /***************************************************************************** * * Make sure we don't have any more GT_QMARK nodes. 
* */ void Compiler::fgPostExpandQmarkChecks() { for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { GenTree* expr = stmt->GetRootNode(); fgWalkTreePre(&expr, Compiler::fgAssertNoQmark, nullptr); } } } #endif /***************************************************************************** * * Promoting struct locals */ void Compiler::fgPromoteStructs() { #ifdef DEBUG if (verbose) { printf("*************** In fgPromoteStructs()\n"); } #endif // DEBUG if (!opts.OptEnabled(CLFLG_STRUCTPROMOTE)) { JITDUMP(" promotion opt flag not enabled\n"); return; } if (fgNoStructPromotion) { JITDUMP(" promotion disabled by JitNoStructPromotion\n"); return; } #if 0 // The code in this #if has been useful in debugging struct promotion issues, by // enabling selective enablement of the struct promotion optimization according to // method hash. #ifdef DEBUG unsigned methHash = info.compMethodHash(); char* lostr = getenv("structpromohashlo"); unsigned methHashLo = 0; if (lostr != NULL) { sscanf_s(lostr, "%x", &methHashLo); } char* histr = getenv("structpromohashhi"); unsigned methHashHi = UINT32_MAX; if (histr != NULL) { sscanf_s(histr, "%x", &methHashHi); } if (methHash < methHashLo || methHash > methHashHi) { return; } else { printf("Promoting structs for method %s, hash = 0x%x.\n", info.compFullName, info.compMethodHash()); printf(""); // in our logic this causes a flush } #endif // DEBUG #endif // 0 if (info.compIsVarArgs) { JITDUMP(" promotion disabled because of varargs\n"); return; } #ifdef DEBUG if (verbose) { printf("\nlvaTable before fgPromoteStructs\n"); lvaTableDump(); } #endif // DEBUG // The lvaTable might grow as we grab temps. Make a local copy here. unsigned startLvaCount = lvaCount; // // Loop through the original lvaTable. Looking for struct locals to be promoted. // lvaStructPromotionInfo structPromotionInfo; bool tooManyLocalsReported = false; // Clear the structPromotionHelper, since it is used during inlining, at which point it // may be conservative about looking up SIMD info. // We don't want to preserve those conservative decisions for the actual struct promotion. structPromotionHelper->Clear(); for (unsigned lclNum = 0; lclNum < startLvaCount; lclNum++) { // Whether this var got promoted bool promotedVar = false; LclVarDsc* varDsc = lvaGetDesc(lclNum); // If we have marked this as lvUsedInSIMDIntrinsic, then we do not want to promote // its fields. Instead, we will attempt to enregister the entire struct. if (varDsc->lvIsSIMDType() && (varDsc->lvIsUsedInSIMDIntrinsic() || isOpaqueSIMDLclVar(varDsc))) { varDsc->lvRegStruct = true; } // Don't promote if we have reached the tracking limit. else if (lvaHaveManyLocals()) { // Print the message first time when we detected this condition if (!tooManyLocalsReported) { JITDUMP("Stopped promoting struct fields, due to too many locals.\n"); } tooManyLocalsReported = true; } else if (varTypeIsStruct(varDsc)) { assert(structPromotionHelper != nullptr); promotedVar = structPromotionHelper->TryPromoteStructVar(lclNum); } if (!promotedVar && varDsc->lvIsSIMDType() && !varDsc->lvFieldAccessed) { // Even if we have not used this in a SIMD intrinsic, if it is not being promoted, // we will treat it as a reg struct. 
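// (Illustration, hedged: e.g. a Vector128<float> local whose individual fields are never
// accessed gains nothing from being split into scalar field locals, so it is kept as a
// single enregisterable struct value instead.)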
varDsc->lvRegStruct = true; } } #ifdef DEBUG if (verbose) { printf("\nlvaTable after fgPromoteStructs\n"); lvaTableDump(); } #endif // DEBUG } void Compiler::fgMorphStructField(GenTree* tree, GenTree* parent) { noway_assert(tree->OperGet() == GT_FIELD); GenTreeField* field = tree->AsField(); GenTree* objRef = field->GetFldObj(); GenTree* obj = ((objRef != nullptr) && (objRef->gtOper == GT_ADDR)) ? objRef->AsOp()->gtOp1 : nullptr; noway_assert((tree->gtFlags & GTF_GLOB_REF) || ((obj != nullptr) && (obj->gtOper == GT_LCL_VAR))); /* Is this an instance data member? */ if ((obj != nullptr) && (obj->gtOper == GT_LCL_VAR)) { unsigned lclNum = obj->AsLclVarCommon()->GetLclNum(); const LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varTypeIsStruct(obj)) { if (varDsc->lvPromoted) { // Promoted struct unsigned fldOffset = field->gtFldOffset; unsigned fieldLclIndex = lvaGetFieldLocal(varDsc, fldOffset); if (fieldLclIndex == BAD_VAR_NUM) { // Access a promoted struct's field with an offset that doesn't correspond to any field. // It can happen if the struct was cast to another struct with different offsets. return; } const LclVarDsc* fieldDsc = lvaGetDesc(fieldLclIndex); var_types fieldType = fieldDsc->TypeGet(); assert(fieldType != TYP_STRUCT); // promoted LCL_VAR can't have a struct type. if (tree->TypeGet() != fieldType) { if (tree->TypeGet() != TYP_STRUCT) { // This is going to be an incorrect instruction promotion. // For example when we try to read int as long. return; } if (field->gtFldHnd != fieldDsc->lvFieldHnd) { CORINFO_CLASS_HANDLE fieldTreeClass = nullptr, fieldDscClass = nullptr; CorInfoType fieldTreeType = info.compCompHnd->getFieldType(field->gtFldHnd, &fieldTreeClass); CorInfoType fieldDscType = info.compCompHnd->getFieldType(fieldDsc->lvFieldHnd, &fieldDscClass); if (fieldTreeType != fieldDscType || fieldTreeClass != fieldDscClass) { // Access the promoted field with a different class handle, can't check that types match. return; } // Access the promoted field as a field of a non-promoted struct with the same class handle. } else { // As we already checked this above, we must have a tree with a TYP_STRUCT type // assert(tree->TypeGet() == TYP_STRUCT); // The field tree accesses it as a struct, but the promoted LCL_VAR field // says that it has another type. This happens when struct promotion unwraps // a single field struct to get to its ultimate type. // // Note that currently, we cannot have a promoted LCL_VAR field with a struct type. // // This mismatch in types can lead to problems for some parent node type like GT_RETURN. // So we check the parent node and only allow this optimization when we have // a GT_ADDR or a GT_ASG. // // Note that for a GT_ASG we have to do some additional work, // see below after the SetOper(GT_LCL_VAR) // if (!parent->OperIs(GT_ADDR, GT_ASG)) { // Don't transform other operations such as GT_RETURN // return; } #ifdef DEBUG // This is an additional DEBUG-only sanity check // assert(structPromotionHelper != nullptr); structPromotionHelper->CheckRetypedAsScalar(field->gtFldHnd, fieldType); #endif // DEBUG } } tree->SetOper(GT_LCL_VAR); tree->AsLclVarCommon()->SetLclNum(fieldLclIndex); tree->gtType = fieldType; tree->gtFlags &= GTF_NODE_MASK; // Note: that clears all flags except `GTF_COLON_COND`. 
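// Illustrative example (simplified, hypothetical local numbers): for a promoted 'struct S { int a; int b; } s',
// a GT_FIELD access of s.b has just been rewritten into a direct GT_LCL_VAR of the promoted field
// local (say V07), so downstream phases see an ordinary scalar local rather than a field access.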
if (parent->gtOper == GT_ASG) { // If we are changing the left side of an assignment, we need to set // these two flags: // if (parent->AsOp()->gtOp1 == tree) { tree->gtFlags |= GTF_VAR_DEF; tree->gtFlags |= GTF_DONT_CSE; } // Promotion of struct containing struct fields where the field // is a struct with a single pointer sized scalar type field: in // this case struct promotion uses the type of the underlying // scalar field as the type of struct field instead of recursively // promoting. This can lead to a case where we have a block-asgn // with its RHS replaced with a scalar type. Mark RHS value as // DONT_CSE so that assertion prop will not do const propagation. // The reason this is required is that if RHS of a block-asg is a // constant, then it is interpreted as init-block incorrectly. // // TODO - This can also be avoided if we implement recursive struct // promotion, tracked by #10019. if (varTypeIsStruct(parent) && parent->AsOp()->gtOp2 == tree && !varTypeIsStruct(tree)) { tree->gtFlags |= GTF_DONT_CSE; } } #ifdef DEBUG if (verbose) { printf("Replacing the field in promoted struct with local var V%02u\n", fieldLclIndex); } #endif // DEBUG } } else { // Normed struct // A "normed struct" is a struct that the VM tells us is a basic type. This can only happen if // the struct contains a single element, and that element is 4 bytes (on x64 it can also be 8 // bytes). Normally, the type of the local var and the type of GT_FIELD are equivalent. However, // there is one extremely rare case where that won't be true. An enum type is a special value type // that contains exactly one element of a primitive integer type (that, for CLS programs is named // "value__"). The VM tells us that a local var of that enum type is the primitive type of the // enum's single field. It turns out that it is legal for IL to access this field using ldflda or // ldfld. For example: // // .class public auto ansi sealed mynamespace.e_t extends [mscorlib]System.Enum // { // .field public specialname rtspecialname int16 value__ // .field public static literal valuetype mynamespace.e_t one = int16(0x0000) // } // .method public hidebysig static void Main() cil managed // { // .locals init (valuetype mynamespace.e_t V_0) // ... // ldloca.s V_0 // ldflda int16 mynamespace.e_t::value__ // ... // } // // Normally, compilers will not generate the ldflda, since it is superfluous. // // In the example, the lclVar is short, but the JIT promotes all trees using this local to the // "actual type", that is, INT. But the GT_FIELD is still SHORT. So, in the case of a type // mismatch like this, don't do this morphing. The local var may end up getting marked as // address taken, and the appropriate SHORT load will be done from memory in that case. 
if (tree->TypeGet() == obj->TypeGet()) { tree->ChangeOper(GT_LCL_VAR); tree->AsLclVarCommon()->SetLclNum(lclNum); tree->gtFlags &= GTF_NODE_MASK; if ((parent->gtOper == GT_ASG) && (parent->AsOp()->gtOp1 == tree)) { tree->gtFlags |= GTF_VAR_DEF; tree->gtFlags |= GTF_DONT_CSE; } #ifdef DEBUG if (verbose) { printf("Replacing the field in normed struct with local var V%02u\n", lclNum); } #endif // DEBUG } } } } void Compiler::fgMorphLocalField(GenTree* tree, GenTree* parent) { noway_assert(tree->OperGet() == GT_LCL_FLD); unsigned lclNum = tree->AsLclFld()->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varTypeIsStruct(varDsc)) { if (varDsc->lvPromoted) { // Promoted struct unsigned fldOffset = tree->AsLclFld()->GetLclOffs(); unsigned fieldLclIndex = 0; LclVarDsc* fldVarDsc = nullptr; if (fldOffset != BAD_VAR_NUM) { fieldLclIndex = lvaGetFieldLocal(varDsc, fldOffset); noway_assert(fieldLclIndex != BAD_VAR_NUM); fldVarDsc = lvaGetDesc(fieldLclIndex); } var_types treeType = tree->TypeGet(); var_types fieldType = fldVarDsc->TypeGet(); if (fldOffset != BAD_VAR_NUM && ((genTypeSize(fieldType) == genTypeSize(treeType)) || (varDsc->lvFieldCnt == 1))) { // There is an existing sub-field we can use. tree->AsLclFld()->SetLclNum(fieldLclIndex); // The field must be an enregisterable type; otherwise it would not be a promoted field. // The tree type may not match, e.g. for return types that have been morphed, but both // must be enregisterable types. assert(varTypeIsEnregisterable(treeType) && varTypeIsEnregisterable(fieldType)); tree->ChangeOper(GT_LCL_VAR); assert(tree->AsLclVarCommon()->GetLclNum() == fieldLclIndex); tree->gtType = fldVarDsc->TypeGet(); if ((parent->gtOper == GT_ASG) && (parent->AsOp()->gtOp1 == tree)) { tree->gtFlags |= GTF_VAR_DEF; tree->gtFlags |= GTF_DONT_CSE; } JITDUMP("Replacing the GT_LCL_FLD in promoted struct with local var V%02u\n", fieldLclIndex); } else { // There is no existing field that has all the parts that we need // So we must ensure that the struct lives in memory. lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LocalField)); #ifdef DEBUG // We can't convert this guy to a float because he really does have his // address taken.. varDsc->lvKeepType = 1; #endif // DEBUG } } else if (varTypeIsSIMD(varDsc) && (genTypeSize(tree->TypeGet()) == genTypeSize(varDsc))) { assert(tree->AsLclFld()->GetLclOffs() == 0); tree->gtType = varDsc->TypeGet(); tree->ChangeOper(GT_LCL_VAR); JITDUMP("Replacing GT_LCL_FLD of struct with local var V%02u\n", lclNum); } } } //------------------------------------------------------------------------ // fgResetImplicitByRefRefCount: Clear the ref count field of all implicit byrefs void Compiler::fgResetImplicitByRefRefCount() { #if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) #ifdef DEBUG if (verbose) { printf("\n*************** In fgResetImplicitByRefRefCount()\n"); } #endif // DEBUG for (unsigned lclNum = 0; lclNum < info.compArgsCount; ++lclNum) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->lvIsImplicitByRef) { // Clear the ref count field; fgMarkAddressTakenLocals will increment it per // appearance of implicit-by-ref param so that call arg morphing can do an // optimization for single-use implicit-by-ref params whose single use is as // an outgoing call argument. 
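// (Illustrative note, hedged: on the targets above, a struct argument that does not fit the
// register-size rules is really passed through a hidden pointer. If such a parameter's only
// use is as an outgoing call argument, arg morphing can forward that pointer instead of making
// a local copy, which is what the ref counts recomputed below are later used to detect.)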
varDsc->setLvRefCnt(0, RCS_EARLY); varDsc->setLvRefCntWtd(0, RCS_EARLY); } } #endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } //------------------------------------------------------------------------ // fgRetypeImplicitByRefArgs: Update the types on implicit byref parameters' `LclVarDsc`s (from // struct to pointer). Also choose (based on address-exposed analysis) // which struct promotions of implicit byrefs to keep or discard. // For those which are kept, insert the appropriate initialization code. // For those which are to be discarded, annotate the promoted field locals // so that fgMorphImplicitByRefArgs will know to rewrite their appearances // using indirections off the pointer parameters. void Compiler::fgRetypeImplicitByRefArgs() { #if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) #ifdef DEBUG if (verbose) { printf("\n*************** In fgRetypeImplicitByRefArgs()\n"); } #endif // DEBUG for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (lvaIsImplicitByRefLocal(lclNum)) { unsigned size; if (varDsc->lvSize() > REGSIZE_BYTES) { size = varDsc->lvSize(); } else { CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd(); size = info.compCompHnd->getClassSize(typeHnd); } if (varDsc->lvPromoted) { // This implicit-by-ref was promoted; create a new temp to represent the // promoted struct before rewriting this parameter as a pointer. unsigned newLclNum = lvaGrabTemp(false DEBUGARG("Promoted implicit byref")); lvaSetStruct(newLclNum, lvaGetStruct(lclNum), true); if (info.compIsVarArgs) { lvaSetStructUsedAsVarArg(newLclNum); } // Update varDsc since lvaGrabTemp might have re-allocated the var dsc array. varDsc = lvaGetDesc(lclNum); // Copy the struct promotion annotations to the new temp. LclVarDsc* newVarDsc = lvaGetDesc(newLclNum); newVarDsc->lvPromoted = true; newVarDsc->lvFieldLclStart = varDsc->lvFieldLclStart; newVarDsc->lvFieldCnt = varDsc->lvFieldCnt; newVarDsc->lvContainsHoles = varDsc->lvContainsHoles; newVarDsc->lvCustomLayout = varDsc->lvCustomLayout; #ifdef DEBUG newVarDsc->lvKeepType = true; #endif // DEBUG // Propagate address-taken-ness and do-not-enregister-ness. newVarDsc->SetAddressExposed(varDsc->IsAddressExposed() DEBUGARG(varDsc->GetAddrExposedReason())); newVarDsc->lvDoNotEnregister = varDsc->lvDoNotEnregister; newVarDsc->lvLiveInOutOfHndlr = varDsc->lvLiveInOutOfHndlr; newVarDsc->lvSingleDef = varDsc->lvSingleDef; newVarDsc->lvSingleDefRegCandidate = varDsc->lvSingleDefRegCandidate; newVarDsc->lvSpillAtSingleDef = varDsc->lvSpillAtSingleDef; #ifdef DEBUG newVarDsc->SetDoNotEnregReason(varDsc->GetDoNotEnregReason()); #endif // DEBUG // If the promotion is dependent, the promoted temp would just be committed // to memory anyway, so we'll rewrite its appearances to be indirections // through the pointer parameter, the same as we'd do for this // parameter if it weren't promoted at all (otherwise the initialization // of the new temp would just be a needless memcpy at method entry). // // Otherwise, see how many appearances there are. We keep two early ref counts: total // number of references to the struct or some field, and how many of these are // arguments to calls. We undo promotion unless we see enough non-call uses. 
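// Worked example (illustrative numbers only): with totalAppearances = 5 and callAppearances = 3,
// nonCallAppearances is 2; if the promoted struct has lvFieldCnt = 4, then 2 <= 4 and the
// promotion is undone, so appearances are rewritten as indirections off the pointer parameter.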
// const unsigned totalAppearances = varDsc->lvRefCnt(RCS_EARLY); const unsigned callAppearances = (unsigned)varDsc->lvRefCntWtd(RCS_EARLY); assert(totalAppearances >= callAppearances); const unsigned nonCallAppearances = totalAppearances - callAppearances; bool undoPromotion = ((lvaGetPromotionType(newVarDsc) == PROMOTION_TYPE_DEPENDENT) || (nonCallAppearances <= varDsc->lvFieldCnt)); #ifdef DEBUG // Above is a profitability heuristic; either value of // undoPromotion should lead to correct code. So, // under stress, make different decisions at times. if (compStressCompile(STRESS_BYREF_PROMOTION, 25)) { undoPromotion = !undoPromotion; JITDUMP("Stress -- changing byref undo promotion for V%02u to %s undo\n", lclNum, undoPromotion ? "" : "NOT"); } #endif // DEBUG JITDUMP("%s promotion of implicit by-ref V%02u: %s total: %u non-call: %u fields: %u\n", undoPromotion ? "Undoing" : "Keeping", lclNum, (lvaGetPromotionType(newVarDsc) == PROMOTION_TYPE_DEPENDENT) ? "dependent;" : "", totalAppearances, nonCallAppearances, varDsc->lvFieldCnt); if (!undoPromotion) { // Insert IR that initializes the temp from the parameter. // LHS is a simple reference to the temp. fgEnsureFirstBBisScratch(); GenTree* lhs = gtNewLclvNode(newLclNum, varDsc->lvType); // RHS is an indirection (using GT_OBJ) off the parameter. GenTree* addr = gtNewLclvNode(lclNum, TYP_BYREF); GenTree* rhs = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, addr, typGetBlkLayout(size)); GenTree* assign = gtNewAssignNode(lhs, rhs); fgNewStmtAtBeg(fgFirstBB, assign); } // Update the locals corresponding to the promoted fields. unsigned fieldLclStart = varDsc->lvFieldLclStart; unsigned fieldCount = varDsc->lvFieldCnt; unsigned fieldLclStop = fieldLclStart + fieldCount; for (unsigned fieldLclNum = fieldLclStart; fieldLclNum < fieldLclStop; ++fieldLclNum) { LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum); if (undoPromotion) { // Leave lvParentLcl pointing to the parameter so that fgMorphImplicitByRefArgs // will know to rewrite appearances of this local. assert(fieldVarDsc->lvParentLcl == lclNum); } else { // Set the new parent. fieldVarDsc->lvParentLcl = newLclNum; } fieldVarDsc->lvIsParam = false; // The fields shouldn't inherit any register preferences from // the parameter which is really a pointer to the struct. fieldVarDsc->lvIsRegArg = false; fieldVarDsc->lvIsMultiRegArg = false; fieldVarDsc->SetArgReg(REG_NA); #if FEATURE_MULTIREG_ARGS fieldVarDsc->SetOtherArgReg(REG_NA); #endif } // Hijack lvFieldLclStart to record the new temp number. // It will get fixed up in fgMarkDemotedImplicitByRefArgs. varDsc->lvFieldLclStart = newLclNum; // Go ahead and clear lvFieldCnt -- either we're promoting // a replacement temp or we're not promoting this arg, and // in either case the parameter is now a pointer that doesn't // have these fields. varDsc->lvFieldCnt = 0; // Hijack lvPromoted to communicate to fgMorphImplicitByRefArgs // whether references to the struct should be rewritten as // indirections off the pointer (not promoted) or references // to the new struct local (promoted). varDsc->lvPromoted = !undoPromotion; } else { // The "undo promotion" path above clears lvPromoted for args that struct // promotion wanted to promote but that aren't considered profitable to // rewrite. It hijacks lvFieldLclStart to communicate to // fgMarkDemotedImplicitByRefArgs that it needs to clean up annotations left // on such args for fgMorphImplicitByRefArgs to consult in the interim.
// Here we have an arg that was simply never promoted, so make sure it doesn't // have nonzero lvFieldLclStart, since that would confuse fgMorphImplicitByRefArgs // and fgMarkDemotedImplicitByRefArgs. assert(varDsc->lvFieldLclStart == 0); } // Since the parameter in this position is really a pointer, its type is TYP_BYREF. varDsc->lvType = TYP_BYREF; // Since this previously was a TYP_STRUCT and we have changed it to a TYP_BYREF // make sure that the following flag is not set as these will force SSA to // exclude tracking/enregistering these LclVars. (see SsaBuilder::IncludeInSsa) // varDsc->lvOverlappingFields = 0; // This flag could have been set, clear it. // The struct parameter may have had its address taken, but the pointer parameter // cannot -- any uses of the struct parameter's address are uses of the pointer // parameter's value, and there's no way for the MSIL to reference the pointer // parameter's address. So clear the address-taken bit for the parameter. varDsc->CleanAddressExposed(); varDsc->lvDoNotEnregister = 0; #ifdef DEBUG // This should not be converted to a double in stress mode, // because it is really a pointer varDsc->lvKeepType = 1; if (verbose) { printf("Changing the lvType for struct parameter V%02d to TYP_BYREF.\n", lclNum); } #endif // DEBUG } } #endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } //------------------------------------------------------------------------ // fgMarkDemotedImplicitByRefArgs: Clear annotations for any implicit byrefs that struct promotion // asked to promote. Appearances of these have now been rewritten // (by fgMorphImplicitByRefArgs) using indirections from the pointer // parameter or references to the promotion temp, as appropriate. void Compiler::fgMarkDemotedImplicitByRefArgs() { JITDUMP("\n*************** In fgMarkDemotedImplicitByRefArgs()\n"); #if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (lvaIsImplicitByRefLocal(lclNum)) { JITDUMP("Clearing annotation for V%02d\n", lclNum); if (varDsc->lvPromoted) { // The parameter is simply a pointer now, so clear lvPromoted. It was left set // by fgRetypeImplicitByRefArgs to communicate to fgMorphImplicitByRefArgs that // appearances of this arg needed to be rewritten to a new promoted struct local. varDsc->lvPromoted = false; // Clear the lvFieldLclStart value that was set by fgRetypeImplicitByRefArgs // to tell fgMorphImplicitByRefArgs which local is the new promoted struct one. varDsc->lvFieldLclStart = 0; } else if (varDsc->lvFieldLclStart != 0) { // We created new temps to represent a promoted struct corresponding to this // parameter, but decided not to go through with the promotion and have // rewritten all uses as indirections off the pointer parameter. // We stashed the pointer to the new struct temp in lvFieldLclStart; make // note of that and clear the annotation. unsigned structLclNum = varDsc->lvFieldLclStart; varDsc->lvFieldLclStart = 0; // The temp struct is now unused; set flags appropriately so that we // won't allocate space for it on the stack. 
LclVarDsc* structVarDsc = lvaGetDesc(structLclNum); structVarDsc->CleanAddressExposed(); #ifdef DEBUG structVarDsc->lvUnusedStruct = true; structVarDsc->lvUndoneStructPromotion = true; #endif // DEBUG unsigned fieldLclStart = structVarDsc->lvFieldLclStart; unsigned fieldCount = structVarDsc->lvFieldCnt; unsigned fieldLclStop = fieldLclStart + fieldCount; for (unsigned fieldLclNum = fieldLclStart; fieldLclNum < fieldLclStop; ++fieldLclNum) { JITDUMP("Fixing pointer for field V%02d from V%02d to V%02d\n", fieldLclNum, lclNum, structLclNum); // Fix the pointer to the parent local. LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum); assert(fieldVarDsc->lvParentLcl == lclNum); fieldVarDsc->lvParentLcl = structLclNum; // The field local is now unused; set flags appropriately so that // we won't allocate stack space for it. fieldVarDsc->CleanAddressExposed(); } } } } #endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } /***************************************************************************** * * Morph irregular parameters * for x64 and ARM64 this means turning them into byrefs, adding extra indirs. */ bool Compiler::fgMorphImplicitByRefArgs(GenTree* tree) { #if (!defined(TARGET_AMD64) || defined(UNIX_AMD64_ABI)) && !defined(TARGET_ARM64) return false; #else // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 bool changed = false; // Implicit byref morphing needs to know if the reference to the parameter is a // child of GT_ADDR or not, so this method looks one level down and does the // rewrite whenever a child is a reference to an implicit byref parameter. if (tree->gtOper == GT_ADDR) { if (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) { GenTree* morphedTree = fgMorphImplicitByRefArgs(tree, true); changed = (morphedTree != nullptr); assert(!changed || (morphedTree == tree)); } } else { for (GenTree** pTree : tree->UseEdges()) { GenTree** pTreeCopy = pTree; GenTree* childTree = *pTree; if (childTree->gtOper == GT_LCL_VAR) { GenTree* newChildTree = fgMorphImplicitByRefArgs(childTree, false); if (newChildTree != nullptr) { changed = true; *pTreeCopy = newChildTree; } } } } return changed; #endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } GenTree* Compiler::fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr) { assert((tree->gtOper == GT_LCL_VAR) || ((tree->gtOper == GT_ADDR) && (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR))); assert(isAddr == (tree->gtOper == GT_ADDR)); GenTree* lclVarTree = isAddr ? tree->AsOp()->gtOp1 : tree; unsigned lclNum = lclVarTree->AsLclVarCommon()->GetLclNum(); LclVarDsc* lclVarDsc = lvaGetDesc(lclNum); CORINFO_FIELD_HANDLE fieldHnd; unsigned fieldOffset = 0; var_types fieldRefType = TYP_UNKNOWN; if (lvaIsImplicitByRefLocal(lclNum)) { // The SIMD transformation to coalesce contiguous references to SIMD vector fields will // re-invoke the traversal to mark address-taken locals. // So, we may encounter a tree that has already been transformed to TYP_BYREF. // If we do, leave it as-is. if (!varTypeIsStruct(lclVarTree)) { assert(lclVarTree->TypeGet() == TYP_BYREF); return nullptr; } else if (lclVarDsc->lvPromoted) { // fgRetypeImplicitByRefArgs created a new promoted struct local to represent this // arg. Rewrite this to refer to the new local. 
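            // Illustrative sketch (hypothetical local numbers): if V01 is an implicit-by-ref
            // struct parameter whose promotion was kept, fgRetypeImplicitByRefArgs recorded the
            // replacement struct temp (say V05) in V01's lvFieldLclStart. An appearance such as
            //     LCL_VAR struct V01
            // is therefore redirected here to
            //     LCL_VAR struct V05
            // so that later phases see the promoted temp instead of the pointer parameter.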
assert(lclVarDsc->lvFieldLclStart != 0); lclVarTree->AsLclVarCommon()->SetLclNum(lclVarDsc->lvFieldLclStart); return tree; } fieldHnd = nullptr; } else if (lclVarDsc->lvIsStructField && lvaIsImplicitByRefLocal(lclVarDsc->lvParentLcl)) { // This was a field reference to an implicit-by-reference struct parameter that was // dependently promoted; update it to a field reference off the pointer. // Grab the field handle from the struct field lclVar. fieldHnd = lclVarDsc->lvFieldHnd; fieldOffset = lclVarDsc->lvFldOffset; assert(fieldHnd != nullptr); // Update lclNum/lclVarDsc to refer to the parameter lclNum = lclVarDsc->lvParentLcl; lclVarDsc = lvaGetDesc(lclNum); fieldRefType = lclVarTree->TypeGet(); } else { // We only need to tranform the 'marked' implicit by ref parameters return nullptr; } // This is no longer a def of the lclVar, even if it WAS a def of the struct. lclVarTree->gtFlags &= ~(GTF_LIVENESS_MASK); if (isAddr) { if (fieldHnd == nullptr) { // change &X into just plain X tree->ReplaceWith(lclVarTree, this); tree->gtType = TYP_BYREF; } else { // change &(X.f) [i.e. GT_ADDR of local for promoted arg field] // into &(X, f) [i.e. GT_ADDR of GT_FIELD off ptr param] lclVarTree->AsLclVarCommon()->SetLclNum(lclNum); lclVarTree->gtType = TYP_BYREF; tree->AsOp()->gtOp1 = gtNewFieldRef(fieldRefType, fieldHnd, lclVarTree, fieldOffset); } #ifdef DEBUG if (verbose) { printf("Replacing address of implicit by ref struct parameter with byref:\n"); } #endif // DEBUG } else { // Change X into OBJ(X) or FIELD(X, f) var_types structType = tree->gtType; tree->gtType = TYP_BYREF; if (fieldHnd) { tree->AsLclVarCommon()->SetLclNum(lclNum); tree = gtNewFieldRef(fieldRefType, fieldHnd, tree, fieldOffset); } else { tree = gtNewObjNode(lclVarDsc->GetStructHnd(), tree); if (structType == TYP_STRUCT) { gtSetObjGcInfo(tree->AsObj()); } } // TODO-CQ: If the VM ever stops violating the ABI and passing heap references // we could remove TGTANYWHERE tree->gtFlags = ((tree->gtFlags & GTF_COMMON_MASK) | GTF_IND_TGTANYWHERE); #ifdef DEBUG if (verbose) { printf("Replacing value of implicit by ref struct parameter with indir of parameter:\n"); } #endif // DEBUG } #ifdef DEBUG if (verbose) { gtDispTree(tree); } #endif // DEBUG return tree; } //------------------------------------------------------------------------ // fgAddFieldSeqForZeroOffset: // Associate a fieldSeq (with a zero offset) with the GenTree node 'addr' // // Arguments: // addr - A GenTree node // fieldSeqZero - a fieldSeq (with a zero offset) // // Notes: // Some GenTree nodes have internal fields that record the field sequence. // If we have one of these nodes: GT_CNS_INT, GT_LCL_FLD // we can append the field sequence using the gtFieldSeq // If we have a GT_ADD of a GT_CNS_INT we can use the // fieldSeq from child node. // Otherwise we record 'fieldSeqZero' in the GenTree node using // a Map: GetFieldSeqStore() // When doing so we take care to preserve any existing zero field sequence // void Compiler::fgAddFieldSeqForZeroOffset(GenTree* addr, FieldSeqNode* fieldSeqZero) { // We expect 'addr' to be an address at this point. assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF); // Tunnel through any commas. const bool commaOnly = true; addr = addr->gtEffectiveVal(commaOnly); // We still expect 'addr' to be an address at this point. 
assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF); FieldSeqNode* fieldSeqUpdate = fieldSeqZero; GenTree* fieldSeqNode = addr; bool fieldSeqRecorded = false; #ifdef DEBUG if (verbose) { printf("\nfgAddFieldSeqForZeroOffset for"); gtDispAnyFieldSeq(fieldSeqZero); printf("\naddr (Before)\n"); gtDispNode(addr, nullptr, nullptr, false); gtDispCommonEndLine(addr); } #endif // DEBUG switch (addr->OperGet()) { case GT_CNS_INT: fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsIntCon()->gtFieldSeq, fieldSeqZero); addr->AsIntCon()->gtFieldSeq = fieldSeqUpdate; fieldSeqRecorded = true; break; case GT_ADDR: if (addr->AsOp()->gtOp1->OperGet() == GT_LCL_FLD) { fieldSeqNode = addr->AsOp()->gtOp1; GenTreeLclFld* lclFld = addr->AsOp()->gtOp1->AsLclFld(); fieldSeqUpdate = GetFieldSeqStore()->Append(lclFld->GetFieldSeq(), fieldSeqZero); lclFld->SetFieldSeq(fieldSeqUpdate); fieldSeqRecorded = true; } break; case GT_ADD: if (addr->AsOp()->gtOp1->OperGet() == GT_CNS_INT) { fieldSeqNode = addr->AsOp()->gtOp1; fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsOp()->gtOp1->AsIntCon()->gtFieldSeq, fieldSeqZero); addr->AsOp()->gtOp1->AsIntCon()->gtFieldSeq = fieldSeqUpdate; fieldSeqRecorded = true; } else if (addr->AsOp()->gtOp2->OperGet() == GT_CNS_INT) { fieldSeqNode = addr->AsOp()->gtOp2; fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsOp()->gtOp2->AsIntCon()->gtFieldSeq, fieldSeqZero); addr->AsOp()->gtOp2->AsIntCon()->gtFieldSeq = fieldSeqUpdate; fieldSeqRecorded = true; } break; default: break; } if (fieldSeqRecorded == false) { // Record in the general zero-offset map. // The "addr" node might already be annotated with a zero-offset field sequence. FieldSeqNode* existingFieldSeq = nullptr; if (GetZeroOffsetFieldMap()->Lookup(addr, &existingFieldSeq)) { // Append the zero field sequences fieldSeqUpdate = GetFieldSeqStore()->Append(existingFieldSeq, fieldSeqZero); } // Overwrite the field sequence annotation for op1 GetZeroOffsetFieldMap()->Set(addr, fieldSeqUpdate, NodeToFieldSeqMap::Overwrite); fieldSeqRecorded = true; } #ifdef DEBUG if (verbose) { printf(" (After)\n"); gtDispNode(fieldSeqNode, nullptr, nullptr, false); gtDispCommonEndLine(fieldSeqNode); } #endif // DEBUG } #ifdef FEATURE_SIMD //----------------------------------------------------------------------------------- // fgMorphCombineSIMDFieldAssignments: // If the RHS of the input stmt is a read for simd vector X Field, then this function // will keep reading next few stmts based on the vector size(2, 3, 4). // If the next stmts LHS are located contiguous and RHS are also located // contiguous, then we replace those statements with a copyblk. // // Argument: // block - BasicBlock*. block which stmt belongs to // stmt - Statement*. the stmt node we want to check // // return value: // if this funciton successfully optimized the stmts, then return true. 
Otherwise // return false; bool Compiler::fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt) { GenTree* tree = stmt->GetRootNode(); assert(tree->OperGet() == GT_ASG); GenTree* originalLHS = tree->AsOp()->gtOp1; GenTree* prevLHS = tree->AsOp()->gtOp1; GenTree* prevRHS = tree->AsOp()->gtOp2; unsigned index = 0; CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; unsigned simdSize = 0; GenTree* simdStructNode = getSIMDStructFromField(prevRHS, &simdBaseJitType, &index, &simdSize, true); if (simdStructNode == nullptr || index != 0 || simdBaseJitType != CORINFO_TYPE_FLOAT) { // if the RHS is not from a SIMD vector field X, then there is no need to check further. return false; } var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); var_types simdType = getSIMDTypeForSize(simdSize); int assignmentsCount = simdSize / genTypeSize(simdBaseType) - 1; int remainingAssignments = assignmentsCount; Statement* curStmt = stmt->GetNextStmt(); Statement* lastStmt = stmt; while (curStmt != nullptr && remainingAssignments > 0) { GenTree* exp = curStmt->GetRootNode(); if (exp->OperGet() != GT_ASG) { break; } GenTree* curLHS = exp->gtGetOp1(); GenTree* curRHS = exp->gtGetOp2(); if (!areArgumentsContiguous(prevLHS, curLHS) || !areArgumentsContiguous(prevRHS, curRHS)) { break; } remainingAssignments--; prevLHS = curLHS; prevRHS = curRHS; lastStmt = curStmt; curStmt = curStmt->GetNextStmt(); } if (remainingAssignments > 0) { // if the left assignments number is bigger than zero, then this means // that the assignments are not assgining to the contiguously memory // locations from same vector. return false; } #ifdef DEBUG if (verbose) { printf("\nFound contiguous assignments from a SIMD vector to memory.\n"); printf("From " FMT_BB ", stmt ", block->bbNum); printStmtID(stmt); printf(" to stmt"); printStmtID(lastStmt); printf("\n"); } #endif for (int i = 0; i < assignmentsCount; i++) { fgRemoveStmt(block, stmt->GetNextStmt()); } GenTree* dstNode; if (originalLHS->OperIs(GT_LCL_FLD)) { dstNode = originalLHS; dstNode->gtType = simdType; dstNode->AsLclFld()->SetFieldSeq(FieldSeqStore::NotAField()); // This may have changed a partial local field into full local field if (dstNode->IsPartialLclFld(this)) { dstNode->gtFlags |= GTF_VAR_USEASG; } else { dstNode->gtFlags &= ~GTF_VAR_USEASG; } } else { GenTree* copyBlkDst = createAddressNodeForSIMDInit(originalLHS, simdSize); if (simdStructNode->OperIsLocal()) { setLclRelatedToSIMDIntrinsic(simdStructNode); } GenTree* copyBlkAddr = copyBlkDst; if (copyBlkAddr->gtOper == GT_LEA) { copyBlkAddr = copyBlkAddr->AsAddrMode()->Base(); } GenTreeLclVarCommon* localDst = copyBlkAddr->IsLocalAddrExpr(); if (localDst != nullptr) { setLclRelatedToSIMDIntrinsic(localDst); } if (simdStructNode->TypeGet() == TYP_BYREF) { assert(simdStructNode->OperIsLocal()); assert(lvaIsImplicitByRefLocal(simdStructNode->AsLclVarCommon()->GetLclNum())); simdStructNode = gtNewIndir(simdType, simdStructNode); } else { assert(varTypeIsSIMD(simdStructNode)); } dstNode = gtNewOperNode(GT_IND, simdType, copyBlkDst); } #ifdef DEBUG if (verbose) { printf("\n" FMT_BB " stmt ", block->bbNum); printStmtID(stmt); printf("(before)\n"); gtDispStmt(stmt); } #endif assert(!simdStructNode->CanCSE()); simdStructNode->ClearDoNotCSE(); tree = gtNewAssignNode(dstNode, simdStructNode); stmt->SetRootNode(tree); // Since we generated a new address node which didn't exist before, // we should expose this address manually here. 
// TODO-ADDR: Remove this when LocalAddressVisitor transforms all // local field access into LCL_FLDs, at that point we would be // combining 2 existing LCL_FLDs or 2 FIELDs that do not reference // a local and thus cannot result in a new address exposed local. fgMarkAddressExposedLocals(stmt); #ifdef DEBUG if (verbose) { printf("\nReplaced " FMT_BB " stmt", block->bbNum); printStmtID(stmt); printf("(after)\n"); gtDispStmt(stmt); } #endif return true; } #endif // FEATURE_SIMD //------------------------------------------------------------------------ // fgCheckStmtAfterTailCall: check that statements after the tail call stmt // candidate are in one of expected forms, that are desctibed below. // // Return Value: // 'true' if stmts are in the expected form, else 'false'. // bool Compiler::fgCheckStmtAfterTailCall() { // For void calls, we would have created a GT_CALL in the stmt list. // For non-void calls, we would have created a GT_RETURN(GT_CAST(GT_CALL)). // For calls returning structs, we would have a void call, followed by a void return. // For debuggable code, it would be an assignment of the call to a temp // We want to get rid of any of this extra trees, and just leave // the call. Statement* callStmt = fgMorphStmt; Statement* nextMorphStmt = callStmt->GetNextStmt(); // Check that the rest stmts in the block are in one of the following pattern: // 1) ret(void) // 2) ret(cast*(callResultLclVar)) // 3) lclVar = callResultLclVar, the actual ret(lclVar) in another block // 4) nop if (nextMorphStmt != nullptr) { GenTree* callExpr = callStmt->GetRootNode(); if (callExpr->gtOper != GT_ASG) { // The next stmt can be GT_RETURN(TYP_VOID) or GT_RETURN(lclVar), // where lclVar was return buffer in the call for structs or simd. Statement* retStmt = nextMorphStmt; GenTree* retExpr = retStmt->GetRootNode(); noway_assert(retExpr->gtOper == GT_RETURN); nextMorphStmt = retStmt->GetNextStmt(); } else { noway_assert(callExpr->gtGetOp1()->OperIsLocal()); unsigned callResultLclNumber = callExpr->gtGetOp1()->AsLclVarCommon()->GetLclNum(); #if FEATURE_TAILCALL_OPT_SHARED_RETURN // We can have a chain of assignments from the call result to // various inline return spill temps. These are ok as long // as the last one ultimately provides the return value or is ignored. // // And if we're returning a small type we may see a cast // on the source side. while ((nextMorphStmt != nullptr) && (nextMorphStmt->GetRootNode()->OperIs(GT_ASG, GT_NOP))) { if (nextMorphStmt->GetRootNode()->OperIs(GT_NOP)) { nextMorphStmt = nextMorphStmt->GetNextStmt(); continue; } Statement* moveStmt = nextMorphStmt; GenTree* moveExpr = nextMorphStmt->GetRootNode(); GenTree* moveDest = moveExpr->gtGetOp1(); noway_assert(moveDest->OperIsLocal()); // Tunnel through any casts on the source side. GenTree* moveSource = moveExpr->gtGetOp2(); while (moveSource->OperIs(GT_CAST)) { noway_assert(!moveSource->gtOverflow()); moveSource = moveSource->gtGetOp1(); } noway_assert(moveSource->OperIsLocal()); // Verify we're just passing the value from one local to another // along the chain. 
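                // Illustrative chain (hypothetical temp numbers):
                //     ASG(V03, CALL ...)      ; the tail call candidate in fgMorphStmt
                //     ASG(V04, CAST(V03))     ; spill-temp copy, possibly through a cast
                //     RETURN(V04)
                // Each link must read the local defined by the previous link, which is what
                // the asserts below check.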
const unsigned srcLclNum = moveSource->AsLclVarCommon()->GetLclNum(); noway_assert(srcLclNum == callResultLclNumber); const unsigned dstLclNum = moveDest->AsLclVarCommon()->GetLclNum(); callResultLclNumber = dstLclNum; nextMorphStmt = moveStmt->GetNextStmt(); } if (nextMorphStmt != nullptr) #endif { Statement* retStmt = nextMorphStmt; GenTree* retExpr = nextMorphStmt->GetRootNode(); noway_assert(retExpr->gtOper == GT_RETURN); GenTree* treeWithLcl = retExpr->gtGetOp1(); while (treeWithLcl->gtOper == GT_CAST) { noway_assert(!treeWithLcl->gtOverflow()); treeWithLcl = treeWithLcl->gtGetOp1(); } noway_assert(callResultLclNumber == treeWithLcl->AsLclVarCommon()->GetLclNum()); nextMorphStmt = retStmt->GetNextStmt(); } } } return nextMorphStmt == nullptr; } //------------------------------------------------------------------------ // fgCanTailCallViaJitHelper: check whether we can use the faster tailcall // JIT helper on x86. // // Return Value: // 'true' if we can; or 'false' if we should use the generic tailcall mechanism. // bool Compiler::fgCanTailCallViaJitHelper() { #if !defined(TARGET_X86) || defined(UNIX_X86_ABI) || defined(FEATURE_READYTORUN) // On anything except windows X86 we have no faster mechanism available. return false; #else // The JIT helper does not properly handle the case where localloc was used. if (compLocallocUsed) return false; return true; #endif } //------------------------------------------------------------------------ // fgMorphReduceAddOps: reduce successive variable adds into a single multiply, // e.g., i + i + i + i => i * 4. // // Arguments: // tree - tree for reduction // // Return Value: // reduced tree if pattern matches, original tree otherwise // GenTree* Compiler::fgMorphReduceAddOps(GenTree* tree) { // ADD(_, V0) starts the pattern match. if (!tree->OperIs(GT_ADD) || tree->gtOverflow()) { return tree; } #ifndef TARGET_64BIT // Transforming 64-bit ADD to 64-bit MUL on 32-bit system results in replacing // ADD ops with a helper function call. Don't apply optimization in that case. if (tree->TypeGet() == TYP_LONG) { return tree; } #endif GenTree* lclVarTree = tree->AsOp()->gtOp2; GenTree* consTree = tree->AsOp()->gtOp1; GenTree* op1 = consTree; GenTree* op2 = lclVarTree; if (!op2->OperIs(GT_LCL_VAR) || !varTypeIsIntegral(op2)) { return tree; } int foldCount = 0; unsigned lclNum = op2->AsLclVarCommon()->GetLclNum(); // Search for pattern of shape ADD(ADD(ADD(lclNum, lclNum), lclNum), lclNum). while (true) { // ADD(lclNum, lclNum), end of tree if (op1->OperIs(GT_LCL_VAR) && op1->AsLclVarCommon()->GetLclNum() == lclNum && op2->OperIs(GT_LCL_VAR) && op2->AsLclVarCommon()->GetLclNum() == lclNum) { foldCount += 2; break; } // ADD(ADD(X, Y), lclNum), keep descending else if (op1->OperIs(GT_ADD) && !op1->gtOverflow() && op2->OperIs(GT_LCL_VAR) && op2->AsLclVarCommon()->GetLclNum() == lclNum) { foldCount++; op2 = op1->AsOp()->gtOp2; op1 = op1->AsOp()->gtOp1; } // Any other case is a pattern we won't attempt to fold for now. else { return tree; } } // V0 + V0 ... + V0 becomes V0 * foldCount, where postorder transform will optimize // accordingly consTree->BashToConst(foldCount, tree->TypeGet()); GenTree* morphed = gtNewOperNode(GT_MUL, tree->TypeGet(), lclVarTree, consTree); DEBUG_DESTROY_NODE(tree); return morphed; }
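To make the shape of this last transformation concrete, here is a minimal stand-alone sketch (not JIT code; the Expr type and the ReduceAddChain helper are invented for illustration) of the fold performed by fgMorphReduceAddOps: a left-leaning chain of adds of one variable is collapsed into a single multiply.

#include <memory>

// Toy expression node, standing in for GenTree in this sketch only.
struct Expr
{
    enum Kind
    {
        Var,   // value = variable id
        Const, // value = integer constant
        Add,
        Mul
    };

    Kind                  kind;
    int                   value;
    std::shared_ptr<Expr> op1;
    std::shared_ptr<Expr> op2;
};

static std::shared_ptr<Expr> MakeLeaf(Expr::Kind kind, int value)
{
    auto node   = std::make_shared<Expr>();
    node->kind  = kind;
    node->value = value;
    return node;
}

// Collapses ADD(...ADD(v, v)..., v) over a single variable id into MUL(v, n),
// following the same shape test as fgMorphReduceAddOps; any other shape is
// returned unchanged. Assumes Add nodes always have both operands.
static std::shared_ptr<Expr> ReduceAddChain(const std::shared_ptr<Expr>& tree)
{
    if ((tree->kind != Expr::Add) || (tree->op2->kind != Expr::Var))
    {
        return tree;
    }

    const int varId     = tree->op2->value;
    int       foldCount = 0;
    Expr*     op1       = tree->op1.get();
    Expr*     op2       = tree->op2.get();

    while (true)
    {
        if ((op1->kind == Expr::Var) && (op1->value == varId) && (op2->kind == Expr::Var) && (op2->value == varId))
        {
            foldCount += 2; // innermost ADD(v, v) ends the pattern
            break;
        }
        else if ((op1->kind == Expr::Add) && (op2->kind == Expr::Var) && (op2->value == varId))
        {
            foldCount++; // one more "+ v" layer; keep descending
            op2 = op1->op2.get();
            op1 = op1->op1.get();
        }
        else
        {
            return tree; // not the pattern; leave the tree alone
        }
    }

    auto mul = MakeLeaf(Expr::Mul, 0);
    mul->op1 = MakeLeaf(Expr::Var, varId);
    mul->op2 = MakeLeaf(Expr::Const, foldCount);
    return mul; // v * foldCount
}

Applied to ADD(ADD(ADD(v, v), v), v), this returns MUL(v, 4), matching the i + i + i + i => i * 4 example in the function header above.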
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Morph XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "allocacheck.h" // for alloca // Convert the given node into a call to the specified helper passing // the given argument list. // // Tries to fold constants and also adds an edge for overflow exception // returns the morphed tree GenTree* Compiler::fgMorphCastIntoHelper(GenTree* tree, int helper, GenTree* oper) { GenTree* result; /* If the operand is a constant, we'll try to fold it */ if (oper->OperIsConst()) { GenTree* oldTree = tree; tree = gtFoldExprConst(tree); // This may not fold the constant (NaN ...) if (tree != oldTree) { return fgMorphTree(tree); } else if (tree->OperIsConst()) { return fgMorphConst(tree); } // assert that oper is unchanged and that it is still a GT_CAST node noway_assert(tree->AsCast()->CastOp() == oper); noway_assert(tree->gtOper == GT_CAST); } result = fgMorphIntoHelperCall(tree, helper, gtNewCallArgs(oper)); assert(result == tree); return result; } /***************************************************************************** * * Convert the given node into a call to the specified helper passing * the given argument list. */ GenTree* Compiler::fgMorphIntoHelperCall(GenTree* tree, int helper, GenTreeCall::Use* args, bool morphArgs) { // The helper call ought to be semantically equivalent to the original node, so preserve its VN. tree->ChangeOper(GT_CALL, GenTree::PRESERVE_VN); GenTreeCall* call = tree->AsCall(); call->gtCallType = CT_HELPER; call->gtReturnType = tree->TypeGet(); call->gtCallMethHnd = eeFindHelper(helper); call->gtCallThisArg = nullptr; call->gtCallArgs = args; call->gtCallLateArgs = nullptr; call->fgArgInfo = nullptr; call->gtRetClsHnd = nullptr; call->gtCallMoreFlags = GTF_CALL_M_EMPTY; call->gtInlineCandidateInfo = nullptr; call->gtControlExpr = nullptr; #ifdef UNIX_X86_ABI call->gtFlags |= GTF_CALL_POP_ARGS; #endif // UNIX_X86_ABI #if DEBUG // Helper calls are never candidates. call->gtInlineObservation = InlineObservation::CALLSITE_IS_CALL_TO_HELPER; call->callSig = nullptr; #endif // DEBUG #ifdef FEATURE_READYTORUN call->gtEntryPoint.addr = nullptr; call->gtEntryPoint.accessType = IAT_VALUE; #endif #if FEATURE_MULTIREG_RET call->ResetReturnType(); call->ClearOtherRegs(); call->ClearOtherRegFlags(); #ifndef TARGET_64BIT if (varTypeIsLong(tree)) { call->InitializeLongReturnType(); } #endif // !TARGET_64BIT #endif // FEATURE_MULTIREG_RET if (tree->OperMayThrow(this)) { tree->gtFlags |= GTF_EXCEPT; } else { tree->gtFlags &= ~GTF_EXCEPT; } tree->gtFlags |= GTF_CALL; for (GenTreeCall::Use& use : GenTreeCall::UseList(args)) { tree->gtFlags |= (use.GetNode()->gtFlags & GTF_ALL_EFFECT); } /* Perform the morphing */ if (morphArgs) { tree = fgMorphArgs(call); } return tree; } //------------------------------------------------------------------------ // fgMorphExpandCast: Performs the pre-order (required) morphing for a cast. // // Performs a rich variety of pre-order transformations (and some optimizations). // // Notably: // 1. 
Splits long -> small type casts into long -> int -> small type // for 32 bit targets. Does the same for float/double -> small type // casts for all targets. // 2. Morphs casts not supported by the target directly into helpers. // These mostly have to do with casts from and to floating point // types, especially checked ones. Refer to the implementation for // what specific casts need to be handled - it is a complex matrix. // 3. "Casts away" the GC-ness of a tree (for CAST(nint <- byref)) via // assigning the GC tree to an inline - COMMA(ASG, LCL_VAR) - non-GC // temporary. // 3. "Pushes down" truncating long -> int casts for some operations: // CAST(int <- MUL(long, long)) => MUL(CAST(int <- long), CAST(int <- long)). // The purpose of this is to allow "optNarrowTree" in the post-order // traversal to fold the tree into a TYP_INT one, which helps 32 bit // targets (and AMD64 too since 32 bit instructions are more compact). // TODO-Arm64-CQ: Re-evaluate the value of this optimization for ARM64. // // Arguments: // tree - the cast tree to morph // // Return Value: // The fully morphed tree, or "nullptr" if it needs further morphing, // in which case the cast may be transformed into an unchecked one // and its operand changed (the cast "expanded" into two). // GenTree* Compiler::fgMorphExpandCast(GenTreeCast* tree) { GenTree* oper = tree->CastOp(); if (fgGlobalMorph && (oper->gtOper == GT_ADDR)) { // Make sure we've checked if 'oper' is an address of an implicit-byref parameter. // If it is, fgMorphImplicitByRefArgs will change its type, and we want the cast // morphing code to see that type. fgMorphImplicitByRefArgs(oper); } var_types srcType = genActualType(oper); var_types dstType = tree->CastToType(); unsigned dstSize = genTypeSize(dstType); // See if the cast has to be done in two steps. R -> I if (varTypeIsFloating(srcType) && varTypeIsIntegral(dstType)) { if (srcType == TYP_FLOAT #if defined(TARGET_ARM64) // Arm64: src = float, dst is overflow conversion. // This goes through helper and hence src needs to be converted to double. && tree->gtOverflow() #elif defined(TARGET_AMD64) // Amd64: src = float, dst = uint64 or overflow conversion. // This goes through helper and hence src needs to be converted to double. && (tree->gtOverflow() || (dstType == TYP_ULONG)) #elif defined(TARGET_ARM) // Arm: src = float, dst = int64/uint64 or overflow conversion. && (tree->gtOverflow() || varTypeIsLong(dstType)) #else // x86: src = float, dst = uint32/int64/uint64 or overflow conversion. && (tree->gtOverflow() || varTypeIsLong(dstType) || (dstType == TYP_UINT)) #endif ) { oper = gtNewCastNode(TYP_DOUBLE, oper, false, TYP_DOUBLE); } // Do we need to do it in two steps R -> I -> smallType? if (dstSize < genTypeSize(TYP_INT)) { oper = gtNewCastNodeL(TYP_INT, oper, /* fromUnsigned */ false, TYP_INT); oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); tree->AsCast()->CastOp() = oper; // We must not mistreat the original cast, which was from a floating point type, // as from an unsigned type, since we now have a TYP_INT node for the source and // CAST_OVF(BYTE <- INT) != CAST_OVF(BYTE <- UINT). assert(!tree->IsUnsigned()); } else { if (!tree->gtOverflow()) { #ifdef TARGET_ARM64 // ARM64 supports all non-overflow checking conversions directly. 
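                // (For instance, double -> long and double -> ulong each map to a single
                // fcvtzs / fcvtzu instruction on ARM64, so no helper call is required.)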
return nullptr; #else switch (dstType) { case TYP_INT: return nullptr; case TYP_UINT: #if defined(TARGET_ARM) || defined(TARGET_AMD64) return nullptr; #else // TARGET_X86 return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT, oper); #endif // TARGET_X86 case TYP_LONG: #ifdef TARGET_AMD64 // SSE2 has instructions to convert a float/double directly to a long return nullptr; #else // !TARGET_AMD64 return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG, oper); #endif // !TARGET_AMD64 case TYP_ULONG: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG, oper); default: unreached(); } #endif // TARGET_ARM64 } else { switch (dstType) { case TYP_INT: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2INT_OVF, oper); case TYP_UINT: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT_OVF, oper); case TYP_LONG: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG_OVF, oper); case TYP_ULONG: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG_OVF, oper); default: unreached(); } } } } #ifndef TARGET_64BIT // The code generation phase (for x86 & ARM32) does not handle casts // directly from [u]long to anything other than [u]int. Insert an // intermediate cast to native int. else if (varTypeIsLong(srcType) && varTypeIsSmall(dstType)) { oper = gtNewCastNode(TYP_I_IMPL, oper, tree->IsUnsigned(), TYP_I_IMPL); oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); tree->ClearUnsigned(); tree->AsCast()->CastOp() = oper; } #endif //! TARGET_64BIT #ifdef TARGET_ARMARCH // AArch, unlike x86/amd64, has instructions that can cast directly from // all integers (except for longs on AArch32 of course) to floats. // Because there is no IL instruction conv.r4.un, uint/ulong -> float // casts are always imported as CAST(float <- CAST(double <- uint/ulong)). // We can eliminate the redundant intermediate cast as an optimization. else if ((dstType == TYP_FLOAT) && (srcType == TYP_DOUBLE) && oper->OperIs(GT_CAST) #ifdef TARGET_ARM && !varTypeIsLong(oper->AsCast()->CastOp()) #endif ) { oper->gtType = TYP_FLOAT; oper->CastToType() = TYP_FLOAT; return fgMorphTree(oper); } #endif // TARGET_ARMARCH #ifdef TARGET_ARM // converts long/ulong --> float/double casts into helper calls. else if (varTypeIsFloating(dstType) && varTypeIsLong(srcType)) { if (dstType == TYP_FLOAT) { // there is only a double helper, so we // - change the dsttype to double // - insert a cast from double to float // - recurse into the resulting tree tree->CastToType() = TYP_DOUBLE; tree->gtType = TYP_DOUBLE; tree = gtNewCastNode(TYP_FLOAT, tree, false, TYP_FLOAT); return fgMorphTree(tree); } if (tree->gtFlags & GTF_UNSIGNED) return fgMorphCastIntoHelper(tree, CORINFO_HELP_ULNG2DBL, oper); return fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper); } #endif // TARGET_ARM #ifdef TARGET_AMD64 // Do we have to do two step U4/8 -> R4/8 ? // Codegen supports the following conversion as one-step operation // a) Long -> R4/R8 // b) U8 -> R8 // // The following conversions are performed as two-step operations using above. // U4 -> R4/8 = U4-> Long -> R4/8 // U8 -> R4 = U8 -> R8 -> R4 else if (tree->IsUnsigned() && varTypeIsFloating(dstType)) { srcType = varTypeToUnsigned(srcType); if (srcType == TYP_ULONG) { if (dstType == TYP_FLOAT) { // Codegen can handle U8 -> R8 conversion. 
// U8 -> R4 = U8 -> R8 -> R4 // - change the dsttype to double // - insert a cast from double to float // - recurse into the resulting tree tree->CastToType() = TYP_DOUBLE; tree->gtType = TYP_DOUBLE; tree = gtNewCastNode(TYP_FLOAT, tree, false, TYP_FLOAT); return fgMorphTree(tree); } } else if (srcType == TYP_UINT) { oper = gtNewCastNode(TYP_LONG, oper, true, TYP_LONG); oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); tree->ClearUnsigned(); tree->CastOp() = oper; } } #endif // TARGET_AMD64 #ifdef TARGET_X86 // Do we have to do two step U4/8 -> R4/8 ? else if (tree->IsUnsigned() && varTypeIsFloating(dstType)) { srcType = varTypeToUnsigned(srcType); if (srcType == TYP_ULONG) { return fgMorphCastIntoHelper(tree, CORINFO_HELP_ULNG2DBL, oper); } else if (srcType == TYP_UINT) { oper = gtNewCastNode(TYP_LONG, oper, true, TYP_LONG); oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); tree->gtFlags &= ~GTF_UNSIGNED; return fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper); } } else if (((tree->gtFlags & GTF_UNSIGNED) == 0) && (srcType == TYP_LONG) && varTypeIsFloating(dstType)) { oper = fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper); // Since we don't have a Jit Helper that converts to a TYP_FLOAT // we just use the one that converts to a TYP_DOUBLE // and then add a cast to TYP_FLOAT // if ((dstType == TYP_FLOAT) && (oper->OperGet() == GT_CALL)) { // Fix the return type to be TYP_DOUBLE // oper->gtType = TYP_DOUBLE; // Add a Cast to TYP_FLOAT // tree = gtNewCastNode(TYP_FLOAT, oper, false, TYP_FLOAT); INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return tree; } else { return oper; } } #endif // TARGET_X86 else if (varTypeIsGC(srcType) != varTypeIsGC(dstType)) { // We are casting away GC information. we would like to just // change the type to int, however this gives the emitter fits because // it believes the variable is a GC variable at the beginning of the // instruction group, but is not turned non-gc by the code generator // we fix this by copying the GC pointer to a non-gc pointer temp. noway_assert(!varTypeIsGC(dstType) && "How can we have a cast to a GCRef here?"); // We generate an assignment to an int and then do the cast from an int. With this we avoid // the gc problem and we allow casts to bytes, longs, etc... unsigned lclNum = lvaGrabTemp(true DEBUGARG("Cast away GC")); oper->gtType = TYP_I_IMPL; GenTree* asg = gtNewTempAssign(lclNum, oper); oper->gtType = srcType; // do the real cast GenTree* cast = gtNewCastNode(tree->TypeGet(), gtNewLclvNode(lclNum, TYP_I_IMPL), false, dstType); // Generate the comma tree oper = gtNewOperNode(GT_COMMA, tree->TypeGet(), asg, cast); return fgMorphTree(oper); } // Look for narrowing casts ([u]long -> [u]int) and try to push them // down into the operand before morphing it. // // It doesn't matter if this is cast is from ulong or long (i.e. if // GTF_UNSIGNED is set) because the transformation is only applied to // overflow-insensitive narrowing casts, which always silently truncate. // // Note that casts from [u]long to small integer types are handled above. if ((srcType == TYP_LONG) && ((dstType == TYP_INT) || (dstType == TYP_UINT))) { // As a special case, look for overflow-sensitive casts of an AND // expression, and see if the second operand is a small constant. Since // the result of an AND is bound by its smaller operand, it may be // possible to prove that the cast won't overflow, which will in turn // allow the cast's operand to be transformed. 
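        // Worked example (illustrative): for CAST_OVF(uint <- AND(long x, 0xFF)) the mask
        // guarantees the value fits in 32 bits, so (0xFF >> 32) == 0, the overflow flag is
        // cleared, and the now overflow-insensitive cast becomes eligible for the push-down
        // transformation below.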
if (tree->gtOverflow() && (oper->OperGet() == GT_AND)) { GenTree* andOp2 = oper->AsOp()->gtOp2; // Look for a constant less than 2^{32} for a cast to uint, or less // than 2^{31} for a cast to int. int maxWidth = (dstType == TYP_UINT) ? 32 : 31; if ((andOp2->OperGet() == GT_CNS_NATIVELONG) && ((andOp2->AsIntConCommon()->LngValue() >> maxWidth) == 0)) { tree->ClearOverflow(); tree->SetAllEffectsFlags(oper); } } // Only apply this transformation during global morph, // when neither the cast node nor the oper node may throw an exception // based on the upper 32 bits. // if (fgGlobalMorph && !tree->gtOverflow() && !oper->gtOverflowEx()) { // For these operations the lower 32 bits of the result only depends // upon the lower 32 bits of the operands. // bool canPushCast = oper->OperIs(GT_ADD, GT_SUB, GT_MUL, GT_AND, GT_OR, GT_XOR, GT_NOT, GT_NEG); // For long LSH cast to int, there is a discontinuity in behavior // when the shift amount is 32 or larger. // // CAST(INT, LSH(1LL, 31)) == LSH(1, 31) // LSH(CAST(INT, 1LL), CAST(INT, 31)) == LSH(1, 31) // // CAST(INT, LSH(1LL, 32)) == 0 // LSH(CAST(INT, 1LL), CAST(INT, 32)) == LSH(1, 32) == LSH(1, 0) == 1 // // So some extra validation is needed. // if (oper->OperIs(GT_LSH)) { GenTree* shiftAmount = oper->AsOp()->gtOp2; // Expose constant value for shift, if possible, to maximize the number // of cases we can handle. shiftAmount = gtFoldExpr(shiftAmount); oper->AsOp()->gtOp2 = shiftAmount; #if DEBUG // We may remorph the shift amount tree again later, so clear any morphed flag. shiftAmount->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; #endif // DEBUG if (shiftAmount->IsIntegralConst()) { const ssize_t shiftAmountValue = shiftAmount->AsIntCon()->IconValue(); if ((shiftAmountValue >= 64) || (shiftAmountValue < 0)) { // Shift amount is large enough or negative so result is undefined. // Don't try to optimize. assert(!canPushCast); } else if (shiftAmountValue >= 32) { // We know that we have a narrowing cast ([u]long -> [u]int) // and that we are casting to a 32-bit value, which will result in zero. // // Check to see if we have any side-effects that we must keep // if ((tree->gtFlags & GTF_ALL_EFFECT) == 0) { // Result of the shift is zero. DEBUG_DESTROY_NODE(tree); GenTree* zero = gtNewZeroConNode(TYP_INT); return fgMorphTree(zero); } else // We do have a side-effect { // We could create a GT_COMMA node here to keep the side-effect and return a zero // Instead we just don't try to optimize this case. canPushCast = false; } } else { // Shift amount is positive and small enough that we can push the cast through. canPushCast = true; } } else { // Shift amount is unknown. We can't optimize this case. assert(!canPushCast); } } if (canPushCast) { DEBUG_DESTROY_NODE(tree); // Insert narrowing casts for op1 and op2. oper->AsOp()->gtOp1 = gtNewCastNode(TYP_INT, oper->AsOp()->gtOp1, false, dstType); if (oper->AsOp()->gtOp2 != nullptr) { oper->AsOp()->gtOp2 = gtNewCastNode(TYP_INT, oper->AsOp()->gtOp2, false, dstType); } // Clear the GT_MUL_64RSLT if it is set. if (oper->gtOper == GT_MUL && (oper->gtFlags & GTF_MUL_64RSLT)) { oper->gtFlags &= ~GTF_MUL_64RSLT; } // The operation now produces a 32-bit result. oper->gtType = TYP_INT; // Remorph the new tree as the casts that we added may be folded away. 
return fgMorphTree(oper); } } } return nullptr; } #ifdef DEBUG const char* getNonStandardArgKindName(NonStandardArgKind kind) { switch (kind) { case NonStandardArgKind::None: return "None"; case NonStandardArgKind::PInvokeFrame: return "PInvokeFrame"; case NonStandardArgKind::PInvokeTarget: return "PInvokeTarget"; case NonStandardArgKind::PInvokeCookie: return "PInvokeCookie"; case NonStandardArgKind::WrapperDelegateCell: return "WrapperDelegateCell"; case NonStandardArgKind::ShiftLow: return "ShiftLow"; case NonStandardArgKind::ShiftHigh: return "ShiftHigh"; case NonStandardArgKind::FixedRetBuffer: return "FixedRetBuffer"; case NonStandardArgKind::VirtualStubCell: return "VirtualStubCell"; case NonStandardArgKind::R2RIndirectionCell: return "R2RIndirectionCell"; case NonStandardArgKind::ValidateIndirectCallTarget: return "ValidateIndirectCallTarget"; default: unreached(); } } void fgArgTabEntry::Dump() const { printf("fgArgTabEntry[arg %u", argNum); printf(" %d.%s", GetNode()->gtTreeID, GenTree::OpName(GetNode()->OperGet())); printf(" %s", varTypeName(argType)); printf(" (%s)", passedByRef ? "By ref" : "By value"); if (GetRegNum() != REG_STK) { printf(", %u reg%s:", numRegs, numRegs == 1 ? "" : "s"); for (unsigned i = 0; i < numRegs; i++) { printf(" %s", getRegName(regNums[i])); } } if (GetStackByteSize() > 0) { #if defined(DEBUG_ARG_SLOTS) printf(", numSlots=%u, slotNum=%u, byteSize=%u, byteOffset=%u", numSlots, slotNum, m_byteSize, m_byteOffset); #else printf(", byteSize=%u, byteOffset=%u", m_byteSize, m_byteOffset); #endif } printf(", byteAlignment=%u", m_byteAlignment); if (isLateArg()) { printf(", lateArgInx=%u", GetLateArgInx()); } if (IsSplit()) { printf(", isSplit"); } if (needTmp) { printf(", tmpNum=V%02u", tmpNum); } if (needPlace) { printf(", needPlace"); } if (isTmp) { printf(", isTmp"); } if (processed) { printf(", processed"); } if (IsHfaRegArg()) { printf(", isHfa(%s)", varTypeName(GetHfaType())); } if (isBackFilled) { printf(", isBackFilled"); } if (nonStandardArgKind != NonStandardArgKind::None) { printf(", nonStandard[%s]", getNonStandardArgKindName(nonStandardArgKind)); } if (isStruct) { printf(", isStruct"); } printf("]\n"); } #endif fgArgInfo::fgArgInfo(Compiler* comp, GenTreeCall* call, unsigned numArgs) { compiler = comp; callTree = call; argCount = 0; // filled in arg count, starts at zero DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;) nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE; stkLevel = 0; #if defined(UNIX_X86_ABI) alignmentDone = false; stkSizeBytes = 0; padStkAlign = 0; #endif #if FEATURE_FIXED_OUT_ARGS outArgSize = 0; #endif argTableSize = numArgs; // the allocated table size hasRegArgs = false; hasStackArgs = false; argsComplete = false; argsSorted = false; needsTemps = false; if (argTableSize == 0) { argTable = nullptr; } else { argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argTableSize]; } } /***************************************************************************** * * fgArgInfo Copy Constructor * * This method needs to act like a copy constructor for fgArgInfo. * The newCall needs to have its fgArgInfo initialized such that * we have newCall that is an exact copy of the oldCall. * We have to take care since the argument information * in the argTable contains pointers that must point to the * new arguments and not the old arguments. 
*/ fgArgInfo::fgArgInfo(GenTreeCall* newCall, GenTreeCall* oldCall) { fgArgInfo* oldArgInfo = oldCall->AsCall()->fgArgInfo; compiler = oldArgInfo->compiler; callTree = newCall; argCount = 0; // filled in arg count, starts at zero DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;) nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE; stkLevel = oldArgInfo->stkLevel; #if defined(UNIX_X86_ABI) alignmentDone = oldArgInfo->alignmentDone; stkSizeBytes = oldArgInfo->stkSizeBytes; padStkAlign = oldArgInfo->padStkAlign; #endif #if FEATURE_FIXED_OUT_ARGS outArgSize = oldArgInfo->outArgSize; #endif argTableSize = oldArgInfo->argTableSize; argsComplete = false; argTable = nullptr; assert(oldArgInfo->argsComplete); if (argTableSize > 0) { argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argTableSize]; // Copy the old arg entries for (unsigned i = 0; i < argTableSize; i++) { argTable[i] = new (compiler, CMK_fgArgInfo) fgArgTabEntry(*oldArgInfo->argTable[i]); } // The copied arg entries contain pointers to old uses, they need // to be updated to point to new uses. if (newCall->gtCallThisArg != nullptr) { for (unsigned i = 0; i < argTableSize; i++) { if (argTable[i]->use == oldCall->gtCallThisArg) { argTable[i]->use = newCall->gtCallThisArg; break; } } } GenTreeCall::UseIterator newUse = newCall->Args().begin(); GenTreeCall::UseIterator newUseEnd = newCall->Args().end(); GenTreeCall::UseIterator oldUse = oldCall->Args().begin(); GenTreeCall::UseIterator oldUseEnd = newCall->Args().end(); for (; newUse != newUseEnd; ++newUse, ++oldUse) { for (unsigned i = 0; i < argTableSize; i++) { if (argTable[i]->use == oldUse.GetUse()) { argTable[i]->use = newUse.GetUse(); break; } } } newUse = newCall->LateArgs().begin(); newUseEnd = newCall->LateArgs().end(); oldUse = oldCall->LateArgs().begin(); oldUseEnd = newCall->LateArgs().end(); for (; newUse != newUseEnd; ++newUse, ++oldUse) { for (unsigned i = 0; i < argTableSize; i++) { if (argTable[i]->lateUse == oldUse.GetUse()) { argTable[i]->lateUse = newUse.GetUse(); break; } } } } argCount = oldArgInfo->argCount; DEBUG_ARG_SLOTS_ONLY(nextSlotNum = oldArgInfo->nextSlotNum;) nextStackByteOffset = oldArgInfo->nextStackByteOffset; hasRegArgs = oldArgInfo->hasRegArgs; hasStackArgs = oldArgInfo->hasStackArgs; argsComplete = true; argsSorted = true; } void fgArgInfo::AddArg(fgArgTabEntry* curArgTabEntry) { assert(argCount < argTableSize); argTable[argCount] = curArgTabEntry; argCount++; } fgArgTabEntry* fgArgInfo::AddRegArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, regNumber regNum, unsigned numRegs, unsigned byteSize, unsigned byteAlignment, bool isStruct, bool isFloatHfa, bool isVararg /*=false*/) { fgArgTabEntry* curArgTabEntry = new (compiler, CMK_fgArgInfo) fgArgTabEntry; // Any additional register numbers are set by the caller. // This is primarily because on ARM we don't yet know if it // will be split or if it is a double HFA, so the number of registers // may actually be less. 
curArgTabEntry->setRegNum(0, regNum); curArgTabEntry->argNum = argNum; curArgTabEntry->argType = node->TypeGet(); curArgTabEntry->use = use; curArgTabEntry->lateUse = nullptr; curArgTabEntry->numRegs = numRegs; #if defined(DEBUG_ARG_SLOTS) curArgTabEntry->slotNum = 0; curArgTabEntry->numSlots = 0; #endif curArgTabEntry->SetLateArgInx(UINT_MAX); curArgTabEntry->tmpNum = BAD_VAR_NUM; curArgTabEntry->SetSplit(false); curArgTabEntry->isTmp = false; curArgTabEntry->needTmp = false; curArgTabEntry->needPlace = false; curArgTabEntry->processed = false; if (GlobalJitOptions::compFeatureHfa) { curArgTabEntry->SetHfaElemKind(CORINFO_HFA_ELEM_NONE); } curArgTabEntry->isBackFilled = false; curArgTabEntry->nonStandardArgKind = NonStandardArgKind::None; curArgTabEntry->isStruct = isStruct; curArgTabEntry->SetIsVararg(isVararg); curArgTabEntry->SetByteAlignment(byteAlignment); curArgTabEntry->SetByteSize(byteSize, isStruct, isFloatHfa); curArgTabEntry->SetByteOffset(0); hasRegArgs = true; if (argCount >= argTableSize) { fgArgTabEntry** oldTable = argTable; argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argCount + 1]; memcpy(argTable, oldTable, argCount * sizeof(fgArgTabEntry*)); argTableSize++; } AddArg(curArgTabEntry); return curArgTabEntry; } #if defined(UNIX_AMD64_ABI) fgArgTabEntry* fgArgInfo::AddRegArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, regNumber regNum, unsigned numRegs, unsigned byteSize, unsigned byteAlignment, const bool isStruct, const bool isFloatHfa, const bool isVararg, const regNumber otherRegNum, const unsigned structIntRegs, const unsigned structFloatRegs, const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr) { fgArgTabEntry* curArgTabEntry = AddRegArg(argNum, node, use, regNum, numRegs, byteSize, byteAlignment, isStruct, isFloatHfa, isVararg); assert(curArgTabEntry != nullptr); curArgTabEntry->isStruct = isStruct; // is this a struct arg curArgTabEntry->structIntRegs = structIntRegs; curArgTabEntry->structFloatRegs = structFloatRegs; INDEBUG(curArgTabEntry->checkIsStruct();) assert(numRegs <= 2); if (numRegs == 2) { curArgTabEntry->setRegNum(1, otherRegNum); } if (isStruct && structDescPtr != nullptr) { curArgTabEntry->structDesc.CopyFrom(*structDescPtr); } return curArgTabEntry; } #endif // defined(UNIX_AMD64_ABI) fgArgTabEntry* fgArgInfo::AddStkArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, unsigned numSlots, unsigned byteSize, unsigned byteAlignment, bool isStruct, bool isFloatHfa, bool isVararg /*=false*/) { fgArgTabEntry* curArgTabEntry = new (compiler, CMK_fgArgInfo) fgArgTabEntry; #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi()) { nextSlotNum = roundUp(nextSlotNum, byteAlignment / TARGET_POINTER_SIZE); } #endif nextStackByteOffset = roundUp(nextStackByteOffset, byteAlignment); DEBUG_ARG_SLOTS_ASSERT(nextStackByteOffset / TARGET_POINTER_SIZE == nextSlotNum); curArgTabEntry->setRegNum(0, REG_STK); curArgTabEntry->argNum = argNum; curArgTabEntry->argType = node->TypeGet(); curArgTabEntry->use = use; curArgTabEntry->lateUse = nullptr; #if defined(DEBUG_ARG_SLOTS) curArgTabEntry->numSlots = numSlots; curArgTabEntry->slotNum = nextSlotNum; #endif curArgTabEntry->numRegs = 0; #if defined(UNIX_AMD64_ABI) curArgTabEntry->structIntRegs = 0; curArgTabEntry->structFloatRegs = 0; #endif // defined(UNIX_AMD64_ABI) curArgTabEntry->SetLateArgInx(UINT_MAX); curArgTabEntry->tmpNum = BAD_VAR_NUM; curArgTabEntry->SetSplit(false); curArgTabEntry->isTmp = false; curArgTabEntry->needTmp = false; curArgTabEntry->needPlace = 
false; curArgTabEntry->processed = false; if (GlobalJitOptions::compFeatureHfa) { curArgTabEntry->SetHfaElemKind(CORINFO_HFA_ELEM_NONE); } curArgTabEntry->isBackFilled = false; curArgTabEntry->nonStandardArgKind = NonStandardArgKind::None; curArgTabEntry->isStruct = isStruct; curArgTabEntry->SetIsVararg(isVararg); curArgTabEntry->SetByteAlignment(byteAlignment); curArgTabEntry->SetByteSize(byteSize, isStruct, isFloatHfa); curArgTabEntry->SetByteOffset(nextStackByteOffset); hasStackArgs = true; AddArg(curArgTabEntry); DEBUG_ARG_SLOTS_ONLY(nextSlotNum += numSlots;) nextStackByteOffset += curArgTabEntry->GetByteSize(); return curArgTabEntry; } void fgArgInfo::RemorphReset() { DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;) nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE; } //------------------------------------------------------------------------ // UpdateRegArg: Update the given fgArgTabEntry while morphing. // // Arguments: // curArgTabEntry - the fgArgTabEntry to update. // node - the tree node that defines the argument // reMorphing - a boolean value indicate whether we are remorphing the call // // Assumptions: // This must have already been determined to be at least partially passed in registers. // void fgArgInfo::UpdateRegArg(fgArgTabEntry* curArgTabEntry, GenTree* node, bool reMorphing) { bool isLateArg = curArgTabEntry->isLateArg(); // If this is a late arg, we'd better be updating it with a correctly marked node, and vice-versa. assert((isLateArg && ((node->gtFlags & GTF_LATE_ARG) != 0)) || (!isLateArg && ((node->gtFlags & GTF_LATE_ARG) == 0))); assert(curArgTabEntry->numRegs != 0); assert(curArgTabEntry->use->GetNode() == node); } //------------------------------------------------------------------------ // UpdateStkArg: Update the given fgArgTabEntry while morphing. // // Arguments: // curArgTabEntry - the fgArgTabEntry to update. // node - the tree node that defines the argument // reMorphing - a boolean value indicate whether we are remorphing the call // // Assumptions: // This must have already been determined to be passed on the stack. // void fgArgInfo::UpdateStkArg(fgArgTabEntry* curArgTabEntry, GenTree* node, bool reMorphing) { bool isLateArg = curArgTabEntry->isLateArg(); // If this is a late arg, we'd better be updating it with a correctly marked node, and vice-versa. 
assert((isLateArg && ((node->gtFlags & GTF_LATE_ARG) != 0)) || (!isLateArg && ((node->gtFlags & GTF_LATE_ARG) == 0))); noway_assert(curArgTabEntry->use != callTree->gtCallThisArg); assert((curArgTabEntry->GetRegNum() == REG_STK) || curArgTabEntry->IsSplit()); assert(curArgTabEntry->use->GetNode() == node); #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi()) { nextSlotNum = roundUp(nextSlotNum, curArgTabEntry->GetByteAlignment() / TARGET_POINTER_SIZE); assert(curArgTabEntry->slotNum == nextSlotNum); nextSlotNum += curArgTabEntry->numSlots; } #endif nextStackByteOffset = roundUp(nextStackByteOffset, curArgTabEntry->GetByteAlignment()); assert(curArgTabEntry->GetByteOffset() == nextStackByteOffset); nextStackByteOffset += curArgTabEntry->GetStackByteSize(); } void fgArgInfo::SplitArg(unsigned argNum, unsigned numRegs, unsigned numSlots) { fgArgTabEntry* curArgTabEntry = nullptr; assert(argNum < argCount); for (unsigned inx = 0; inx < argCount; inx++) { curArgTabEntry = argTable[inx]; if (curArgTabEntry->argNum == argNum) { break; } } assert(numRegs > 0); assert(numSlots > 0); if (argsComplete) { assert(curArgTabEntry->IsSplit() == true); assert(curArgTabEntry->numRegs == numRegs); DEBUG_ARG_SLOTS_ONLY(assert(curArgTabEntry->numSlots == numSlots);) assert(hasStackArgs == true); } else { curArgTabEntry->SetSplit(true); curArgTabEntry->numRegs = numRegs; DEBUG_ARG_SLOTS_ONLY(curArgTabEntry->numSlots = numSlots;) curArgTabEntry->SetByteOffset(0); hasStackArgs = true; } DEBUG_ARG_SLOTS_ONLY(nextSlotNum += numSlots;) // TODO-Cleanup: structs are aligned to 8 bytes on arm64 apple, so it would work, but pass the precise size. nextStackByteOffset += numSlots * TARGET_POINTER_SIZE; } //------------------------------------------------------------------------ // EvalToTmp: Replace the node in the given fgArgTabEntry with a temp // // Arguments: // curArgTabEntry - the fgArgTabEntry for the argument // tmpNum - the varNum for the temp // newNode - the assignment of the argument value to the temp // // Notes: // Although the name of this method is EvalToTmp, it doesn't actually create // the temp or the copy. // void fgArgInfo::EvalToTmp(fgArgTabEntry* curArgTabEntry, unsigned tmpNum, GenTree* newNode) { assert(curArgTabEntry->use != callTree->gtCallThisArg); assert(curArgTabEntry->use->GetNode() == newNode); assert(curArgTabEntry->GetNode() == newNode); curArgTabEntry->tmpNum = tmpNum; curArgTabEntry->isTmp = true; } void fgArgInfo::ArgsComplete() { bool hasStructRegArg = false; for (unsigned curInx = 0; curInx < argCount; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; assert(curArgTabEntry != nullptr); GenTree* argx = curArgTabEntry->GetNode(); if (curArgTabEntry->GetRegNum() == REG_STK) { assert(hasStackArgs == true); #if !FEATURE_FIXED_OUT_ARGS // On x86 we use push instructions to pass arguments: // The non-register arguments are evaluated and pushed in order // and they are never evaluated into temps // continue; #endif } #if FEATURE_ARG_SPLIT else if (curArgTabEntry->IsSplit()) { hasStructRegArg = true; assert(hasStackArgs == true); } #endif // FEATURE_ARG_SPLIT else // we have a register argument, next we look for a struct type. { if (varTypeIsStruct(argx) UNIX_AMD64_ABI_ONLY(|| curArgTabEntry->isStruct)) { hasStructRegArg = true; } } /* If the argument tree contains an assignment (GTF_ASG) then the argument and and every earlier argument (except constants) must be evaluated into temps since there may be other arguments that follow and they may use the value being assigned. 
EXAMPLE: ArgTab is "a, a=5, a" -> when we see the second arg "a=5" we know the first two arguments "a, a=5" have to be evaluated into temps For the case of an assignment, we only know that there exist some assignment someplace in the tree. We don't know what is being assigned so we are very conservative here and assume that any local variable could have been assigned. */ if (argx->gtFlags & GTF_ASG) { // If this is not the only argument, or it's a copyblk, or it already evaluates the expression to // a tmp, then we need a temp in the late arg list. if ((argCount > 1) || argx->OperIsCopyBlkOp() #ifdef FEATURE_FIXED_OUT_ARGS || curArgTabEntry->isTmp // I protect this by "FEATURE_FIXED_OUT_ARGS" to preserve the property // that we only have late non-register args when that feature is on. #endif // FEATURE_FIXED_OUT_ARGS ) { curArgTabEntry->needTmp = true; needsTemps = true; } // For all previous arguments, unless they are a simple constant // we require that they be evaluated into temps for (unsigned prevInx = 0; prevInx < curInx; prevInx++) { fgArgTabEntry* prevArgTabEntry = argTable[prevInx]; assert(prevArgTabEntry->argNum < curArgTabEntry->argNum); if (!prevArgTabEntry->GetNode()->IsInvariant()) { prevArgTabEntry->needTmp = true; needsTemps = true; } } } bool treatLikeCall = ((argx->gtFlags & GTF_CALL) != 0); #if FEATURE_FIXED_OUT_ARGS // Like calls, if this argument has a tree that will do an inline throw, // a call to a jit helper, then we need to treat it like a call (but only // if there are/were any stack args). // This means unnesting, sorting, etc. Technically this is overly // conservative, but I want to avoid as much special-case debug-only code // as possible, so leveraging the GTF_CALL flag is the easiest. // if (!treatLikeCall && (argx->gtFlags & GTF_EXCEPT) && (argCount > 1) && compiler->opts.compDbgCode && (compiler->fgWalkTreePre(&argx, Compiler::fgChkThrowCB) == Compiler::WALK_ABORT)) { for (unsigned otherInx = 0; otherInx < argCount; otherInx++) { if (otherInx == curInx) { continue; } if (argTable[otherInx]->GetRegNum() == REG_STK) { treatLikeCall = true; break; } } } #endif // FEATURE_FIXED_OUT_ARGS /* If it contains a call (GTF_CALL) then itself and everything before the call with a GLOB_EFFECT must eval to temp (this is because everything with SIDE_EFFECT has to be kept in the right order since we will move the call to the first position) For calls we don't have to be quite as conservative as we are with an assignment since the call won't be modifying any non-address taken LclVars. 
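       For example (illustrative): with ArgTab "a[i], F(), b", seeing the call F() forces
       F() into a temp (it is not the only argument) and also forces the earlier argument
       a[i] into a temp, because its GTF_ALL_EFFECT flags (range-check exception, global
       reference) prevent reordering it past the call.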
*/ if (treatLikeCall) { if (argCount > 1) // If this is not the only argument { curArgTabEntry->needTmp = true; needsTemps = true; } else if (varTypeIsFloating(argx->TypeGet()) && (argx->OperGet() == GT_CALL)) { // Spill all arguments that are floating point calls curArgTabEntry->needTmp = true; needsTemps = true; } // All previous arguments may need to be evaluated into temps for (unsigned prevInx = 0; prevInx < curInx; prevInx++) { fgArgTabEntry* prevArgTabEntry = argTable[prevInx]; assert(prevArgTabEntry->argNum < curArgTabEntry->argNum); // For all previous arguments, if they have any GTF_ALL_EFFECT // we require that they be evaluated into a temp if ((prevArgTabEntry->GetNode()->gtFlags & GTF_ALL_EFFECT) != 0) { prevArgTabEntry->needTmp = true; needsTemps = true; } #if FEATURE_FIXED_OUT_ARGS // Or, if they are stored into the FIXED_OUT_ARG area // we require that they be moved to the gtCallLateArgs // and replaced with a placeholder node else if (prevArgTabEntry->GetRegNum() == REG_STK) { prevArgTabEntry->needPlace = true; } #if FEATURE_ARG_SPLIT else if (prevArgTabEntry->IsSplit()) { prevArgTabEntry->needPlace = true; } #endif // FEATURE_ARG_SPLIT #endif } } #if FEATURE_MULTIREG_ARGS // For RyuJIT backend we will expand a Multireg arg into a GT_FIELD_LIST // with multiple indirections, so here we consider spilling it into a tmp LclVar. // CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_ARM bool isMultiRegArg = (curArgTabEntry->numRegs > 0) && (curArgTabEntry->numRegs + curArgTabEntry->GetStackSlotsNumber() > 1); #else bool isMultiRegArg = (curArgTabEntry->numRegs > 1); #endif if ((varTypeIsStruct(argx->TypeGet())) && (curArgTabEntry->needTmp == false)) { if (isMultiRegArg && ((argx->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) != 0)) { // Spill multireg struct arguments that have Assignments or Calls embedded in them curArgTabEntry->needTmp = true; needsTemps = true; } else { // We call gtPrepareCost to measure the cost of evaluating this tree compiler->gtPrepareCost(argx); if (isMultiRegArg && (argx->GetCostEx() > (6 * IND_COST_EX))) { // Spill multireg struct arguments that are expensive to evaluate twice curArgTabEntry->needTmp = true; needsTemps = true; } #if defined(FEATURE_SIMD) && defined(TARGET_ARM64) else if (isMultiRegArg && varTypeIsSIMD(argx->TypeGet())) { // SIMD types do not need the optimization below due to their sizes if (argx->OperIsSimdOrHWintrinsic() || (argx->OperIs(GT_OBJ) && argx->AsObj()->gtOp1->OperIs(GT_ADDR) && argx->AsObj()->gtOp1->AsOp()->gtOp1->OperIsSimdOrHWintrinsic())) { curArgTabEntry->needTmp = true; needsTemps = true; } } #endif #ifndef TARGET_ARM // TODO-Arm: This optimization is not implemented for ARM32 // so we skip this for ARM32 until it is ported to use RyuJIT backend // else if (argx->OperGet() == GT_OBJ) { GenTreeObj* argObj = argx->AsObj(); unsigned structSize = argObj->GetLayout()->GetSize(); switch (structSize) { case 3: case 5: case 6: case 7: // If we have a stack based LclVar we can perform a wider read of 4 or 8 bytes // if (argObj->AsObj()->gtOp1->IsLocalAddrExpr() == nullptr) // Is the source not a LclVar? { // If we don't have a LclVar we need to read exactly 3,5,6 or 7 bytes // For now we use a a GT_CPBLK to copy the exact size into a GT_LCL_VAR temp. 
// curArgTabEntry->needTmp = true; needsTemps = true; } break; case 11: case 13: case 14: case 15: // Spill any GT_OBJ multireg structs that are difficult to extract // // When we have a GT_OBJ of a struct with the above sizes we would need // to use 3 or 4 load instructions to load the exact size of this struct. // Instead we spill the GT_OBJ into a new GT_LCL_VAR temp and this sequence // will use a GT_CPBLK to copy the exact size into the GT_LCL_VAR temp. // Then we can just load all 16 bytes of the GT_LCL_VAR temp when passing // the argument. // curArgTabEntry->needTmp = true; needsTemps = true; break; default: break; } } #endif // !TARGET_ARM } } #endif // FEATURE_MULTIREG_ARGS } // We only care because we can't spill structs and qmarks involve a lot of spilling, but // if we don't have qmarks, then it doesn't matter. // So check for Qmark's globally once here, instead of inside the loop. // const bool hasStructRegArgWeCareAbout = (hasStructRegArg && compiler->compQmarkUsed); #if FEATURE_FIXED_OUT_ARGS // For Arm/x64 we only care because we can't reorder a register // argument that uses GT_LCLHEAP. This is an optimization to // save a check inside the below loop. // const bool hasStackArgsWeCareAbout = (hasStackArgs && compiler->compLocallocUsed); #else const bool hasStackArgsWeCareAbout = hasStackArgs; #endif // FEATURE_FIXED_OUT_ARGS // If we have any stack args we have to force the evaluation // of any arguments passed in registers that might throw an exception // // Technically we only a required to handle the following two cases: // a GT_IND with GTF_IND_RNGCHK (only on x86) or // a GT_LCLHEAP node that allocates stuff on the stack // if (hasStackArgsWeCareAbout || hasStructRegArgWeCareAbout) { for (unsigned curInx = 0; curInx < argCount; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; assert(curArgTabEntry != nullptr); GenTree* argx = curArgTabEntry->GetNode(); // Examine the register args that are currently not marked needTmp // if (!curArgTabEntry->needTmp && (curArgTabEntry->GetRegNum() != REG_STK)) { if (hasStackArgsWeCareAbout) { #if !FEATURE_FIXED_OUT_ARGS // On x86 we previously recorded a stack depth of zero when // morphing the register arguments of any GT_IND with a GTF_IND_RNGCHK flag // Thus we can not reorder the argument after any stack based argument // (Note that GT_LCLHEAP sets the GTF_EXCEPT flag so we don't need to // check for it explicitly.) // if (argx->gtFlags & GTF_EXCEPT) { curArgTabEntry->needTmp = true; needsTemps = true; continue; } #else // For Arm/X64 we can't reorder a register argument that uses a GT_LCLHEAP // if (argx->gtFlags & GTF_EXCEPT) { assert(compiler->compLocallocUsed); // Returns WALK_ABORT if a GT_LCLHEAP node is encountered in the argx tree // if (compiler->fgWalkTreePre(&argx, Compiler::fgChkLocAllocCB) == Compiler::WALK_ABORT) { curArgTabEntry->needTmp = true; needsTemps = true; continue; } } #endif } if (hasStructRegArgWeCareAbout) { // Returns true if a GT_QMARK node is encountered in the argx tree // if (compiler->fgWalkTreePre(&argx, Compiler::fgChkQmarkCB) == Compiler::WALK_ABORT) { curArgTabEntry->needTmp = true; needsTemps = true; continue; } } } } } // When CFG is enabled and this is a delegate call or vtable call we must // compute the call target before all late args. However this will // effectively null-check 'this', which should happen only after all // arguments are evaluated. Thus we must evaluate all args with side // effects to a temp. 
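    // Rough sketch of the desired shape (hypothetical example; 'del', 'SideEffect' and
    // 'offsetOfInvoke' merely stand in for however the target is really fetched):
    //     tmp0 = del; tmp1 = SideEffect();     // early args evaluated in original order
    //     target = [tmp0 + offsetOfInvoke];    // target load / implicit null check of 'this'
    //     call target(tmp0, tmp1)              // late args + call
    // i.e. the null check happens only after the side-effecting argument has run.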
if (compiler->opts.IsCFGEnabled() && (callTree->IsVirtualVtable() || callTree->IsDelegateInvoke())) { // Always evaluate 'this' to temp. argTable[0]->needTmp = true; needsTemps = true; for (unsigned curInx = 1; curInx < argCount; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; GenTree* arg = curArgTabEntry->GetNode(); if ((arg->gtFlags & GTF_ALL_EFFECT) != 0) { curArgTabEntry->needTmp = true; needsTemps = true; } } } argsComplete = true; } void fgArgInfo::SortArgs() { assert(argsComplete == true); #ifdef DEBUG if (compiler->verbose) { printf("\nSorting the arguments:\n"); } #endif /* Shuffle the arguments around before we build the gtCallLateArgs list. The idea is to move all "simple" arguments like constants and local vars to the end of the table, and move the complex arguments towards the beginning of the table. This will help prevent registers from being spilled by allowing us to evaluate the more complex arguments before the simpler arguments. The argTable ends up looking like: +------------------------------------+ <--- argTable[argCount - 1] | constants | +------------------------------------+ | local var / local field | +------------------------------------+ | remaining arguments sorted by cost | +------------------------------------+ | temps (argTable[].needTmp = true) | +------------------------------------+ | args with calls (GTF_CALL) | +------------------------------------+ <--- argTable[0] */ /* Set the beginning and end for the new argument table */ unsigned curInx; int regCount = 0; unsigned begTab = 0; unsigned endTab = argCount - 1; unsigned argsRemaining = argCount; // First take care of arguments that are constants. // [We use a backward iterator pattern] // curInx = argCount; do { curInx--; fgArgTabEntry* curArgTabEntry = argTable[curInx]; if (curArgTabEntry->GetRegNum() != REG_STK) { regCount++; } assert(curArgTabEntry->lateUse == nullptr); // Skip any already processed args // if (!curArgTabEntry->processed) { GenTree* argx = curArgTabEntry->GetNode(); // put constants at the end of the table // if (argx->gtOper == GT_CNS_INT) { noway_assert(curInx <= endTab); curArgTabEntry->processed = true; // place curArgTabEntry at the endTab position by performing a swap // if (curInx != endTab) { argTable[curInx] = argTable[endTab]; argTable[endTab] = curArgTabEntry; } endTab--; argsRemaining--; } } } while (curInx > 0); if (argsRemaining > 0) { // Next take care of arguments that are calls. // [We use a forward iterator pattern] // for (curInx = begTab; curInx <= endTab; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; // Skip any already processed args // if (!curArgTabEntry->processed) { GenTree* argx = curArgTabEntry->GetNode(); // put calls at the beginning of the table // if (argx->gtFlags & GTF_CALL) { curArgTabEntry->processed = true; // place curArgTabEntry at the begTab position by performing a swap // if (curInx != begTab) { argTable[curInx] = argTable[begTab]; argTable[begTab] = curArgTabEntry; } begTab++; argsRemaining--; } } } } if (argsRemaining > 0) { // Next take care arguments that are temps. // These temps come before the arguments that are // ordinary local vars or local fields // since this will give them a better chance to become // enregistered into their actual argument register. 
// [We use a forward iterator pattern] // for (curInx = begTab; curInx <= endTab; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; // Skip any already processed args // if (!curArgTabEntry->processed) { if (curArgTabEntry->needTmp) { curArgTabEntry->processed = true; // place curArgTabEntry at the begTab position by performing a swap // if (curInx != begTab) { argTable[curInx] = argTable[begTab]; argTable[begTab] = curArgTabEntry; } begTab++; argsRemaining--; } } } } if (argsRemaining > 0) { // Next take care of local var and local field arguments. // These are moved towards the end of the argument evaluation. // [We use a backward iterator pattern] // curInx = endTab + 1; do { curInx--; fgArgTabEntry* curArgTabEntry = argTable[curInx]; // Skip any already processed args // if (!curArgTabEntry->processed) { GenTree* argx = curArgTabEntry->GetNode(); if ((argx->gtOper == GT_LCL_VAR) || (argx->gtOper == GT_LCL_FLD)) { noway_assert(curInx <= endTab); curArgTabEntry->processed = true; // place curArgTabEntry at the endTab position by performing a swap // if (curInx != endTab) { argTable[curInx] = argTable[endTab]; argTable[endTab] = curArgTabEntry; } endTab--; argsRemaining--; } } } while (curInx > begTab); } // Finally, take care of all the remaining arguments. // Note that we fill in one arg at a time using a while loop. bool costsPrepared = false; // Only prepare tree costs once, the first time through this loop while (argsRemaining > 0) { /* Find the most expensive arg remaining and evaluate it next */ fgArgTabEntry* expensiveArgTabEntry = nullptr; unsigned expensiveArg = UINT_MAX; unsigned expensiveArgCost = 0; // [We use a forward iterator pattern] // for (curInx = begTab; curInx <= endTab; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; // Skip any already processed args // if (!curArgTabEntry->processed) { GenTree* argx = curArgTabEntry->GetNode(); // We should have already handled these kinds of args assert(argx->gtOper != GT_LCL_VAR); assert(argx->gtOper != GT_LCL_FLD); assert(argx->gtOper != GT_CNS_INT); // This arg should either have no persistent side effects or be the last one in our table // assert(((argx->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) == 0) || (curInx == (argCount-1))); if (argsRemaining == 1) { // This is the last arg to place expensiveArg = curInx; expensiveArgTabEntry = curArgTabEntry; assert(begTab == endTab); break; } else { if (!costsPrepared) { /* We call gtPrepareCost to measure the cost of evaluating this tree */ compiler->gtPrepareCost(argx); } if (argx->GetCostEx() > expensiveArgCost) { // Remember this arg as the most expensive one that we have yet seen expensiveArgCost = argx->GetCostEx(); expensiveArg = curInx; expensiveArgTabEntry = curArgTabEntry; } } } } noway_assert(expensiveArg != UINT_MAX); // put the most expensive arg towards the beginning of the table expensiveArgTabEntry->processed = true; // place expensiveArgTabEntry at the begTab position by performing a swap // if (expensiveArg != begTab) { argTable[expensiveArg] = argTable[begTab]; argTable[begTab] = expensiveArgTabEntry; } begTab++; argsRemaining--; costsPrepared = true; // If we have more expensive arguments, don't re-evaluate the tree cost on the next loop } // The table should now be completely filled and thus begTab should now be adjacent to endTab // and regArgsRemaining should be zero assert(begTab == (endTab + 1)); assert(argsRemaining == 0); argsSorted = true; } #ifdef DEBUG void fgArgInfo::Dump(Compiler* compiler) const { for (unsigned curInx = 0; curInx 
< ArgCount(); curInx++) { fgArgTabEntry* curArgEntry = ArgTable()[curInx]; curArgEntry->Dump(); } } #endif //------------------------------------------------------------------------------ // fgMakeTmpArgNode : This function creates a tmp var only if needed. // We need this to be done in order to enforce ordering // of the evaluation of arguments. // // Arguments: // curArgTabEntry // // Return Value: // the newly created temp var tree. GenTree* Compiler::fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry) { unsigned tmpVarNum = curArgTabEntry->tmpNum; LclVarDsc* varDsc = lvaGetDesc(tmpVarNum); assert(varDsc->lvIsTemp); var_types type = varDsc->TypeGet(); // Create a copy of the temp to go into the late argument list GenTree* arg = gtNewLclvNode(tmpVarNum, type); GenTree* addrNode = nullptr; if (varTypeIsStruct(type)) { #if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM) // Can this type be passed as a primitive type? // If so, the following call will return the corresponding primitive type. // Otherwise, it will return TYP_UNKNOWN and we will pass it as a struct type. bool passedAsPrimitive = false; if (curArgTabEntry->TryPassAsPrimitive()) { CORINFO_CLASS_HANDLE clsHnd = varDsc->GetStructHnd(); var_types structBaseType = getPrimitiveTypeForStruct(lvaLclExactSize(tmpVarNum), clsHnd, curArgTabEntry->IsVararg()); if (structBaseType != TYP_UNKNOWN) { passedAsPrimitive = true; #if defined(UNIX_AMD64_ABI) // TODO-Cleanup: This is inelegant, but eventually we'll track this in the fgArgTabEntry, // and otherwise we'd have to either modify getPrimitiveTypeForStruct() to take // a structDesc or call eeGetSystemVAmd64PassStructInRegisterDescriptor yet again. // if (genIsValidFloatReg(curArgTabEntry->GetRegNum())) { if (structBaseType == TYP_INT) { structBaseType = TYP_FLOAT; } else { assert(structBaseType == TYP_LONG); structBaseType = TYP_DOUBLE; } } #endif type = structBaseType; } } // If it is passed in registers, don't get the address of the var. Make it a // field instead. It will be loaded in registers with putarg_reg tree in lower. if (passedAsPrimitive) { arg->ChangeOper(GT_LCL_FLD); arg->gtType = type; lvaSetVarDoNotEnregister(tmpVarNum DEBUGARG(DoNotEnregisterReason::SwizzleArg)); } else { var_types addrType = TYP_BYREF; arg = gtNewOperNode(GT_ADDR, addrType, arg); lvaSetVarAddrExposed(tmpVarNum DEBUGARG(AddressExposedReason::ESCAPE_ADDRESS)); addrNode = arg; #if FEATURE_MULTIREG_ARGS #ifdef TARGET_ARM64 assert(varTypeIsStruct(type)); if (lvaIsMultiregStruct(varDsc, curArgTabEntry->IsVararg())) { // We will create a GT_OBJ for the argument below. // This will be passed by value in two registers. assert(addrNode != nullptr); // Create an Obj of the temp to use it as a call argument. arg = gtNewObjNode(lvaGetStruct(tmpVarNum), arg); } #else // Always create an Obj of the temp to use it as a call argument. arg = gtNewObjNode(lvaGetStruct(tmpVarNum), arg); #endif // !TARGET_ARM64 #endif // FEATURE_MULTIREG_ARGS } #else // not (TARGET_AMD64 or TARGET_ARM64 or TARGET_ARM) // other targets, we pass the struct by value assert(varTypeIsStruct(type)); addrNode = gtNewOperNode(GT_ADDR, TYP_BYREF, arg); // Get a new Obj node temp to use it as a call argument. // gtNewObjNode will set the GTF_EXCEPT flag if this is not a local stack object. 
arg = gtNewObjNode(lvaGetStruct(tmpVarNum), addrNode); #endif // not (TARGET_AMD64 or TARGET_ARM64 or TARGET_ARM) } // (varTypeIsStruct(type)) if (addrNode != nullptr) { assert(addrNode->gtOper == GT_ADDR); // the child of a GT_ADDR is required to have this flag set addrNode->AsOp()->gtOp1->gtFlags |= GTF_DONT_CSE; } return arg; } //------------------------------------------------------------------------------ // EvalArgsToTemps : Create temp assignments and populate the LateArgs list. void fgArgInfo::EvalArgsToTemps() { assert(argsSorted); unsigned regArgInx = 0; // Now go through the argument table and perform the necessary evaluation into temps GenTreeCall::Use* tmpRegArgNext = nullptr; for (unsigned curInx = 0; curInx < argCount; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; assert(curArgTabEntry->lateUse == nullptr); GenTree* argx = curArgTabEntry->GetNode(); GenTree* setupArg = nullptr; GenTree* defArg; #if !FEATURE_FIXED_OUT_ARGS // Only ever set for FEATURE_FIXED_OUT_ARGS assert(curArgTabEntry->needPlace == false); // On x86 and other archs that use push instructions to pass arguments: // Only the register arguments need to be replaced with placeholder nodes. // Stacked arguments are evaluated and pushed (or stored into the stack) in order. // if (curArgTabEntry->GetRegNum() == REG_STK) continue; #endif if (curArgTabEntry->needTmp) { if (curArgTabEntry->isTmp) { // Create a copy of the temp to go into the late argument list defArg = compiler->fgMakeTmpArgNode(curArgTabEntry); // mark the original node as a late argument argx->gtFlags |= GTF_LATE_ARG; } else { // Create a temp assignment for the argument // Put the temp in the gtCallLateArgs list CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (compiler->verbose) { printf("Argument with 'side effect'...\n"); compiler->gtDispTree(argx); } #endif #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) noway_assert(argx->gtType != TYP_STRUCT); #endif unsigned tmpVarNum = compiler->lvaGrabTemp(true DEBUGARG("argument with side effect")); if (argx->gtOper == GT_MKREFANY) { // For GT_MKREFANY, typically the actual struct copying does // not have any side-effects and can be delayed. So instead // of using a temp for the whole struct, we can just use a temp // for operand that that has a side-effect GenTree* operand; if ((argx->AsOp()->gtOp2->gtFlags & GTF_ALL_EFFECT) == 0) { operand = argx->AsOp()->gtOp1; // In the early argument evaluation, place an assignment to the temp // from the source operand of the mkrefany setupArg = compiler->gtNewTempAssign(tmpVarNum, operand); // Replace the operand for the mkrefany with the new temp. argx->AsOp()->gtOp1 = compiler->gtNewLclvNode(tmpVarNum, operand->TypeGet()); } else if ((argx->AsOp()->gtOp1->gtFlags & GTF_ALL_EFFECT) == 0) { operand = argx->AsOp()->gtOp2; // In the early argument evaluation, place an assignment to the temp // from the source operand of the mkrefany setupArg = compiler->gtNewTempAssign(tmpVarNum, operand); // Replace the operand for the mkrefany with the new temp. 
argx->AsOp()->gtOp2 = compiler->gtNewLclvNode(tmpVarNum, operand->TypeGet()); } } if (setupArg != nullptr) { // Now keep the mkrefany for the late argument list defArg = argx; // Clear the side-effect flags because now both op1 and op2 have no side-effects defArg->gtFlags &= ~GTF_ALL_EFFECT; } else { setupArg = compiler->gtNewTempAssign(tmpVarNum, argx); LclVarDsc* varDsc = compiler->lvaGetDesc(tmpVarNum); var_types lclVarType = genActualType(argx->gtType); var_types scalarType = TYP_UNKNOWN; if (setupArg->OperIsCopyBlkOp()) { setupArg = compiler->fgMorphCopyBlock(setupArg); #if defined(TARGET_ARMARCH) || defined(UNIX_AMD64_ABI) if (lclVarType == TYP_STRUCT) { // This scalar LclVar widening step is only performed for ARM architectures. // CORINFO_CLASS_HANDLE clsHnd = compiler->lvaGetStruct(tmpVarNum); unsigned structSize = varDsc->lvExactSize; scalarType = compiler->getPrimitiveTypeForStruct(structSize, clsHnd, curArgTabEntry->IsVararg()); } #endif // TARGET_ARMARCH || defined (UNIX_AMD64_ABI) } // scalarType can be set to a wider type for ARM or unix amd64 architectures: (3 => 4) or (5,6,7 => // 8) if ((scalarType != TYP_UNKNOWN) && (scalarType != lclVarType)) { // Create a GT_LCL_FLD using the wider type to go to the late argument list defArg = compiler->gtNewLclFldNode(tmpVarNum, scalarType, 0); } else { // Create a copy of the temp to go to the late argument list defArg = compiler->gtNewLclvNode(tmpVarNum, lclVarType); } curArgTabEntry->isTmp = true; curArgTabEntry->tmpNum = tmpVarNum; #ifdef TARGET_ARM // Previously we might have thought the local was promoted, and thus the 'COPYBLK' // might have left holes in the used registers (see // fgAddSkippedRegsInPromotedStructArg). // Too bad we're not that smart for these intermediate temps... if (isValidIntArgReg(curArgTabEntry->GetRegNum()) && (curArgTabEntry->numRegs > 1)) { regNumber argReg = curArgTabEntry->GetRegNum(); regMaskTP allUsedRegs = genRegMask(curArgTabEntry->GetRegNum()); for (unsigned i = 1; i < curArgTabEntry->numRegs; i++) { argReg = genRegArgNext(argReg); allUsedRegs |= genRegMask(argReg); } } #endif // TARGET_ARM } /* mark the assignment as a late argument */ setupArg->gtFlags |= GTF_LATE_ARG; #ifdef DEBUG if (compiler->verbose) { printf("\n Evaluate to a temp:\n"); compiler->gtDispTree(setupArg); } #endif } } else // curArgTabEntry->needTmp == false { // On x86 - // Only register args are replaced with placeholder nodes // and the stack based arguments are evaluated and pushed in order. // // On Arm/x64 - When needTmp is false and needPlace is false, // the non-register arguments are evaluated and stored in order. // When needPlace is true we have a nested call that comes after // this argument so we have to replace it in the gtCallArgs list // (the initial argument evaluation list) with a placeholder. // if ((curArgTabEntry->GetRegNum() == REG_STK) && (curArgTabEntry->needPlace == false)) { continue; } /* No temp needed - move the whole node to the gtCallLateArgs list */ /* The argument is deferred and put in the late argument list */ defArg = argx; // Create a placeholder node to put in its place in gtCallLateArgs. // For a struct type we also need to record the class handle of the arg. CORINFO_CLASS_HANDLE clsHnd = NO_CLASS_HANDLE; #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // All structs are either passed (and retyped) as integral types, OR they // are passed by reference. 
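                // For example (a sketch of the Windows x64 convention as understood here):
                //     struct S8  { int a; int b; };        // 8 bytes  -> retyped to TYP_LONG
                //     struct S12 { int a; int b; int c; }; // 12 bytes -> caller makes a copy and
                //                                          //             passes the copy's address
                // so by this point no argument node should still be typed TYP_STRUCT.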
noway_assert(argx->gtType != TYP_STRUCT); #else // !defined(TARGET_AMD64) || defined(UNIX_AMD64_ABI) if (defArg->TypeGet() == TYP_STRUCT) { clsHnd = compiler->gtGetStructHandleIfPresent(defArg); noway_assert(clsHnd != NO_CLASS_HANDLE); } #endif // !(defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) setupArg = compiler->gtNewArgPlaceHolderNode(defArg->gtType, clsHnd); /* mark the placeholder node as a late argument */ setupArg->gtFlags |= GTF_LATE_ARG; #ifdef DEBUG if (compiler->verbose) { if (curArgTabEntry->GetRegNum() == REG_STK) { printf("Deferred stack argument :\n"); } else { printf("Deferred argument ('%s'):\n", getRegName(curArgTabEntry->GetRegNum())); } compiler->gtDispTree(argx); printf("Replaced with placeholder node:\n"); compiler->gtDispTree(setupArg); } #endif } if (setupArg != nullptr) { noway_assert(curArgTabEntry->use->GetNode() == argx); curArgTabEntry->use->SetNode(setupArg); } /* deferred arg goes into the late argument list */ if (tmpRegArgNext == nullptr) { tmpRegArgNext = compiler->gtNewCallArgs(defArg); callTree->AsCall()->gtCallLateArgs = tmpRegArgNext; } else { noway_assert(tmpRegArgNext->GetNode() != nullptr); tmpRegArgNext->SetNext(compiler->gtNewCallArgs(defArg)); tmpRegArgNext = tmpRegArgNext->GetNext(); } curArgTabEntry->lateUse = tmpRegArgNext; curArgTabEntry->SetLateArgInx(regArgInx++); } #ifdef DEBUG if (compiler->verbose) { printf("\nShuffled argument table: "); for (unsigned curInx = 0; curInx < argCount; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; if (curArgTabEntry->GetRegNum() != REG_STK) { printf("%s ", getRegName(curArgTabEntry->GetRegNum())); } } printf("\n"); } #endif } //------------------------------------------------------------------------------ // fgMakeMultiUse : If the node is an unaliased local or constant clone it, // otherwise insert a comma form temp // // Arguments: // ppTree - a pointer to the child node we will be replacing with the comma expression that // evaluates ppTree to a temp and returns the result // // Return Value: // A fresh GT_LCL_VAR node referencing the temp which has not been used // // Notes: // Caller must ensure that if the node is an unaliased local, the second use this // creates will be evaluated before the local can be reassigned. // // Can be safely called in morph preorder, before GTF_GLOB_REF is reliable. // GenTree* Compiler::fgMakeMultiUse(GenTree** pOp) { GenTree* const tree = *pOp; if (tree->IsInvariant()) { return gtClone(tree); } else if (tree->IsLocal()) { // Can't rely on GTF_GLOB_REF here. // if (!lvaGetDesc(tree->AsLclVarCommon())->IsAddressExposed()) { return gtClone(tree); } } return fgInsertCommaFormTemp(pOp); } //------------------------------------------------------------------------------ // fgInsertCommaFormTemp: Create a new temporary variable to hold the result of *ppTree, // and replace *ppTree with comma(asg(newLcl, *ppTree), newLcl) // // Arguments: // ppTree - a pointer to the child node we will be replacing with the comma expression that // evaluates ppTree to a temp and returns the result // // structType - value type handle if the temp created is of TYP_STRUCT. 
// // Return Value: // A fresh GT_LCL_VAR node referencing the temp which has not been used // GenTree* Compiler::fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType /*= nullptr*/) { GenTree* subTree = *ppTree; unsigned lclNum = lvaGrabTemp(true DEBUGARG("fgInsertCommaFormTemp is creating a new local variable")); if (varTypeIsStruct(subTree)) { assert(structType != nullptr); lvaSetStruct(lclNum, structType, false); } // If subTree->TypeGet() == TYP_STRUCT, gtNewTempAssign() will create a GT_COPYBLK tree. // The type of GT_COPYBLK is TYP_VOID. Therefore, we should use subTree->TypeGet() for // setting type of lcl vars created. GenTree* asg = gtNewTempAssign(lclNum, subTree); GenTree* load = new (this, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, subTree->TypeGet(), lclNum); GenTree* comma = gtNewOperNode(GT_COMMA, subTree->TypeGet(), asg, load); *ppTree = comma; return new (this, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, subTree->TypeGet(), lclNum); } //------------------------------------------------------------------------ // fgInitArgInfo: Construct the fgArgInfo for the call with the fgArgEntry for each arg // // Arguments: // callNode - the call for which we are generating the fgArgInfo // // Return Value: // None // // Notes: // This method is idempotent in that it checks whether the fgArgInfo has already been // constructed, and just returns. // This method only computes the arg table and arg entries for the call (the fgArgInfo), // and makes no modification of the args themselves. // // The IR for the call args can change for calls with non-standard arguments: some non-standard // arguments add new call argument IR nodes. // void Compiler::fgInitArgInfo(GenTreeCall* call) { GenTreeCall::Use* args; GenTree* argx; unsigned argIndex = 0; unsigned intArgRegNum = 0; unsigned fltArgRegNum = 0; DEBUG_ARG_SLOTS_ONLY(unsigned argSlots = 0;) bool callHasRetBuffArg = call->HasRetBufArg(); bool callIsVararg = call->IsVarargs(); #ifdef TARGET_ARM regMaskTP argSkippedRegMask = RBM_NONE; regMaskTP fltArgSkippedRegMask = RBM_NONE; #endif // TARGET_ARM #if defined(TARGET_X86) unsigned maxRegArgs = MAX_REG_ARG; // X86: non-const, must be calculated #else const unsigned maxRegArgs = MAX_REG_ARG; // other arch: fixed constant number #endif if (call->fgArgInfo != nullptr) { // We've already initialized and set the fgArgInfo. return; } JITDUMP("Initializing arg info for %d.%s:\n", call->gtTreeID, GenTree::OpName(call->gtOper)); // At this point, we should never have gtCallLateArgs, as this needs to be done before those are determined. assert(call->gtCallLateArgs == nullptr); if (TargetOS::IsUnix && callIsVararg) { // Currently native varargs is not implemented on non windows targets. // // Note that some targets like Arm64 Unix should not need much work as // the ABI is the same. While other targets may only need small changes // such as amd64 Unix, which just expects RAX to pass numFPArguments. NYI("Morphing Vararg call not yet implemented on non Windows targets."); } // Data structure for keeping track of non-standard args. Non-standard args are those that are not passed // following the normal calling convention or in the normal argument registers. We either mark existing // arguments as non-standard (such as the x8 return buffer register on ARM64), or we manually insert the // non-standard arguments into the argument list, below. class NonStandardArgs { struct NonStandardArg { GenTree* node; // The tree node representing this non-standard argument. 
// Note that this must be updated if the tree node changes due to morphing! regNumber reg; // The register to be assigned to this non-standard argument. NonStandardArgKind kind; // The kind of the non-standard arg }; ArrayStack<NonStandardArg> args; public: NonStandardArgs(CompAllocator alloc) : args(alloc, 3) // We will have at most 3 non-standard arguments { } //----------------------------------------------------------------------------- // Add: add a non-standard argument to the table of non-standard arguments // // Arguments: // node - a GenTree node that has a non-standard argument. // reg - the register to assign to this node. // // Return Value: // None. // void Add(GenTree* node, regNumber reg, NonStandardArgKind kind) { NonStandardArg nsa = {node, reg, kind}; args.Push(nsa); } //----------------------------------------------------------------------------- // Find: Look for a GenTree* in the set of non-standard args. // // Arguments: // node - a GenTree node to look for // // Return Value: // The index of the non-standard argument (a non-negative, unique, stable number). // If the node is not a non-standard argument, return -1. // int Find(GenTree* node) { for (int i = 0; i < args.Height(); i++) { if (node == args.Top(i).node) { return i; } } return -1; } //----------------------------------------------------------------------------- // Find: Look for a GenTree node in the non-standard arguments set. If found, // set the register to use for the node. // // Arguments: // node - a GenTree node to look for // pReg - an OUT argument. *pReg is set to the non-standard register to use if // 'node' is found in the non-standard argument set. // pKind - an OUT argument. *pKind is set to the kind of the non-standard arg. // // Return Value: // 'true' if 'node' is a non-standard argument. In this case, *pReg and *pKing are set. // 'false' otherwise (in this case, *pReg and *pKind are unmodified). // bool Find(GenTree* node, regNumber* pReg, NonStandardArgKind* pKind) { for (int i = 0; i < args.Height(); i++) { NonStandardArg& nsa = args.TopRef(i); if (node == nsa.node) { *pReg = nsa.reg; *pKind = nsa.kind; return true; } } return false; } //----------------------------------------------------------------------------- // Replace: Replace the non-standard argument node at a given index. This is done when // the original node was replaced via morphing, but we need to continue to assign a // particular non-standard arg to it. // // Arguments: // index - the index of the non-standard arg. It must exist. // node - the new GenTree node. // // Return Value: // None. // void Replace(int index, GenTree* node) { args.TopRef(index).node = node; } } nonStandardArgs(getAllocator(CMK_ArrayStack)); // Count of args. On first morph, this is counted before we've filled in the arg table. // On remorph, we grab it from the arg table. unsigned numArgs = 0; // First we need to count the args if (call->gtCallThisArg != nullptr) { numArgs++; } for (GenTreeCall::Use& use : call->Args()) { numArgs++; } // Insert or mark non-standard args. These are either outside the normal calling convention, or // arguments registers that don't follow the normal progression of argument registers in the calling // convention (such as for the ARM64 fixed return buffer argument x8). // // *********** NOTE ************* // The logic here must remain in sync with GetNonStandardAddedArgCount(), which is used to map arguments // in the implementation of fast tail call. 
// *********** END NOTE ********* CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) || defined(TARGET_ARM) // The x86 and arm32 CORINFO_HELP_INIT_PINVOKE_FRAME helpers has a custom calling convention. // Set the argument registers correctly here. if (call->IsHelperCall(this, CORINFO_HELP_INIT_PINVOKE_FRAME)) { GenTreeCall::Use* args = call->gtCallArgs; GenTree* arg1 = args->GetNode(); assert(arg1 != nullptr); nonStandardArgs.Add(arg1, REG_PINVOKE_FRAME, NonStandardArgKind::PInvokeFrame); } #endif // defined(TARGET_X86) || defined(TARGET_ARM) #if defined(TARGET_ARM) // A non-standard calling convention using wrapper delegate invoke is used on ARM, only, for wrapper // delegates. It is used for VSD delegate calls where the VSD custom calling convention ABI requires passing // R4, a callee-saved register, with a special value. Since R4 is a callee-saved register, its value needs // to be preserved. Thus, the VM uses a wrapper delegate IL stub, which preserves R4 and also sets up R4 // correctly for the VSD call. The VM is simply reusing an existing mechanism (wrapper delegate IL stub) // to achieve its goal for delegate VSD call. See COMDelegate::NeedsWrapperDelegate() in the VM for details. else if (call->gtCallMoreFlags & GTF_CALL_M_WRAPPER_DELEGATE_INV) { GenTree* arg = call->gtCallThisArg->GetNode(); if (arg->OperIsLocal()) { arg = gtClone(arg, true); } else { GenTree* tmp = fgInsertCommaFormTemp(&arg); call->gtCallThisArg->SetNode(arg); call->gtFlags |= GTF_ASG; arg = tmp; } noway_assert(arg != nullptr); GenTree* newArg = new (this, GT_ADDR) GenTreeAddrMode(TYP_BYREF, arg, nullptr, 0, eeGetEEInfo()->offsetOfWrapperDelegateIndirectCell); // Append newArg as the last arg GenTreeCall::Use** insertionPoint = &call->gtCallArgs; for (; *insertionPoint != nullptr; insertionPoint = &((*insertionPoint)->NextRef())) { } *insertionPoint = gtNewCallArgs(newArg); numArgs++; nonStandardArgs.Add(newArg, virtualStubParamInfo->GetReg(), NonStandardArgKind::WrapperDelegateCell); } #endif // defined(TARGET_ARM) #if defined(TARGET_X86) // The x86 shift helpers have custom calling conventions and expect the lo part of the long to be in EAX and the // hi part to be in EDX. This sets the argument registers up correctly. else if (call->IsHelperCall(this, CORINFO_HELP_LLSH) || call->IsHelperCall(this, CORINFO_HELP_LRSH) || call->IsHelperCall(this, CORINFO_HELP_LRSZ)) { GenTreeCall::Use* args = call->gtCallArgs; GenTree* arg1 = args->GetNode(); assert(arg1 != nullptr); nonStandardArgs.Add(arg1, REG_LNGARG_LO, NonStandardArgKind::ShiftLow); args = args->GetNext(); GenTree* arg2 = args->GetNode(); assert(arg2 != nullptr); nonStandardArgs.Add(arg2, REG_LNGARG_HI, NonStandardArgKind::ShiftHigh); } #else // !TARGET_X86 // TODO-X86-CQ: Currently RyuJIT/x86 passes args on the stack, so this is not needed. // If/when we change that, the following code needs to be changed to correctly support the (TBD) managed calling // convention for x86/SSE. // If we have a Fixed Return Buffer argument register then we setup a non-standard argument for it. // // We don't use the fixed return buffer argument if we have the special unmanaged instance call convention. // That convention doesn't use the fixed return buffer register. // CLANG_FORMAT_COMMENT_ANCHOR; if (call->HasFixedRetBufArg()) { args = call->gtCallArgs; assert(args != nullptr); argx = call->gtCallArgs->GetNode(); // We don't increment numArgs here, since we already counted this argument above. 
nonStandardArgs.Add(argx, theFixedRetBuffReg(), NonStandardArgKind::FixedRetBuffer); } // We are allowed to have a Fixed Return Buffer argument combined // with any of the remaining non-standard arguments // CLANG_FORMAT_COMMENT_ANCHOR; if (call->IsVirtualStub()) { if (!call->IsTailCallViaJitHelper()) { GenTree* stubAddrArg = fgGetStubAddrArg(call); // And push the stub address onto the list of arguments call->gtCallArgs = gtPrependNewCallArg(stubAddrArg, call->gtCallArgs); numArgs++; nonStandardArgs.Add(stubAddrArg, stubAddrArg->GetRegNum(), NonStandardArgKind::VirtualStubCell); } else { // If it is a VSD call getting dispatched via tail call helper, // fgMorphTailCallViaJitHelper() would materialize stub addr as an additional // parameter added to the original arg list and hence no need to // add as a non-standard arg. } } else #endif // !TARGET_X86 if (call->gtCallType == CT_INDIRECT && (call->gtCallCookie != nullptr)) { assert(!call->IsUnmanaged()); GenTree* arg = call->gtCallCookie; noway_assert(arg != nullptr); call->gtCallCookie = nullptr; // All architectures pass the cookie in a register. call->gtCallArgs = gtPrependNewCallArg(arg, call->gtCallArgs); nonStandardArgs.Add(arg, REG_PINVOKE_COOKIE_PARAM, NonStandardArgKind::PInvokeCookie); numArgs++; // put destination into R10/EAX arg = gtClone(call->gtCallAddr, true); call->gtCallArgs = gtPrependNewCallArg(arg, call->gtCallArgs); numArgs++; nonStandardArgs.Add(arg, REG_PINVOKE_TARGET_PARAM, NonStandardArgKind::PInvokeTarget); // finally change this call to a helper call call->gtCallType = CT_HELPER; call->gtCallMethHnd = eeFindHelper(CORINFO_HELP_PINVOKE_CALLI); } #if defined(FEATURE_READYTORUN) // For arm/arm64, we dispatch code same as VSD using virtualStubParamInfo->GetReg() // for indirection cell address, which ZapIndirectHelperThunk expects. // For x64/x86 we use return address to get the indirection cell by disassembling the call site. // That is not possible for fast tailcalls, so we only need this logic for fast tailcalls on xarch. // Note that we call this before we know if something will be a fast tailcall or not. // That's ok; after making something a tailcall, we will invalidate this information // and reconstruct it if necessary. The tailcalling decision does not change since // this is a non-standard arg in a register. bool needsIndirectionCell = call->IsR2RRelativeIndir() && !call->IsDelegateInvoke(); #if defined(TARGET_XARCH) needsIndirectionCell &= call->IsFastTailCall(); #endif if (needsIndirectionCell) { assert(call->gtEntryPoint.addr != nullptr); size_t addrValue = (size_t)call->gtEntryPoint.addr; GenTree* indirectCellAddress = gtNewIconHandleNode(addrValue, GTF_ICON_FTN_ADDR); #ifdef DEBUG indirectCellAddress->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd; #endif indirectCellAddress->SetRegNum(REG_R2R_INDIRECT_PARAM); #ifdef TARGET_ARM // Issue #xxxx : Don't attempt to CSE this constant on ARM32 // // This constant has specific register requirements, and LSRA doesn't currently correctly // handle them when the value is in a CSE'd local. indirectCellAddress->SetDoNotCSE(); #endif // TARGET_ARM // Push the stub address onto the list of arguments. 
call->gtCallArgs = gtPrependNewCallArg(indirectCellAddress, call->gtCallArgs); numArgs++; nonStandardArgs.Add(indirectCellAddress, indirectCellAddress->GetRegNum(), NonStandardArgKind::R2RIndirectionCell); } #endif if ((REG_VALIDATE_INDIRECT_CALL_ADDR != REG_ARG_0) && call->IsHelperCall(this, CORINFO_HELP_VALIDATE_INDIRECT_CALL)) { assert(call->gtCallArgs != nullptr); GenTreeCall::Use* args = call->gtCallArgs; GenTree* tar = args->GetNode(); nonStandardArgs.Add(tar, REG_VALIDATE_INDIRECT_CALL_ADDR, NonStandardArgKind::ValidateIndirectCallTarget); } // Allocate the fgArgInfo for the call node; // call->fgArgInfo = new (this, CMK_Unknown) fgArgInfo(this, call, numArgs); // Add the 'this' argument value, if present. if (call->gtCallThisArg != nullptr) { argx = call->gtCallThisArg->GetNode(); assert(argIndex == 0); assert(call->gtCallType == CT_USER_FUNC || call->gtCallType == CT_INDIRECT); assert(varTypeIsGC(argx) || (argx->gtType == TYP_I_IMPL)); const regNumber regNum = genMapIntRegArgNumToRegNum(intArgRegNum); const unsigned numRegs = 1; const unsigned byteSize = TARGET_POINTER_SIZE; const unsigned byteAlignment = TARGET_POINTER_SIZE; const bool isStruct = false; const bool isFloatHfa = false; // This is a register argument - put it in the table. call->fgArgInfo->AddRegArg(argIndex, argx, call->gtCallThisArg, regNum, numRegs, byteSize, byteAlignment, isStruct, isFloatHfa, callIsVararg UNIX_AMD64_ABI_ONLY_ARG(REG_STK) UNIX_AMD64_ABI_ONLY_ARG(0) UNIX_AMD64_ABI_ONLY_ARG(0) UNIX_AMD64_ABI_ONLY_ARG(nullptr)); intArgRegNum++; #ifdef WINDOWS_AMD64_ABI // Whenever we pass an integer register argument // we skip the corresponding floating point register argument fltArgRegNum++; #endif // WINDOWS_AMD64_ABI argIndex++; DEBUG_ARG_SLOTS_ONLY(argSlots++;) } #ifdef TARGET_X86 // Compute the maximum number of arguments that can be passed in registers. // For X86 we handle the varargs and unmanaged calling conventions #ifndef UNIX_X86_ABI if (call->gtFlags & GTF_CALL_POP_ARGS) { noway_assert(intArgRegNum < MAX_REG_ARG); // No more register arguments for varargs (CALL_POP_ARGS) maxRegArgs = intArgRegNum; // Add in the ret buff arg if (callHasRetBuffArg) maxRegArgs++; } #endif // UNIX_X86_ABI if (call->IsUnmanaged()) { noway_assert(intArgRegNum == 0); if (call->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) { noway_assert(call->gtCallArgs->GetNode()->TypeGet() == TYP_I_IMPL || call->gtCallArgs->GetNode()->TypeGet() == TYP_BYREF || call->gtCallArgs->GetNode()->gtOper == GT_NOP); // the arg was already morphed to a register (fgMorph called twice) maxRegArgs = 1; } else { maxRegArgs = 0; } #ifdef UNIX_X86_ABI // Add in the ret buff arg if (callHasRetBuffArg && call->unmgdCallConv != CorInfoCallConvExtension::C && // C and Stdcall calling conventions do not call->unmgdCallConv != CorInfoCallConvExtension::Stdcall) // use registers to pass arguments. maxRegArgs++; #endif } #endif // TARGET_X86 /* Morph the user arguments */ CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_ARM) // The ARM ABI has a concept of back-filling of floating-point argument registers, according // to the "Procedure Call Standard for the ARM Architecture" document, especially // section 6.1.2.3 "Parameter passing". Back-filling is where floating-point argument N+1 can // appear in a lower-numbered register than floating point argument N. That is, argument // register allocation is not strictly increasing. To support this, we need to keep track of unused // floating-point argument registers that we can back-fill. 
We only support 4-byte float and // 8-byte double types, and one to four element HFAs composed of these types. With this, we will // only back-fill single registers, since there is no way with these types to create // an alignment hole greater than one register. However, there can be up to 3 back-fill slots // available (with 16 FP argument registers). Consider this code: // // struct HFA { float x, y, z; }; // a three element HFA // void bar(float a1, // passed in f0 // double a2, // passed in f2/f3; skip f1 for alignment // HFA a3, // passed in f4/f5/f6 // double a4, // passed in f8/f9; skip f7 for alignment. NOTE: it doesn't fit in the f1 back-fill slot // HFA a5, // passed in f10/f11/f12 // double a6, // passed in f14/f15; skip f13 for alignment. NOTE: it doesn't fit in the f1 or f7 back-fill // // slots // float a7, // passed in f1 (back-filled) // float a8, // passed in f7 (back-filled) // float a9, // passed in f13 (back-filled) // float a10) // passed on the stack in [OutArg+0] // // Note that if we ever support FP types with larger alignment requirements, then there could // be more than single register back-fills. // // Once we assign a floating-pointer register to the stack, they all must be on the stack. // See "Procedure Call Standard for the ARM Architecture", section 6.1.2.3, "The back-filling // continues only so long as no VFP CPRC has been allocated to a slot on the stack." // We set anyFloatStackArgs to true when a floating-point argument has been assigned to the stack // and prevent any additional floating-point arguments from going in registers. bool anyFloatStackArgs = false; #endif // TARGET_ARM #ifdef UNIX_AMD64_ABI SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; #endif // UNIX_AMD64_ABI #if defined(DEBUG) // Check that we have valid information about call's argument types. // For example: // load byte; call(int) -> CALL(PUTARG_TYPE byte(IND byte)); // load int; call(byte) -> CALL(PUTARG_TYPE int (IND int)); // etc. if (call->callSig != nullptr) { CORINFO_SIG_INFO* sig = call->callSig; const unsigned sigArgsCount = sig->numArgs; GenTreeCall::Use* nodeArgs = call->gtCallArgs; // It could include many arguments not included in `sig->numArgs`, for example, `this`, runtime lookup, cookie // etc. unsigned nodeArgsCount = 0; call->VisitOperands([&nodeArgsCount](GenTree* operand) -> GenTree::VisitResult { nodeArgsCount++; return GenTree::VisitResult::Continue; }); if (call->gtCallThisArg != nullptr) { // Handle the most common argument not in the `sig->numArgs`. // so the following check works on more methods. 
nodeArgsCount--; } assert(nodeArgsCount >= sigArgsCount); if ((nodeArgsCount == sigArgsCount) && ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (nodeArgsCount == 1))) { CORINFO_ARG_LIST_HANDLE sigArg = sig->args; for (unsigned i = 0; i < sig->numArgs; ++i) { CORINFO_CLASS_HANDLE argClass; const CorInfoType corType = strip(info.compCompHnd->getArgType(sig, sigArg, &argClass)); const var_types sigType = JITtype2varType(corType); assert(nodeArgs != nullptr); const GenTree* nodeArg = nodeArgs->GetNode(); assert(nodeArg != nullptr); const var_types nodeType = nodeArg->TypeGet(); assert((nodeType == sigType) || varTypeIsStruct(sigType) || genTypeSize(nodeType) == genTypeSize(sigType)); sigArg = info.compCompHnd->getArgNext(sigArg); nodeArgs = nodeArgs->GetNext(); } assert(nodeArgs == nullptr); } } #endif // DEBUG for (args = call->gtCallArgs; args != nullptr; args = args->GetNext(), argIndex++) { argx = args->GetNode()->gtSkipPutArgType(); // Change the node to TYP_I_IMPL so we don't report GC info // NOTE: We deferred this from the importer because of the inliner. if (argx->IsLocalAddrExpr() != nullptr) { argx->gtType = TYP_I_IMPL; } // We should never have any ArgPlaceHolder nodes at this point. assert(!argx->IsArgPlaceHolderNode()); // Setup any HFA information about 'argx' bool isHfaArg = false; var_types hfaType = TYP_UNDEF; unsigned hfaSlots = 0; bool passUsingFloatRegs; unsigned argAlignBytes = TARGET_POINTER_SIZE; unsigned size = 0; unsigned byteSize = 0; if (GlobalJitOptions::compFeatureHfa) { hfaType = GetHfaType(argx); isHfaArg = varTypeIsValidHfaType(hfaType); #if defined(TARGET_ARM64) if (TargetOS::IsWindows) { // Make sure for vararg methods isHfaArg is not true. isHfaArg = callIsVararg ? false : isHfaArg; } #endif // defined(TARGET_ARM64) if (isHfaArg) { isHfaArg = true; hfaSlots = GetHfaCount(argx); // If we have a HFA struct it's possible we transition from a method that originally // only had integer types to now start having FP types. We have to communicate this // through this flag since LSRA later on will use this flag to determine whether // or not to track the FP register set. // compFloatingPointUsed = true; } } const bool isFloatHfa = (hfaType == TYP_FLOAT); #ifdef TARGET_ARM passUsingFloatRegs = !callIsVararg && (isHfaArg || varTypeUsesFloatReg(argx)) && !opts.compUseSoftFP; bool passUsingIntRegs = passUsingFloatRegs ? false : (intArgRegNum < MAX_REG_ARG); // We don't use the "size" return value from InferOpSizeAlign(). 
codeGen->InferOpSizeAlign(argx, &argAlignBytes); argAlignBytes = roundUp(argAlignBytes, TARGET_POINTER_SIZE); if (argAlignBytes == 2 * TARGET_POINTER_SIZE) { if (passUsingFloatRegs) { if (fltArgRegNum % 2 == 1) { fltArgSkippedRegMask |= genMapArgNumToRegMask(fltArgRegNum, TYP_FLOAT); fltArgRegNum++; } } else if (passUsingIntRegs) { if (intArgRegNum % 2 == 1) { argSkippedRegMask |= genMapArgNumToRegMask(intArgRegNum, TYP_I_IMPL); intArgRegNum++; } } #if defined(DEBUG) if (argSlots % 2 == 1) { argSlots++; } #endif } #elif defined(TARGET_ARM64) assert(!callIsVararg || !isHfaArg); passUsingFloatRegs = !callIsVararg && (isHfaArg || varTypeUsesFloatReg(argx)); #elif defined(TARGET_AMD64) passUsingFloatRegs = varTypeIsFloating(argx); #elif defined(TARGET_X86) passUsingFloatRegs = false; #else #error Unsupported or unset target architecture #endif // TARGET* bool isBackFilled = false; unsigned nextFltArgRegNum = fltArgRegNum; // This is the next floating-point argument register number to use var_types structBaseType = TYP_STRUCT; unsigned structSize = 0; bool passStructByRef = false; bool isStructArg; GenTree* actualArg = argx->gtEffectiveVal(true /* Commas only */); // // Figure out the size of the argument. This is either in number of registers, or number of // TARGET_POINTER_SIZE stack slots, or the sum of these if the argument is split between the registers and // the stack. // isStructArg = varTypeIsStruct(argx); CORINFO_CLASS_HANDLE objClass = NO_CLASS_HANDLE; if (isStructArg) { objClass = gtGetStructHandle(argx); if (argx->TypeGet() == TYP_STRUCT) { // For TYP_STRUCT arguments we must have an OBJ, LCL_VAR or MKREFANY switch (actualArg->OperGet()) { case GT_OBJ: structSize = actualArg->AsObj()->GetLayout()->GetSize(); assert(structSize == info.compCompHnd->getClassSize(objClass)); break; case GT_LCL_VAR: structSize = lvaGetDesc(actualArg->AsLclVarCommon())->lvExactSize; break; case GT_MKREFANY: structSize = info.compCompHnd->getClassSize(objClass); break; default: BADCODE("illegal argument tree in fgInitArgInfo"); break; } } else { structSize = genTypeSize(argx); assert(structSize == info.compCompHnd->getClassSize(objClass)); } } #if defined(TARGET_AMD64) #ifdef UNIX_AMD64_ABI if (!isStructArg) { size = 1; // On AMD64, all primitives fit in a single (64-bit) 'slot' byteSize = genTypeSize(argx); } else { size = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE; byteSize = structSize; eeGetSystemVAmd64PassStructInRegisterDescriptor(objClass, &structDesc); } #else // !UNIX_AMD64_ABI size = 1; // On AMD64 Windows, all args fit in a single (64-bit) 'slot' if (!isStructArg) { byteSize = genTypeSize(argx); } #endif // UNIX_AMD64_ABI #elif defined(TARGET_ARM64) if (isStructArg) { if (isHfaArg) { // HFA structs are passed by value in multiple registers. // The "size" in registers may differ the size in pointer-sized units. CORINFO_CLASS_HANDLE structHnd = gtGetStructHandle(argx); size = GetHfaCount(structHnd); byteSize = info.compCompHnd->getClassSize(structHnd); } else { // Structs are either passed in 1 or 2 (64-bit) slots. // Structs that are the size of 2 pointers are passed by value in multiple registers, // if sufficient registers are available. // Structs that are larger than 2 pointers (except for HFAs) are passed by // reference (to a copy) size = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE; byteSize = structSize; if (size > 2) { size = 1; } } // Note that there are some additional rules for multireg structs. 
// (i.e they cannot be split between registers and the stack) } else { size = 1; // Otherwise, all primitive types fit in a single (64-bit) 'slot' byteSize = genTypeSize(argx); } #elif defined(TARGET_ARM) || defined(TARGET_X86) if (isStructArg) { size = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE; byteSize = structSize; } else { // The typical case. // Long/double type argument(s) will be modified as needed in Lowering. size = genTypeStSz(argx->gtType); byteSize = genTypeSize(argx); } #else #error Unsupported or unset target architecture #endif // TARGET_XXX if (isStructArg) { assert(argx == args->GetNode()); assert(structSize != 0); structPassingKind howToPassStruct; structBaseType = getArgTypeForStruct(objClass, &howToPassStruct, callIsVararg, structSize); passStructByRef = (howToPassStruct == SPK_ByReference); if (howToPassStruct == SPK_ByReference) { byteSize = TARGET_POINTER_SIZE; } else { byteSize = structSize; } if (howToPassStruct == SPK_PrimitiveType) { #ifdef TARGET_ARM // TODO-CQ: getArgTypeForStruct should *not* return TYP_DOUBLE for a double struct, // or for a struct of two floats. This causes the struct to be address-taken. if (structBaseType == TYP_DOUBLE) { size = 2; } else #endif // TARGET_ARM { size = 1; } } else if (passStructByRef) { size = 1; } } const var_types argType = args->GetNode()->TypeGet(); if (args->GetNode()->OperIs(GT_PUTARG_TYPE)) { byteSize = genTypeSize(argType); } // The 'size' value has now must have been set. (the original value of zero is an invalid value) assert(size != 0); assert(byteSize != 0); if (compMacOsArm64Abi()) { // Arm64 Apple has a special ABI for passing small size arguments on stack, // bytes are aligned to 1-byte, shorts to 2-byte, int/float to 4-byte, etc. // It means passing 8 1-byte arguments on stack can take as small as 8 bytes. argAlignBytes = eeGetArgSizeAlignment(argType, isFloatHfa); } // // Figure out if the argument will be passed in a register. // bool isRegArg = false; NonStandardArgKind nonStandardArgKind = NonStandardArgKind::None; regNumber nonStdRegNum = REG_NA; if (isRegParamType(genActualType(argx->TypeGet())) #ifdef UNIX_AMD64_ABI && (!isStructArg || structDesc.passedInRegisters) #elif defined(TARGET_X86) || (isStructArg && isTrivialPointerSizedStruct(objClass)) #endif ) { #ifdef TARGET_ARM if (passUsingFloatRegs) { // First, see if it can be back-filled if (!anyFloatStackArgs && // Is it legal to back-fill? (We haven't put any FP args on the stack yet) (fltArgSkippedRegMask != RBM_NONE) && // Is there an available back-fill slot? (size == 1)) // The size to back-fill is one float register { // Back-fill the register. isBackFilled = true; regMaskTP backFillBitMask = genFindLowestBit(fltArgSkippedRegMask); fltArgSkippedRegMask &= ~backFillBitMask; // Remove the back-filled register(s) from the skipped mask nextFltArgRegNum = genMapFloatRegNumToRegArgNum(genRegNumFromMask(backFillBitMask)); assert(nextFltArgRegNum < MAX_FLOAT_REG_ARG); } // Does the entire float, double, or HFA fit in the FP arg registers? // Check if the last register needed is still in the argument register range. isRegArg = (nextFltArgRegNum + size - 1) < MAX_FLOAT_REG_ARG; if (!isRegArg) { anyFloatStackArgs = true; } } else { isRegArg = intArgRegNum < MAX_REG_ARG; } #elif defined(TARGET_ARM64) if (passUsingFloatRegs) { // Check if the last register needed is still in the fp argument register range. 
isRegArg = (nextFltArgRegNum + (size - 1)) < MAX_FLOAT_REG_ARG; // Do we have a HFA arg that we wanted to pass in registers, but we ran out of FP registers? if (isHfaArg && !isRegArg) { // recompute the 'size' so that it represent the number of stack slots rather than the number of // registers // unsigned roundupSize = (unsigned)roundUp(structSize, TARGET_POINTER_SIZE); size = roundupSize / TARGET_POINTER_SIZE; // We also must update fltArgRegNum so that we no longer try to // allocate any new floating point registers for args // This prevents us from backfilling a subsequent arg into d7 // fltArgRegNum = MAX_FLOAT_REG_ARG; } } else { // Check if the last register needed is still in the int argument register range. isRegArg = (intArgRegNum + (size - 1)) < maxRegArgs; // Did we run out of registers when we had a 16-byte struct (size===2) ? // (i.e we only have one register remaining but we needed two registers to pass this arg) // This prevents us from backfilling a subsequent arg into x7 // if (!isRegArg && (size > 1)) { // Arm64 windows native varargs allows splitting a 16 byte struct between stack // and the last general purpose register. if (TargetOS::IsWindows && callIsVararg) { // Override the decision and force a split. isRegArg = (intArgRegNum + (size - 1)) <= maxRegArgs; } else { // We also must update intArgRegNum so that we no longer try to // allocate any new general purpose registers for args // intArgRegNum = maxRegArgs; } } } #else // not TARGET_ARM or TARGET_ARM64 #if defined(UNIX_AMD64_ABI) // Here a struct can be passed in register following the classifications of its members and size. // Now make sure there are actually enough registers to do so. if (isStructArg) { unsigned int structFloatRegs = 0; unsigned int structIntRegs = 0; for (unsigned int i = 0; i < structDesc.eightByteCount; i++) { if (structDesc.IsIntegralSlot(i)) { structIntRegs++; } else if (structDesc.IsSseSlot(i)) { structFloatRegs++; } } isRegArg = ((nextFltArgRegNum + structFloatRegs) <= MAX_FLOAT_REG_ARG) && ((intArgRegNum + structIntRegs) <= MAX_REG_ARG); } else { if (passUsingFloatRegs) { isRegArg = nextFltArgRegNum < MAX_FLOAT_REG_ARG; } else { isRegArg = intArgRegNum < MAX_REG_ARG; } } #else // !defined(UNIX_AMD64_ABI) isRegArg = (intArgRegNum + (size - 1)) < maxRegArgs; #endif // !defined(UNIX_AMD64_ABI) #endif // TARGET_ARM } else { isRegArg = false; } // If there are nonstandard args (outside the calling convention) they were inserted above // and noted them in a table so we can recognize them here and build their argInfo. // // They should not affect the placement of any other args or stack space required. // Example: on AMD64 R10 and R11 are used for indirect VSD (generic interface) and cookie calls. bool isNonStandard = nonStandardArgs.Find(argx, &nonStdRegNum, &nonStandardArgKind); if (isNonStandard) { isRegArg = (nonStdRegNum != REG_STK); } else if (call->IsTailCallViaJitHelper()) { // We have already (before calling fgMorphArgs()) appended the 4 special args // required by the x86 tailcall helper. These args are required to go on the // stack. Force them to the stack here. assert(numArgs >= 4); if (argIndex >= numArgs - 4) { isRegArg = false; } } // Now we know if the argument goes in registers or not and how big it is. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_ARM // If we ever allocate a floating point argument to the stack, then all // subsequent HFA/float/double arguments go on the stack. 
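        // For example (hypothetical, following the AAPCS VFP rules described above):
        //     void baz(double a1, ..., double a8, // d0..d7 consume all 16 single-precision slots
        //              float  a9,                 // no VFP register left -> passed on the stack
        //              float  a10);               // must also go on the stack, never back-filled
        // The code below (together with anyFloatStackArgs, set when a9 was assigned to the stack)
        // ensures that a10 and any later floating-point argument will not land in a VFP register.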
if (!isRegArg && passUsingFloatRegs) { for (; fltArgRegNum < MAX_FLOAT_REG_ARG; ++fltArgRegNum) { fltArgSkippedRegMask |= genMapArgNumToRegMask(fltArgRegNum, TYP_FLOAT); } } // If we think we're going to split a struct between integer registers and the stack, check to // see if we've already assigned a floating-point arg to the stack. if (isRegArg && // We decided above to use a register for the argument !passUsingFloatRegs && // We're using integer registers (intArgRegNum + size > MAX_REG_ARG) && // We're going to split a struct type onto registers and stack anyFloatStackArgs) // We've already used the stack for a floating-point argument { isRegArg = false; // Change our mind; don't pass this struct partially in registers // Skip the rest of the integer argument registers for (; intArgRegNum < MAX_REG_ARG; ++intArgRegNum) { argSkippedRegMask |= genMapArgNumToRegMask(intArgRegNum, TYP_I_IMPL); } } #endif // TARGET_ARM // Now create the fgArgTabEntry. fgArgTabEntry* newArgEntry; if (isRegArg) { regNumber nextRegNum = REG_STK; #if defined(UNIX_AMD64_ABI) regNumber nextOtherRegNum = REG_STK; unsigned int structFloatRegs = 0; unsigned int structIntRegs = 0; #endif // defined(UNIX_AMD64_ABI) if (isNonStandard) { nextRegNum = nonStdRegNum; } #if defined(UNIX_AMD64_ABI) else if (isStructArg && structDesc.passedInRegisters) { // It is a struct passed in registers. Assign the next available register. assert((structDesc.eightByteCount <= 2) && "Too many eightbytes."); regNumber* nextRegNumPtrs[2] = {&nextRegNum, &nextOtherRegNum}; for (unsigned int i = 0; i < structDesc.eightByteCount; i++) { if (structDesc.IsIntegralSlot(i)) { *nextRegNumPtrs[i] = genMapIntRegArgNumToRegNum(intArgRegNum + structIntRegs); ++structIntRegs; } else if (structDesc.IsSseSlot(i)) { *nextRegNumPtrs[i] = genMapFloatRegArgNumToRegNum(nextFltArgRegNum + structFloatRegs); ++structFloatRegs; } } } #endif // defined(UNIX_AMD64_ABI) else { // fill in or update the argInfo table nextRegNum = passUsingFloatRegs ? genMapFloatRegArgNumToRegNum(nextFltArgRegNum) : genMapIntRegArgNumToRegNum(intArgRegNum); } #ifdef TARGET_AMD64 #ifndef UNIX_AMD64_ABI assert(size == 1); #endif #endif // This is a register argument - put it in the table newArgEntry = call->fgArgInfo->AddRegArg(argIndex, argx, args, nextRegNum, size, byteSize, argAlignBytes, isStructArg, isFloatHfa, callIsVararg UNIX_AMD64_ABI_ONLY_ARG(nextOtherRegNum) UNIX_AMD64_ABI_ONLY_ARG(structIntRegs) UNIX_AMD64_ABI_ONLY_ARG(structFloatRegs) UNIX_AMD64_ABI_ONLY_ARG(&structDesc)); newArgEntry->SetIsBackFilled(isBackFilled); // Set up the next intArgRegNum and fltArgRegNum values. 
if (!isBackFilled) { #if defined(UNIX_AMD64_ABI) if (isStructArg) { // For this case, we've already set the regNums in the argTabEntry intArgRegNum += structIntRegs; fltArgRegNum += structFloatRegs; } else #endif // defined(UNIX_AMD64_ABI) { if (!isNonStandard) { #if FEATURE_ARG_SPLIT // Check for a split (partially enregistered) struct if (compFeatureArgSplit() && !passUsingFloatRegs && ((intArgRegNum + size) > MAX_REG_ARG)) { // This indicates a partial enregistration of a struct type assert((isStructArg) || argx->OperIs(GT_FIELD_LIST) || argx->OperIsCopyBlkOp() || (argx->gtOper == GT_COMMA && (argx->gtFlags & GTF_ASG))); unsigned numRegsPartial = MAX_REG_ARG - intArgRegNum; assert((unsigned char)numRegsPartial == numRegsPartial); call->fgArgInfo->SplitArg(argIndex, numRegsPartial, size - numRegsPartial); } #endif // FEATURE_ARG_SPLIT if (passUsingFloatRegs) { fltArgRegNum += size; #ifdef WINDOWS_AMD64_ABI // Whenever we pass an integer register argument // we skip the corresponding floating point register argument intArgRegNum = min(intArgRegNum + size, MAX_REG_ARG); #endif // WINDOWS_AMD64_ABI // No supported architecture supports partial structs using float registers. assert(fltArgRegNum <= MAX_FLOAT_REG_ARG); } else { // Increment intArgRegNum by 'size' registers intArgRegNum += size; #ifdef WINDOWS_AMD64_ABI fltArgRegNum = min(fltArgRegNum + size, MAX_FLOAT_REG_ARG); #endif // WINDOWS_AMD64_ABI } } } } } else // We have an argument that is not passed in a register { // This is a stack argument - put it in the table newArgEntry = call->fgArgInfo->AddStkArg(argIndex, argx, args, size, byteSize, argAlignBytes, isStructArg, isFloatHfa, callIsVararg); #ifdef UNIX_AMD64_ABI // TODO-Amd64-Unix-CQ: This is temporary (see also in fgMorphArgs). if (structDesc.passedInRegisters) { newArgEntry->structDesc.CopyFrom(structDesc); } #endif } newArgEntry->nonStandardArgKind = nonStandardArgKind; if (GlobalJitOptions::compFeatureHfa) { if (isHfaArg) { newArgEntry->SetHfaType(hfaType, hfaSlots); } } newArgEntry->SetMultiRegNums(); noway_assert(newArgEntry != nullptr); if (newArgEntry->isStruct) { newArgEntry->passedByRef = passStructByRef; newArgEntry->argType = (structBaseType == TYP_UNKNOWN) ? argx->TypeGet() : structBaseType; } else { newArgEntry->argType = argx->TypeGet(); } DEBUG_ARG_SLOTS_ONLY(argSlots += size;) } // end foreach argument loop #ifdef DEBUG if (verbose) { JITDUMP("ArgTable for %d.%s after fgInitArgInfo:\n", call->gtTreeID, GenTree::OpName(call->gtOper)); call->fgArgInfo->Dump(this); JITDUMP("\n"); } #endif } //------------------------------------------------------------------------ // fgMorphArgs: Walk and transform (morph) the arguments of a call // // Arguments: // callNode - the call for which we are doing the argument morphing // // Return Value: // Like most morph methods, this method returns the morphed node, // though in this case there are currently no scenarios where the // node itself is re-created. // // Notes: // This calls fgInitArgInfo to create the 'fgArgInfo' for the call. // If it has already been created, that method will simply return. // // This method changes the state of the call node. It uses the existence // of gtCallLateArgs (the late arguments list) to determine if it has // already done the first round of morphing. // // The first time it is called (i.e. during global morphing), this method // computes the "late arguments". 
This is when it determines which arguments // need to be evaluated to temps prior to the main argument setup, and which // can be directly evaluated into the argument location. It also creates a // second argument list (gtCallLateArgs) that does the final placement of the // arguments, e.g. into registers or onto the stack. // // The "non-late arguments", aka the gtCallArgs, are doing the in-order // evaluation of the arguments that might have side-effects, such as embedded // assignments, calls or possible throws. In these cases, it and earlier // arguments must be evaluated to temps. // // On targets with a fixed outgoing argument area (FEATURE_FIXED_OUT_ARGS), // if we have any nested calls, we need to defer the copying of the argument // into the fixed argument area until after the call. If the argument did not // otherwise need to be computed into a temp, it is moved to gtCallLateArgs and // replaced in the "early" arg list (gtCallArgs) with a placeholder node. #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) { GenTreeCall::Use* args; GenTree* argx; GenTreeFlags flagsSummary = GTF_EMPTY; unsigned argIndex = 0; DEBUG_ARG_SLOTS_ONLY(unsigned argSlots = 0;) bool reMorphing = call->AreArgsComplete(); // Set up the fgArgInfo. fgInitArgInfo(call); JITDUMP("%sMorphing args for %d.%s:\n", (reMorphing) ? "Re" : "", call->gtTreeID, GenTree::OpName(call->gtOper)); // If we are remorphing, process the late arguments (which were determined by a previous caller). if (reMorphing) { for (GenTreeCall::Use& use : call->LateArgs()) { use.SetNode(fgMorphTree(use.GetNode())); flagsSummary |= use.GetNode()->gtFlags; } assert(call->fgArgInfo != nullptr); } call->fgArgInfo->RemorphReset(); // First we morph the argument subtrees ('this' pointer, arguments, etc.). // During the first call to fgMorphArgs we also record the // information about late arguments we have in 'fgArgInfo'. // This information is used later to contruct the gtCallLateArgs */ // Process the 'this' argument value, if present. if (call->gtCallThisArg != nullptr) { argx = call->gtCallThisArg->GetNode(); fgArgTabEntry* thisArgEntry = call->fgArgInfo->GetArgEntry(0, reMorphing); argx = fgMorphTree(argx); call->gtCallThisArg->SetNode(argx); // This is a register argument - possibly update it in the table. call->fgArgInfo->UpdateRegArg(thisArgEntry, argx, reMorphing); flagsSummary |= argx->gtFlags; if (!reMorphing && call->IsExpandedEarly() && call->IsVirtualVtable()) { if (!argx->OperIsLocal()) { thisArgEntry->needTmp = true; call->fgArgInfo->SetNeedsTemps(); } } assert(argIndex == 0); argIndex++; DEBUG_ARG_SLOTS_ONLY(argSlots++;) } // Note that this name is a bit of a misnomer - it indicates that there are struct args // that occupy more than a single slot that are passed by value (not necessarily in regs). bool hasMultiregStructArgs = false; for (args = call->gtCallArgs; args != nullptr; args = args->GetNext(), argIndex++) { GenTree** parentArgx = &args->NodeRef(); fgArgTabEntry* argEntry = call->fgArgInfo->GetArgEntry(argIndex, reMorphing); // Morph the arg node, and update the parent and argEntry pointers. 
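        // fgMorphTree may fold or replace the node entirely, so refresh the parent slot and
        // keep the arg-table entry pointing at the (possibly new) tree.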
argx = *parentArgx; argx = fgMorphTree(argx); *parentArgx = argx; assert(argx == args->GetNode()); DEBUG_ARG_SLOTS_ONLY(unsigned size = argEntry->getSize();) CORINFO_CLASS_HANDLE copyBlkClass = NO_CLASS_HANDLE; #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi()) { if (argEntry->GetByteAlignment() == 2 * TARGET_POINTER_SIZE) { if (argSlots % 2 == 1) { argSlots++; } } } #endif // DEBUG if (argEntry->isNonStandard() && argEntry->isPassedInRegisters()) { // We need to update the node field for this nonStandard arg here // as it may have been changed by the call to fgMorphTree. call->fgArgInfo->UpdateRegArg(argEntry, argx, reMorphing); flagsSummary |= argx->gtFlags; continue; } DEBUG_ARG_SLOTS_ASSERT(size != 0); DEBUG_ARG_SLOTS_ONLY(argSlots += argEntry->getSlotCount();) if (argx->IsLocalAddrExpr() != nullptr) { argx->gtType = TYP_I_IMPL; } // Get information about this argument. var_types hfaType = argEntry->GetHfaType(); bool isHfaArg = (hfaType != TYP_UNDEF); bool passUsingFloatRegs = argEntry->isPassedInFloatRegisters(); unsigned structSize = 0; // Struct arguments may be morphed into a node that is not a struct type. // In such case the fgArgTabEntry keeps track of whether the original node (before morphing) // was a struct and the struct classification. bool isStructArg = argEntry->isStruct; GenTree* argObj = argx->gtEffectiveVal(true /*commaOnly*/); if (isStructArg && varTypeIsStruct(argObj) && !argObj->OperIs(GT_ASG, GT_MKREFANY, GT_FIELD_LIST, GT_ARGPLACE)) { CORINFO_CLASS_HANDLE objClass = gtGetStructHandle(argObj); unsigned originalSize; if (argObj->TypeGet() == TYP_STRUCT) { if (argObj->OperIs(GT_OBJ)) { // Get the size off the OBJ node. originalSize = argObj->AsObj()->GetLayout()->GetSize(); assert(originalSize == info.compCompHnd->getClassSize(objClass)); } else { // We have a BADCODE assert for this in fgInitArgInfo. assert(argObj->OperIs(GT_LCL_VAR)); originalSize = lvaGetDesc(argObj->AsLclVarCommon())->lvExactSize; } } else { originalSize = genTypeSize(argx); assert(originalSize == info.compCompHnd->getClassSize(objClass)); } unsigned roundupSize = (unsigned)roundUp(originalSize, TARGET_POINTER_SIZE); var_types structBaseType = argEntry->argType; // First, handle the case where the argument is passed by reference. if (argEntry->passedByRef) { DEBUG_ARG_SLOTS_ASSERT(size == 1); copyBlkClass = objClass; #ifdef UNIX_AMD64_ABI assert(!"Structs are not passed by reference on x64/ux"); #endif // UNIX_AMD64_ABI } else // This is passed by value. { // Check to see if we can transform this into load of a primitive type. // 'size' must be the number of pointer sized items DEBUG_ARG_SLOTS_ASSERT(size == roundupSize / TARGET_POINTER_SIZE); structSize = originalSize; unsigned passingSize = originalSize; // Check to see if we can transform this struct load (GT_OBJ) into a GT_IND of the appropriate size. // When it can do this is platform-dependent: // - In general, it can be done for power of 2 structs that fit in a single register. // - For ARM and ARM64 it must also be a non-HFA struct, or have a single field. // - This is irrelevant for X86, since structs are always passed by value on the stack. 
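                // For example, a 4-byte struct wrapping a single int field can be re-read as a
                // TYP_INT and passed directly in a register, avoiding a block copy altogether.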
GenTree* lclVar = fgIsIndirOfAddrOfLocal(argObj); bool canTransform = false; if (structBaseType != TYP_STRUCT) { if (isPow2(passingSize)) { canTransform = (!argEntry->IsHfaArg() || (passingSize == genTypeSize(argEntry->GetHfaType()))); } #if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) // For ARM64 or AMD64/UX we can pass non-power-of-2 structs in a register, but we can // only transform in that case if the arg is a local. // TODO-CQ: This transformation should be applicable in general, not just for the ARM64 // or UNIX_AMD64_ABI cases where they will be passed in registers. else { canTransform = (lclVar != nullptr); passingSize = genTypeSize(structBaseType); } #endif // TARGET_ARM64 || UNIX_AMD64_ABI } if (!canTransform) { #if defined(TARGET_AMD64) #ifndef UNIX_AMD64_ABI // On Windows structs are always copied and passed by reference (handled above) unless they are // passed by value in a single register. assert(size == 1); copyBlkClass = objClass; #else // UNIX_AMD64_ABI // On Unix, structs are always passed by value. // We only need a copy if we have one of the following: // - The sizes don't match for a non-lclVar argument. // - We have a known struct type (e.g. SIMD) that requires multiple registers. // TODO-Amd64-Unix-Throughput: We don't need to keep the structDesc in the argEntry if it's not // actually passed in registers. if (argEntry->isPassedInRegisters()) { if (argObj->OperIs(GT_OBJ)) { if (passingSize != structSize) { copyBlkClass = objClass; } } else if (lclVar == nullptr) { // This should only be the case of a value directly producing a known struct type. assert(argObj->TypeGet() != TYP_STRUCT); if (argEntry->numRegs > 1) { copyBlkClass = objClass; } } } #endif // UNIX_AMD64_ABI #elif defined(TARGET_ARM64) if ((passingSize != structSize) && (lclVar == nullptr)) { copyBlkClass = objClass; } #endif #ifdef TARGET_ARM // TODO-1stClassStructs: Unify these conditions across targets. if (((lclVar != nullptr) && (lvaGetPromotionType(lclVar->AsLclVarCommon()->GetLclNum()) == PROMOTION_TYPE_INDEPENDENT)) || ((argObj->OperIs(GT_OBJ)) && (passingSize != structSize))) { copyBlkClass = objClass; } if (structSize < TARGET_POINTER_SIZE) { copyBlkClass = objClass; } #endif // TARGET_ARM } else { // We have a struct argument that fits into a register, and it is either a power of 2, // or a local. // Change our argument, as needed, into a value of the appropriate type. 
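                    // The retyping below turns a GT_OBJ into a GT_IND of the primitive type,
                    // redirects a single-field promoted local to its field, or falls back to a
                    // GT_LCL_FLD when the local's layout has to be reinterpreted in place.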
CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_ARM DEBUG_ARG_SLOTS_ASSERT((size == 1) || ((structBaseType == TYP_DOUBLE) && (size == 2))); #else DEBUG_ARG_SLOTS_ASSERT((size == 1) || (varTypeIsSIMD(structBaseType) && size == (genTypeSize(structBaseType) / REGSIZE_BYTES))); #endif assert((structBaseType != TYP_STRUCT) && (genTypeSize(structBaseType) >= originalSize)); if (argObj->OperIs(GT_OBJ)) { argObj->ChangeOper(GT_IND); // Now see if we can fold *(&X) into X if (argObj->AsOp()->gtOp1->gtOper == GT_ADDR) { GenTree* temp = argObj->AsOp()->gtOp1->AsOp()->gtOp1; // Keep the DONT_CSE flag in sync // (as the addr always marks it for its op1) temp->gtFlags &= ~GTF_DONT_CSE; temp->gtFlags |= (argObj->gtFlags & GTF_DONT_CSE); DEBUG_DESTROY_NODE(argObj->AsOp()->gtOp1); // GT_ADDR DEBUG_DESTROY_NODE(argObj); // GT_IND argObj = temp; *parentArgx = temp; argx = temp; } } if (argObj->gtOper == GT_LCL_VAR) { unsigned lclNum = argObj->AsLclVarCommon()->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->lvPromoted) { if (varDsc->lvFieldCnt == 1) { // get the first and only promoted field LclVarDsc* fieldVarDsc = lvaGetDesc(varDsc->lvFieldLclStart); if (genTypeSize(fieldVarDsc->TypeGet()) >= originalSize) { // we will use the first and only promoted field argObj->AsLclVarCommon()->SetLclNum(varDsc->lvFieldLclStart); if (varTypeIsEnregisterable(fieldVarDsc->TypeGet()) && (genTypeSize(fieldVarDsc->TypeGet()) == originalSize)) { // Just use the existing field's type argObj->gtType = fieldVarDsc->TypeGet(); } else { // Can't use the existing field's type, so use GT_LCL_FLD to swizzle // to a new type lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg)); argObj->ChangeOper(GT_LCL_FLD); argObj->gtType = structBaseType; } assert(varTypeIsEnregisterable(argObj->TypeGet())); assert(copyBlkClass == NO_CLASS_HANDLE); } else { // use GT_LCL_FLD to swizzle the single field struct to a new type lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg)); argObj->ChangeOper(GT_LCL_FLD); argObj->gtType = structBaseType; } } else { // The struct fits into a single register, but it has been promoted into its // constituent fields, and so we have to re-assemble it copyBlkClass = objClass; } } else if (genTypeSize(varDsc->TypeGet()) != genTypeSize(structBaseType)) { // Not a promoted struct, so just swizzle the type by using GT_LCL_FLD lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg)); argObj->ChangeOper(GT_LCL_FLD); argObj->gtType = structBaseType; } } else { // Not a GT_LCL_VAR, so we can just change the type on the node argObj->gtType = structBaseType; } assert(varTypeIsEnregisterable(argObj->TypeGet()) || ((copyBlkClass != NO_CLASS_HANDLE) && varTypeIsEnregisterable(structBaseType))); } #if !defined(UNIX_AMD64_ABI) && !defined(TARGET_ARMARCH) // TODO-CQ-XARCH: there is no need for a temp copy if we improve our code generation in // `genPutStructArgStk` for xarch like we did it for Arm/Arm64. // We still have a struct unless we converted the GT_OBJ into a GT_IND above... if (isHfaArg && passUsingFloatRegs) { } else if (structBaseType == TYP_STRUCT) { // If the valuetype size is not a multiple of TARGET_POINTER_SIZE, // we must copyblk to a temp before doing the obj to avoid // the obj reading memory past the end of the valuetype CLANG_FORMAT_COMMENT_ANCHOR; if (roundupSize > originalSize) { copyBlkClass = objClass; // There are a few special cases where we can omit using a CopyBlk // where we normally would need to use one. 
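                        // In particular, when the source is the address of a local, reading a few
                        // bytes past the end of the value stays within the frame and cannot fault,
                        // so the copy is unnecessary.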
if (argObj->OperIs(GT_OBJ) && argObj->AsObj()->gtGetOp1()->IsLocalAddrExpr() != nullptr) // Is the source a LclVar? { copyBlkClass = NO_CLASS_HANDLE; } } } #endif // !UNIX_AMD64_ABI } } if (argEntry->isPassedInRegisters()) { call->fgArgInfo->UpdateRegArg(argEntry, argx, reMorphing); } else { call->fgArgInfo->UpdateStkArg(argEntry, argx, reMorphing); } if (copyBlkClass != NO_CLASS_HANDLE) { fgMakeOutgoingStructArgCopy(call, args, copyBlkClass); } if (argx->gtOper == GT_MKREFANY) { // 'Lower' the MKREFANY tree and insert it. noway_assert(!reMorphing); #ifdef TARGET_X86 // Build the mkrefany as a GT_FIELD_LIST GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList(); fieldList->AddField(this, argx->AsOp()->gtGetOp1(), OFFSETOF__CORINFO_TypedReference__dataPtr, TYP_BYREF); fieldList->AddField(this, argx->AsOp()->gtGetOp2(), OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL); fgArgTabEntry* fp = gtArgEntryByNode(call, argx); args->SetNode(fieldList); assert(fp->GetNode() == fieldList); #else // !TARGET_X86 // Get a new temp // Here we don't need unsafe value cls check since the addr of temp is used only in mkrefany unsigned tmp = lvaGrabTemp(true DEBUGARG("by-value mkrefany struct argument")); lvaSetStruct(tmp, impGetRefAnyClass(), false); // Build the mkrefany as a comma node: // (tmp.ptr=argx),(tmp.type=handle) GenTreeLclFld* destPtrSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, OFFSETOF__CORINFO_TypedReference__dataPtr); GenTreeLclFld* destTypeSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, OFFSETOF__CORINFO_TypedReference__type); destPtrSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyDataField())); destPtrSlot->gtFlags |= GTF_VAR_DEF; destTypeSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField())); destTypeSlot->gtFlags |= GTF_VAR_DEF; GenTree* asgPtrSlot = gtNewAssignNode(destPtrSlot, argx->AsOp()->gtOp1); GenTree* asgTypeSlot = gtNewAssignNode(destTypeSlot, argx->AsOp()->gtOp2); GenTree* asg = gtNewOperNode(GT_COMMA, TYP_VOID, asgPtrSlot, asgTypeSlot); // Change the expression to "(tmp=val)" args->SetNode(asg); // EvalArgsToTemps will cause tmp to actually get loaded as the argument call->fgArgInfo->EvalToTmp(argEntry, tmp, asg); lvaSetVarAddrExposed(tmp DEBUGARG(AddressExposedReason::TOO_CONSERVATIVE)); #endif // !TARGET_X86 } #if FEATURE_MULTIREG_ARGS if (isStructArg) { if (((argEntry->numRegs + argEntry->GetStackSlotsNumber()) > 1) || (isHfaArg && argx->TypeGet() == TYP_STRUCT)) { hasMultiregStructArgs = true; } } #ifdef TARGET_ARM else if ((argEntry->argType == TYP_LONG) || (argEntry->argType == TYP_DOUBLE)) { assert((argEntry->numRegs == 2) || (argEntry->numSlots == 2)); } #endif else { // We must have exactly one register or slot. assert(((argEntry->numRegs == 1) && (argEntry->GetStackSlotsNumber() == 0)) || ((argEntry->numRegs == 0) && (argEntry->GetStackSlotsNumber() == 1))); } #endif #if defined(TARGET_X86) if (isStructArg) { GenTree* lclNode = argx->OperIs(GT_LCL_VAR) ? argx : fgIsIndirOfAddrOfLocal(argx); if ((lclNode != nullptr) && (lvaGetPromotionType(lclNode->AsLclVarCommon()->GetLclNum()) == Compiler::PROMOTION_TYPE_INDEPENDENT)) { // Make a GT_FIELD_LIST of the field lclVars. 
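                // e.g. for a promoted struct { int a; int b; } this produces
                //      FIELD_LIST(LCLVAR-a (offset 0), LCLVAR-b (offset 4))
                // so that each field can be pushed onto the stack individually.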
GenTreeLclVarCommon* lcl = lclNode->AsLclVarCommon(); LclVarDsc* varDsc = lvaGetDesc(lcl); GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList(); fgArgTabEntry* fp = gtArgEntryByNode(call, argx); args->SetNode(fieldList); assert(fp->GetNode() == fieldList); for (unsigned fieldLclNum = varDsc->lvFieldLclStart; fieldLclNum < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++fieldLclNum) { LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum); GenTree* fieldLcl; if (fieldLclNum == varDsc->lvFieldLclStart) { lcl->SetLclNum(fieldLclNum); lcl->SetOperResetFlags(GT_LCL_VAR); lcl->gtType = fieldVarDsc->TypeGet(); fieldLcl = lcl; } else { fieldLcl = gtNewLclvNode(fieldLclNum, fieldVarDsc->TypeGet()); } fieldList->AddField(this, fieldLcl, fieldVarDsc->lvFldOffset, fieldVarDsc->TypeGet()); } } } #endif // TARGET_X86 flagsSummary |= args->GetNode()->gtFlags; } // end foreach argument loop if (!reMorphing) { call->fgArgInfo->ArgsComplete(); } /* Process the function address, if indirect call */ if (call->gtCallType == CT_INDIRECT) { call->gtCallAddr = fgMorphTree(call->gtCallAddr); // Const CSE may create an assignment node here flagsSummary |= call->gtCallAddr->gtFlags; } #if FEATURE_FIXED_OUT_ARGS // Record the outgoing argument size. If the call is a fast tail // call, it will setup its arguments in incoming arg area instead // of the out-going arg area, so we don't need to track the // outgoing arg size. if (!call->IsFastTailCall()) { #if defined(UNIX_AMD64_ABI) // This is currently required for the UNIX ABI to work correctly. opts.compNeedToAlignFrame = true; #endif // UNIX_AMD64_ABI const unsigned outgoingArgSpaceSize = GetOutgoingArgByteSize(call->fgArgInfo->GetNextSlotByteOffset()); #if defined(DEBUG_ARG_SLOTS) unsigned preallocatedArgCount = 0; if (!compMacOsArm64Abi()) { preallocatedArgCount = call->fgArgInfo->GetNextSlotNum(); assert(outgoingArgSpaceSize == preallocatedArgCount * REGSIZE_BYTES); } #endif call->fgArgInfo->SetOutArgSize(max(outgoingArgSpaceSize, MIN_ARG_AREA_FOR_CALL)); #ifdef DEBUG if (verbose) { const fgArgInfo* argInfo = call->fgArgInfo; #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi()) { printf("argSlots=%d, preallocatedArgCount=%d, nextSlotNum=%d, nextSlotByteOffset=%d, " "outgoingArgSpaceSize=%d\n", argSlots, preallocatedArgCount, argInfo->GetNextSlotNum(), argInfo->GetNextSlotByteOffset(), outgoingArgSpaceSize); } else { printf("nextSlotByteOffset=%d, outgoingArgSpaceSize=%d\n", argInfo->GetNextSlotByteOffset(), outgoingArgSpaceSize); } #else printf("nextSlotByteOffset=%d, outgoingArgSpaceSize=%d\n", argInfo->GetNextSlotByteOffset(), outgoingArgSpaceSize); #endif } #endif } #endif // FEATURE_FIXED_OUT_ARGS // Clear the ASG and EXCEPT (if possible) flags on the call node call->gtFlags &= ~GTF_ASG; if (!call->OperMayThrow(this)) { call->gtFlags &= ~GTF_EXCEPT; } // Union in the side effect flags from the call's operands call->gtFlags |= flagsSummary & GTF_ALL_EFFECT; // If we are remorphing or don't have any register arguments or other arguments that need // temps, then we don't need to call SortArgs() and EvalArgsToTemps(). // if (!reMorphing && (call->fgArgInfo->HasRegArgs() || call->fgArgInfo->NeedsTemps())) { // Do the 'defer or eval to temp' analysis. 
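        // SortArgs orders the table so that cheap, side-effect-free arguments come last, and
        // EvalArgsToTemps then assigns temps to the arguments that must be evaluated early,
        // building the gtCallLateArgs list that does the final register/stack placement.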
call->fgArgInfo->SortArgs(); call->fgArgInfo->EvalArgsToTemps(); } if (hasMultiregStructArgs) { fgMorphMultiregStructArgs(call); } #ifdef DEBUG if (verbose) { JITDUMP("ArgTable for %d.%s after fgMorphArgs:\n", call->gtTreeID, GenTree::OpName(call->gtOper)); call->fgArgInfo->Dump(this); JITDUMP("\n"); } #endif return call; } #ifdef _PREFAST_ #pragma warning(pop) #endif //----------------------------------------------------------------------------- // fgMorphMultiregStructArgs: Locate the TYP_STRUCT arguments and // call fgMorphMultiregStructArg on each of them. // // Arguments: // call : a GenTreeCall node that has one or more TYP_STRUCT arguments\. // // Notes: // We only call fgMorphMultiregStructArg for struct arguments that are not passed as simple types. // It will ensure that the struct arguments are in the correct form. // If this method fails to find any TYP_STRUCT arguments it will assert. // void Compiler::fgMorphMultiregStructArgs(GenTreeCall* call) { bool foundStructArg = false; GenTreeFlags flagsSummary = GTF_EMPTY; #ifdef TARGET_X86 assert(!"Logic error: no MultiregStructArgs for X86"); #endif #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) assert(!"Logic error: no MultiregStructArgs for Windows X64 ABI"); #endif for (GenTreeCall::Use& use : call->Args()) { // For late arguments the arg tree that is overridden is in the gtCallLateArgs list. // For such late args the gtCallArgList contains the setup arg node (evaluating the arg.) // The tree from the gtCallLateArgs list is passed to the callee. The fgArgEntry node contains the mapping // between the nodes in both lists. If the arg is not a late arg, the fgArgEntry->node points to itself, // otherwise points to the list in the late args list. bool isLateArg = (use.GetNode()->gtFlags & GTF_LATE_ARG) != 0; fgArgTabEntry* fgEntryPtr = gtArgEntryByNode(call, use.GetNode()); assert(fgEntryPtr != nullptr); GenTree* argx = fgEntryPtr->GetNode(); GenTreeCall::Use* lateUse = nullptr; GenTree* lateNode = nullptr; if (isLateArg) { for (GenTreeCall::Use& lateArgUse : call->LateArgs()) { GenTree* argNode = lateArgUse.GetNode(); if (argx == argNode) { lateUse = &lateArgUse; lateNode = argNode; break; } } assert((lateUse != nullptr) && (lateNode != nullptr)); } if (!fgEntryPtr->isStruct) { continue; } unsigned size = (fgEntryPtr->numRegs + fgEntryPtr->GetStackSlotsNumber()); if ((size > 1) || (fgEntryPtr->IsHfaArg() && argx->TypeGet() == TYP_STRUCT)) { foundStructArg = true; if (varTypeIsStruct(argx) && !argx->OperIs(GT_FIELD_LIST)) { if (fgEntryPtr->IsHfaRegArg()) { var_types hfaType = fgEntryPtr->GetHfaType(); unsigned structSize; if (argx->OperIs(GT_OBJ)) { structSize = argx->AsObj()->GetLayout()->GetSize(); } else if (varTypeIsSIMD(argx)) { structSize = genTypeSize(argx); } else { assert(argx->OperIs(GT_LCL_VAR)); structSize = lvaGetDesc(argx->AsLclVar())->lvExactSize; } assert(structSize > 0); if (structSize == genTypeSize(hfaType)) { if (argx->OperIs(GT_OBJ)) { argx->SetOper(GT_IND); } argx->gtType = hfaType; } } GenTree* newArgx = fgMorphMultiregStructArg(argx, fgEntryPtr); // Did we replace 'argx' with a new tree? 
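                // (fgMorphMultiregStructArg either returns the original tree unchanged or a new
                // GT_FIELD_LIST that spells out the per-register / per-slot pieces.)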
if (newArgx != argx) { // link the new arg node into either the late arg list or the gtCallArgs list if (isLateArg) { lateUse->SetNode(newArgx); } else { use.SetNode(newArgx); } assert(fgEntryPtr->GetNode() == newArgx); } } } } // We should only call this method when we actually have one or more multireg struct args assert(foundStructArg); // Update the flags call->gtFlags |= (flagsSummary & GTF_ALL_EFFECT); } //----------------------------------------------------------------------------- // fgMorphMultiregStructArg: Given a TYP_STRUCT arg from a call argument list, // morph the argument as needed to be passed correctly. // // Arguments: // arg - A GenTree node containing a TYP_STRUCT arg // fgEntryPtr - the fgArgTabEntry information for the current 'arg' // // Notes: // The arg must be a GT_OBJ or GT_LCL_VAR or GT_LCL_FLD of TYP_STRUCT. // If 'arg' is a lclVar passed on the stack, we will ensure that any lclVars that must be on the // stack are marked as doNotEnregister, and then we return. // // If it is passed by register, we mutate the argument into the GT_FIELD_LIST form // which is only used for struct arguments. // // If arg is a LclVar we check if it is struct promoted and has the right number of fields // and if they are at the appropriate offsets we will use the struct promted fields // in the GT_FIELD_LIST nodes that we create. // If we have a GT_LCL_VAR that isn't struct promoted or doesn't meet the requirements // we will use a set of GT_LCL_FLDs nodes to access the various portions of the struct // this also forces the struct to be stack allocated into the local frame. // For the GT_OBJ case will clone the address expression and generate two (or more) // indirections. // Currently the implementation handles ARM64/ARM and will NYI for other architectures. // GenTree* Compiler::fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntryPtr) { assert(varTypeIsStruct(arg->TypeGet())); #if !defined(TARGET_ARMARCH) && !defined(UNIX_AMD64_ABI) NYI("fgMorphMultiregStructArg requires implementation for this target"); #endif #ifdef TARGET_ARM if ((fgEntryPtr->IsSplit() && fgEntryPtr->GetStackSlotsNumber() + fgEntryPtr->numRegs > 4) || (!fgEntryPtr->IsSplit() && fgEntryPtr->GetRegNum() == REG_STK)) #else if (fgEntryPtr->GetRegNum() == REG_STK) #endif { GenTreeLclVarCommon* lcl = nullptr; GenTree* actualArg = arg->gtEffectiveVal(); if (actualArg->OperGet() == GT_OBJ) { if (actualArg->gtGetOp1()->OperIs(GT_ADDR) && actualArg->gtGetOp1()->gtGetOp1()->OperIs(GT_LCL_VAR)) { lcl = actualArg->gtGetOp1()->gtGetOp1()->AsLclVarCommon(); } } else if (actualArg->OperGet() == GT_LCL_VAR) { lcl = actualArg->AsLclVarCommon(); } if (lcl != nullptr) { if (lvaGetPromotionType(lcl->GetLclNum()) == PROMOTION_TYPE_INDEPENDENT) { arg = fgMorphLclArgToFieldlist(lcl); } else if (arg->TypeGet() == TYP_STRUCT) { // If this is a non-register struct, it must be referenced from memory. if (!actualArg->OperIs(GT_OBJ)) { // Create an Obj of the temp to use it as a call argument. arg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, arg); arg = gtNewObjNode(lvaGetStruct(lcl->GetLclNum()), arg); } // Its fields will need to be accessed by address. 
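                // Keeping the local on the stack guarantees that the address-based accesses
                // created for the stack-passed pieces have a stable home location.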
lvaSetVarDoNotEnregister(lcl->GetLclNum() DEBUG_ARG(DoNotEnregisterReason::IsStructArg)); } } return arg; } #if FEATURE_MULTIREG_ARGS // Examine 'arg' and setup argValue objClass and structSize // const CORINFO_CLASS_HANDLE objClass = gtGetStructHandle(arg); GenTree* argValue = arg; // normally argValue will be arg, but see right below unsigned structSize = 0; if (arg->TypeGet() != TYP_STRUCT) { structSize = genTypeSize(arg->TypeGet()); assert(structSize == info.compCompHnd->getClassSize(objClass)); } else if (arg->OperGet() == GT_OBJ) { GenTreeObj* argObj = arg->AsObj(); const ClassLayout* objLayout = argObj->GetLayout(); structSize = objLayout->GetSize(); assert(structSize == info.compCompHnd->getClassSize(objClass)); // If we have a GT_OBJ of a GT_ADDR then we set argValue to the child node of the GT_ADDR. GenTree* op1 = argObj->gtOp1; if (op1->OperGet() == GT_ADDR) { GenTree* underlyingTree = op1->AsOp()->gtOp1; // Only update to the same type. if (underlyingTree->OperIs(GT_LCL_VAR)) { const LclVarDsc* varDsc = lvaGetDesc(underlyingTree->AsLclVar()); if (ClassLayout::AreCompatible(varDsc->GetLayout(), objLayout)) { argValue = underlyingTree; } } } } else if (arg->OperGet() == GT_LCL_VAR) { LclVarDsc* varDsc = lvaGetDesc(arg->AsLclVarCommon()); structSize = varDsc->lvExactSize; assert(structSize == info.compCompHnd->getClassSize(objClass)); } else { structSize = info.compCompHnd->getClassSize(objClass); } var_types hfaType = TYP_UNDEF; var_types elemType = TYP_UNDEF; unsigned elemCount = 0; unsigned elemSize = 0; var_types type[MAX_ARG_REG_COUNT] = {}; // TYP_UNDEF = 0 hfaType = fgEntryPtr->GetHfaType(); if (varTypeIsValidHfaType(hfaType) && fgEntryPtr->isPassedInFloatRegisters()) { elemType = hfaType; elemSize = genTypeSize(elemType); elemCount = structSize / elemSize; assert(elemSize * elemCount == structSize); for (unsigned inx = 0; inx < elemCount; inx++) { type[inx] = elemType; } } else { assert(structSize <= MAX_ARG_REG_COUNT * TARGET_POINTER_SIZE); BYTE gcPtrs[MAX_ARG_REG_COUNT]; elemCount = roundUp(structSize, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE; info.compCompHnd->getClassGClayout(objClass, &gcPtrs[0]); for (unsigned inx = 0; inx < elemCount; inx++) { #ifdef UNIX_AMD64_ABI if (gcPtrs[inx] == TYPE_GC_NONE) { type[inx] = GetTypeFromClassificationAndSizes(fgEntryPtr->structDesc.eightByteClassifications[inx], fgEntryPtr->structDesc.eightByteSizes[inx]); } else #endif // UNIX_AMD64_ABI { type[inx] = getJitGCType(gcPtrs[inx]); } } #ifndef UNIX_AMD64_ABI if ((argValue->OperGet() == GT_LCL_FLD) || (argValue->OperGet() == GT_LCL_VAR)) { elemSize = TARGET_POINTER_SIZE; // We can safely widen this to aligned bytes since we are loading from // a GT_LCL_VAR or a GT_LCL_FLD which is properly padded and // lives in the stack frame or will be a promoted field. 
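            // (Widening the load to elemCount * TARGET_POINTER_SIZE bytes therefore cannot fault.)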
// structSize = elemCount * TARGET_POINTER_SIZE; } else // we must have a GT_OBJ { assert(argValue->OperGet() == GT_OBJ); // We need to load the struct from an arbitrary address // and we can't read past the end of the structSize // We adjust the last load type here // unsigned remainingBytes = structSize % TARGET_POINTER_SIZE; unsigned lastElem = elemCount - 1; if (remainingBytes != 0) { switch (remainingBytes) { case 1: type[lastElem] = TYP_BYTE; break; case 2: type[lastElem] = TYP_SHORT; break; #if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) case 4: type[lastElem] = TYP_INT; break; #endif // (TARGET_ARM64) || (UNIX_AMD64_ABI) default: noway_assert(!"NYI: odd sized struct in fgMorphMultiregStructArg"); break; } } } #endif // !UNIX_AMD64_ABI } // We should still have a TYP_STRUCT assert(varTypeIsStruct(argValue->TypeGet())); GenTreeFieldList* newArg = nullptr; // Are we passing a struct LclVar? // if (argValue->OperGet() == GT_LCL_VAR) { GenTreeLclVarCommon* varNode = argValue->AsLclVarCommon(); unsigned varNum = varNode->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(varNum); // At this point any TYP_STRUCT LclVar must be an aligned struct // or an HFA struct, both which are passed by value. // assert((varDsc->lvSize() == elemCount * TARGET_POINTER_SIZE) || varDsc->lvIsHfa()); varDsc->lvIsMultiRegArg = true; #ifdef DEBUG if (verbose) { JITDUMP("Multireg struct argument V%02u : ", varNum); fgEntryPtr->Dump(); } #endif // DEBUG #ifndef UNIX_AMD64_ABI // This local variable must match the layout of the 'objClass' type exactly if (varDsc->lvIsHfa() && fgEntryPtr->isPassedInFloatRegisters()) { // We have a HFA struct. noway_assert(elemType == varDsc->GetHfaType()); noway_assert(elemSize == genTypeSize(elemType)); noway_assert(elemCount == (varDsc->lvExactSize / elemSize)); noway_assert(elemSize * elemCount == varDsc->lvExactSize); for (unsigned inx = 0; (inx < elemCount); inx++) { noway_assert(type[inx] == elemType); } } else { #if defined(TARGET_ARM64) // We must have a 16-byte struct (non-HFA) noway_assert(elemCount == 2); #elif defined(TARGET_ARM) noway_assert(elemCount <= 4); #endif for (unsigned inx = 0; inx < elemCount; inx++) { var_types currentGcLayoutType = varDsc->GetLayout()->GetGCPtrType(inx); // We setup the type[inx] value above using the GC info from 'objClass' // This GT_LCL_VAR must have the same GC layout info // if (varTypeIsGC(currentGcLayoutType)) { noway_assert(type[inx] == currentGcLayoutType); } else { // We may have use a small type when we setup the type[inx] values above // We can safely widen this to TYP_I_IMPL type[inx] = TYP_I_IMPL; } } } if (varDsc->lvPromoted && varDsc->lvIsHfa() && fgEntryPtr->isPassedInFloatRegisters()) { bool canMorphToFieldList = true; for (unsigned fldOffset = 0; fldOffset < varDsc->lvExactSize; fldOffset += elemSize) { const unsigned fldVarNum = lvaGetFieldLocal(varDsc, fldOffset); if ((fldVarNum == BAD_VAR_NUM) || !varTypeUsesFloatReg(lvaGetDesc(fldVarNum))) { canMorphToFieldList = false; break; } } if (canMorphToFieldList) { newArg = fgMorphLclArgToFieldlist(varNode); } } else #endif // !UNIX_AMD64_ABI #if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) // Is this LclVar a promoted struct with exactly 2 fields? if (varDsc->lvPromoted && (varDsc->lvFieldCnt == 2) && !varDsc->lvIsHfa()) { // See if we have two promoted fields that start at offset 0 and 8? unsigned loVarNum = lvaGetFieldLocal(varDsc, 0); unsigned hiVarNum = lvaGetFieldLocal(varDsc, TARGET_POINTER_SIZE); // Did we find the promoted fields at the necessary offsets? 
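            // (i.e. one field at offset 0 and one at offset TARGET_POINTER_SIZE, which is the
            // shape needed to pass the struct as two independent register-sized values)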
if ((loVarNum != BAD_VAR_NUM) && (hiVarNum != BAD_VAR_NUM)) { LclVarDsc* loVarDsc = lvaGetDesc(loVarNum); LclVarDsc* hiVarDsc = lvaGetDesc(hiVarNum); var_types loType = loVarDsc->lvType; var_types hiType = hiVarDsc->lvType; if ((varTypeIsFloating(loType) != genIsValidFloatReg(fgEntryPtr->GetRegNum(0))) || (varTypeIsFloating(hiType) != genIsValidFloatReg(fgEntryPtr->GetRegNum(1)))) { // TODO-LSRA - It currently doesn't support the passing of floating point LCL_VARS in the integer // registers. So for now we will use GT_LCLFLD's to pass this struct (it won't be enregistered) // JITDUMP("Multireg struct V%02u will be passed using GT_LCLFLD because it has float fields.\n", varNum); // // we call lvaSetVarDoNotEnregister and do the proper transformation below. // } else { // We can use the struct promoted field as the two arguments // Create a new tree for 'arg' // replace the existing LDOBJ(ADDR(LCLVAR)) // with a FIELD_LIST(LCLVAR-LO, FIELD_LIST(LCLVAR-HI, nullptr)) // newArg = new (this, GT_FIELD_LIST) GenTreeFieldList(); newArg->AddField(this, gtNewLclvNode(loVarNum, loType), 0, loType); newArg->AddField(this, gtNewLclvNode(hiVarNum, hiType), TARGET_POINTER_SIZE, hiType); } } } else { // // We will create a list of GT_LCL_FLDs nodes to pass this struct // lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField)); } #elif defined(TARGET_ARM) // Is this LclVar a promoted struct with exactly same size? if (varDsc->lvPromoted && (varDsc->lvFieldCnt == elemCount) && !varDsc->lvIsHfa()) { // See if we have promoted fields? unsigned varNums[4]; bool hasBadVarNum = false; for (unsigned inx = 0; inx < elemCount; inx++) { varNums[inx] = lvaGetFieldLocal(varDsc, TARGET_POINTER_SIZE * inx); if (varNums[inx] == BAD_VAR_NUM) { hasBadVarNum = true; break; } } // Did we find the promoted fields at the necessary offsets? if (!hasBadVarNum) { LclVarDsc* varDscs[4]; var_types varType[4]; bool varIsFloat = false; for (unsigned inx = 0; inx < elemCount; inx++) { varDscs[inx] = lvaGetDesc(varNums[inx]); varType[inx] = varDscs[inx]->lvType; if (varTypeIsFloating(varType[inx])) { // TODO-LSRA - It currently doesn't support the passing of floating point LCL_VARS in the // integer // registers. So for now we will use GT_LCLFLD's to pass this struct (it won't be enregistered) // JITDUMP("Multireg struct V%02u will be passed using GT_LCLFLD because it has float fields.\n", varNum); // // we call lvaSetVarDoNotEnregister and do the proper transformation below. // varIsFloat = true; break; } } if (!varIsFloat) { newArg = fgMorphLclArgToFieldlist(varNode); } } } else { // // We will create a list of GT_LCL_FLDs nodes to pass this struct // lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField)); } #endif // TARGET_ARM } // If we didn't set newarg to a new List Node tree // if (newArg == nullptr) { if (fgEntryPtr->GetRegNum() == REG_STK) { // We leave this stack passed argument alone return arg; } // Are we passing a GT_LCL_FLD (or a GT_LCL_VAR that was not struct promoted ) // A GT_LCL_FLD could also contain a 16-byte struct or HFA struct inside it? 
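        // If so, the generic lowering below reads the struct piece by piece using GT_LCL_FLD
        // nodes at successive offsets (forcing the local to live on the stack frame).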
// if ((argValue->OperGet() == GT_LCL_FLD) || (argValue->OperGet() == GT_LCL_VAR)) { GenTreeLclVarCommon* varNode = argValue->AsLclVarCommon(); unsigned varNum = varNode->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(varNum); unsigned baseOffset = varNode->GetLclOffs(); unsigned lastOffset = baseOffset + structSize; // The allocated size of our LocalVar must be at least as big as lastOffset assert(varDsc->lvSize() >= lastOffset); if (varDsc->HasGCPtr()) { // alignment of the baseOffset is required noway_assert((baseOffset % TARGET_POINTER_SIZE) == 0); #ifndef UNIX_AMD64_ABI noway_assert(elemSize == TARGET_POINTER_SIZE); #endif unsigned baseIndex = baseOffset / TARGET_POINTER_SIZE; ClassLayout* layout = varDsc->GetLayout(); for (unsigned inx = 0; (inx < elemCount); inx++) { // The GC information must match what we setup using 'objClass' if (layout->IsGCPtr(baseIndex + inx) || varTypeGCtype(type[inx])) { noway_assert(type[inx] == layout->GetGCPtrType(baseIndex + inx)); } } } else // this varDsc contains no GC pointers { for (unsigned inx = 0; inx < elemCount; inx++) { // The GC information must match what we setup using 'objClass' noway_assert(!varTypeIsGC(type[inx])); } } // // We create a list of GT_LCL_FLDs nodes to pass this struct // lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField)); // Create a new tree for 'arg' // replace the existing LDOBJ(ADDR(LCLVAR)) // with a FIELD_LIST(LCLFLD-LO, LCLFLD-HI) // unsigned offset = baseOffset; newArg = new (this, GT_FIELD_LIST) GenTreeFieldList(); for (unsigned inx = 0; inx < elemCount; inx++) { GenTree* nextLclFld = gtNewLclFldNode(varNum, type[inx], offset); newArg->AddField(this, nextLclFld, offset, type[inx]); offset += genTypeSize(type[inx]); } } // Are we passing a GT_OBJ struct? // else if (argValue->OperGet() == GT_OBJ) { GenTreeObj* argObj = argValue->AsObj(); GenTree* baseAddr = argObj->gtOp1; var_types addrType = baseAddr->TypeGet(); if (baseAddr->OperGet() == GT_ADDR) { GenTree* addrTaken = baseAddr->AsOp()->gtOp1; if (addrTaken->IsLocal()) { GenTreeLclVarCommon* varNode = addrTaken->AsLclVarCommon(); unsigned varNum = varNode->GetLclNum(); // We access non-struct type (for example, long) as a struct type. // Make sure lclVar lives on stack to make sure its fields are accessible by address. lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::LocalField)); } } // Create a new tree for 'arg' // replace the existing LDOBJ(EXPR) // with a FIELD_LIST(IND(EXPR), FIELD_LIST(IND(EXPR+8), nullptr) ...) // newArg = new (this, GT_FIELD_LIST) GenTreeFieldList(); unsigned offset = 0; for (unsigned inx = 0; inx < elemCount; inx++) { GenTree* curAddr = baseAddr; if (offset != 0) { GenTree* baseAddrDup = gtCloneExpr(baseAddr); noway_assert(baseAddrDup != nullptr); curAddr = gtNewOperNode(GT_ADD, addrType, baseAddrDup, gtNewIconNode(offset, TYP_I_IMPL)); } else { curAddr = baseAddr; } GenTree* curItem = gtNewIndir(type[inx], curAddr); // For safety all GT_IND should have at least GT_GLOB_REF set. 
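                // (We cannot in general tell what the cloned base address points at, so each
                // piecewise load is conservatively treated as a global reference.)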
curItem->gtFlags |= GTF_GLOB_REF; newArg->AddField(this, curItem, offset, type[inx]); offset += genTypeSize(type[inx]); } } } #ifdef DEBUG // If we reach here we should have set newArg to something if (newArg == nullptr) { gtDispTree(argValue); assert(!"Missing case in fgMorphMultiregStructArg"); } #endif noway_assert(newArg != nullptr); #ifdef DEBUG if (verbose) { printf("fgMorphMultiregStructArg created tree:\n"); gtDispTree(newArg); } #endif arg = newArg; // consider calling fgMorphTree(newArg); #endif // FEATURE_MULTIREG_ARGS return arg; } //------------------------------------------------------------------------ // fgMorphLclArgToFieldlist: Morph a GT_LCL_VAR node to a GT_FIELD_LIST of its promoted fields // // Arguments: // lcl - The GT_LCL_VAR node we will transform // // Return value: // The new GT_FIELD_LIST that we have created. // GenTreeFieldList* Compiler::fgMorphLclArgToFieldlist(GenTreeLclVarCommon* lcl) { LclVarDsc* varDsc = lvaGetDesc(lcl); assert(varDsc->lvPromoted); unsigned fieldCount = varDsc->lvFieldCnt; unsigned fieldLclNum = varDsc->lvFieldLclStart; GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList(); for (unsigned i = 0; i < fieldCount; i++) { LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum); GenTree* lclVar = gtNewLclvNode(fieldLclNum, fieldVarDsc->TypeGet()); fieldList->AddField(this, lclVar, fieldVarDsc->lvFldOffset, fieldVarDsc->TypeGet()); fieldLclNum++; } return fieldList; } //------------------------------------------------------------------------ // fgMakeOutgoingStructArgCopy: make a copy of a struct variable if necessary, // to pass to a callee. // // Arguments: // call - call being processed // args - args for the call // copyBlkClass - class handle for the struct // // The arg is updated if necessary with the copy. // void Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call, GenTreeCall::Use* args, CORINFO_CLASS_HANDLE copyBlkClass) { GenTree* argx = args->GetNode(); noway_assert(argx->gtOper != GT_MKREFANY); fgArgTabEntry* argEntry = Compiler::gtArgEntryByNode(call, argx); // If we're optimizing, see if we can avoid making a copy. // // We don't need a copy if this is the last use of an implicit by-ref local. // if (opts.OptimizationEnabled()) { GenTreeLclVar* const lcl = argx->IsImplicitByrefParameterValue(this); if (lcl != nullptr) { const unsigned varNum = lcl->GetLclNum(); LclVarDsc* const varDsc = lvaGetDesc(varNum); const unsigned short totalAppearances = varDsc->lvRefCnt(RCS_EARLY); // We don't have liveness so we rely on other indications of last use. // // We handle these cases: // // * (must not copy) If the call is a tail call, the use is a last use. // We must skip the copy if we have a fast tail call. // // * (may not copy) if the call is noreturn, the use is a last use. // We also check for just one reference here as we are not doing // alias analysis of the call's parameters, or checking if the call // site is not within some try region. // // * (may not copy) if there is exactly one use of the local in the method, // and the call is not in loop, this is a last use. // // fgMightHaveLoop() is expensive; check it last, only if necessary. 
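            // (Inside a loop a single textual appearance can still execute many times, so it
            // would not be a true last use; that is why the loop check is required.)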
// if (call->IsTailCall() || // ((totalAppearances == 1) && call->IsNoReturn()) || // ((totalAppearances == 1) && !fgMightHaveLoop())) { args->SetNode(lcl); assert(argEntry->GetNode() == lcl); JITDUMP("did not need to make outgoing copy for last use of implicit byref V%2d\n", varNum); return; } } } JITDUMP("making an outgoing copy for struct arg\n"); if (fgOutgoingArgTemps == nullptr) { fgOutgoingArgTemps = hashBv::Create(this); } unsigned tmp = 0; bool found = false; // Attempt to find a local we have already used for an outgoing struct and reuse it. // We do not reuse within a statement. if (!opts.MinOpts()) { indexType lclNum; FOREACH_HBV_BIT_SET(lclNum, fgOutgoingArgTemps) { LclVarDsc* varDsc = lvaGetDesc((unsigned)lclNum); if (typeInfo::AreEquivalent(varDsc->lvVerTypeInfo, typeInfo(TI_STRUCT, copyBlkClass)) && !fgCurrentlyInUseArgTemps->testBit(lclNum)) { tmp = (unsigned)lclNum; found = true; JITDUMP("reusing outgoing struct arg"); break; } } NEXT_HBV_BIT_SET; } // Create the CopyBlk tree and insert it. if (!found) { // Get a new temp // Here We don't need unsafe value cls check, since the addr of this temp is used only in copyblk. tmp = lvaGrabTemp(true DEBUGARG("by-value struct argument")); lvaSetStruct(tmp, copyBlkClass, false); if (call->IsVarargs()) { lvaSetStructUsedAsVarArg(tmp); } fgOutgoingArgTemps->setBit(tmp); } fgCurrentlyInUseArgTemps->setBit(tmp); // TYP_SIMD structs should not be enregistered, since ABI requires it to be // allocated on stack and address of it needs to be passed. if (lclVarIsSIMDType(tmp)) { // TODO: check if we need this block here or other parts already deal with it. lvaSetVarDoNotEnregister(tmp DEBUGARG(DoNotEnregisterReason::IsStructArg)); } // Create a reference to the temp GenTree* dest = gtNewLclvNode(tmp, lvaTable[tmp].lvType); dest->gtFlags |= (GTF_DONT_CSE | GTF_VAR_DEF); // This is a def of the local, "entire" by construction. // Copy the valuetype to the temp GenTree* copyBlk = gtNewBlkOpNode(dest, argx, false /* not volatile */, true /* copyBlock */); copyBlk = fgMorphCopyBlock(copyBlk); #if FEATURE_FIXED_OUT_ARGS // Do the copy early, and evalute the temp later (see EvalArgsToTemps) // When on Unix create LCL_FLD for structs passed in more than one registers. See fgMakeTmpArgNode GenTree* arg = copyBlk; #else // FEATURE_FIXED_OUT_ARGS // Structs are always on the stack, and thus never need temps // so we have to put the copy and temp all into one expression. argEntry->tmpNum = tmp; GenTree* arg = fgMakeTmpArgNode(argEntry); // Change the expression to "(tmp=val),tmp" arg = gtNewOperNode(GT_COMMA, arg->TypeGet(), copyBlk, arg); #endif // FEATURE_FIXED_OUT_ARGS args->SetNode(arg); call->fgArgInfo->EvalToTmp(argEntry, tmp, arg); } #ifdef TARGET_ARM // See declaration for specification comment. void Compiler::fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc, unsigned firstArgRegNum, regMaskTP* pArgSkippedRegMask) { assert(varDsc->lvPromoted); // There's no way to do these calculations without breaking abstraction and assuming that // integer register arguments are consecutive ints. They are on ARM. // To start, figure out what register contains the last byte of the first argument. LclVarDsc* firstFldVarDsc = lvaGetDesc(varDsc->lvFieldLclStart); unsigned lastFldRegOfLastByte = (firstFldVarDsc->lvFldOffset + firstFldVarDsc->lvExactSize - 1) / TARGET_POINTER_SIZE; ; // Now we're keeping track of the register that the last field ended in; see what registers // subsequent fields start in, and whether any are skipped. 
// (We assume here the invariant that the fields are sorted in offset order.) for (unsigned fldVarOffset = 1; fldVarOffset < varDsc->lvFieldCnt; fldVarOffset++) { unsigned fldVarNum = varDsc->lvFieldLclStart + fldVarOffset; LclVarDsc* fldVarDsc = lvaGetDesc(fldVarNum); unsigned fldRegOffset = fldVarDsc->lvFldOffset / TARGET_POINTER_SIZE; assert(fldRegOffset >= lastFldRegOfLastByte); // Assuming sorted fields. // This loop should enumerate the offsets of any registers skipped. // Find what reg contains the last byte: // And start at the first register after that. If that isn't the first reg of the current for (unsigned skippedRegOffsets = lastFldRegOfLastByte + 1; skippedRegOffsets < fldRegOffset; skippedRegOffsets++) { // If the register number would not be an arg reg, we're done. if (firstArgRegNum + skippedRegOffsets >= MAX_REG_ARG) return; *pArgSkippedRegMask |= genRegMask(regNumber(firstArgRegNum + skippedRegOffsets)); } lastFldRegOfLastByte = (fldVarDsc->lvFldOffset + fldVarDsc->lvExactSize - 1) / TARGET_POINTER_SIZE; } } #endif // TARGET_ARM /***************************************************************************** * * A little helper used to rearrange nested commutative operations. The * effect is that nested associative, commutative operations are transformed * into a 'left-deep' tree, i.e. into something like this: * * (((a op b) op c) op d) op... */ #if REARRANGE_ADDS void Compiler::fgMoveOpsLeft(GenTree* tree) { GenTree* op1; GenTree* op2; genTreeOps oper; do { op1 = tree->AsOp()->gtOp1; op2 = tree->AsOp()->gtOp2; oper = tree->OperGet(); noway_assert(GenTree::OperIsCommutative(oper)); noway_assert(oper == GT_ADD || oper == GT_XOR || oper == GT_OR || oper == GT_AND || oper == GT_MUL); noway_assert(!varTypeIsFloating(tree->TypeGet()) || !opts.genFPorder); noway_assert(oper == op2->gtOper); // Commutativity doesn't hold if overflow checks are needed if (tree->gtOverflowEx() || op2->gtOverflowEx()) { return; } if (gtIsActiveCSE_Candidate(op2)) { // If we have marked op2 as a CSE candidate, // we can't perform a commutative reordering // because any value numbers that we computed for op2 // will be incorrect after performing a commutative reordering // return; } if (oper == GT_MUL && (op2->gtFlags & GTF_MUL_64RSLT)) { return; } // Check for GTF_ADDRMODE_NO_CSE flag on add/mul Binary Operators if (((oper == GT_ADD) || (oper == GT_MUL)) && ((tree->gtFlags & GTF_ADDRMODE_NO_CSE) != 0)) { return; } if ((tree->gtFlags | op2->gtFlags) & GTF_BOOLEAN) { // We could deal with this, but we were always broken and just hit the assert // below regarding flags, which means it's not frequent, so will just bail out. // See #195514 return; } noway_assert(!tree->gtOverflowEx() && !op2->gtOverflowEx()); GenTree* ad1 = op2->AsOp()->gtOp1; GenTree* ad2 = op2->AsOp()->gtOp2; // Compiler::optOptimizeBools() can create GT_OR of two GC pointers yeilding a GT_INT // We can not reorder such GT_OR trees // if (varTypeIsGC(ad1->TypeGet()) != varTypeIsGC(op2->TypeGet())) { break; } // Don't split up a byref calculation and create a new byref. E.g., // [byref]+ (ref, [int]+ (int, int)) => [byref]+ ([byref]+ (ref, int), int). // Doing this transformation could create a situation where the first // addition (that is, [byref]+ (ref, int) ) creates a byref pointer that // no longer points within the ref object. If a GC happens, the byref won't // get updated. This can happen, for instance, if one of the int components // is negative. 
It also requires the address generation be in a fully-interruptible // code region. // if (varTypeIsGC(op1->TypeGet()) && op2->TypeGet() == TYP_I_IMPL) { assert(varTypeIsGC(tree->TypeGet()) && (oper == GT_ADD)); break; } /* Change "(x op (y op z))" to "(x op y) op z" */ /* ie. "(op1 op (ad1 op ad2))" to "(op1 op ad1) op ad2" */ GenTree* new_op1 = op2; new_op1->AsOp()->gtOp1 = op1; new_op1->AsOp()->gtOp2 = ad1; /* Change the flags. */ // Make sure we arent throwing away any flags noway_assert((new_op1->gtFlags & ~(GTF_MAKE_CSE | GTF_DONT_CSE | // It is ok that new_op1->gtFlags contains GTF_DONT_CSE flag. GTF_REVERSE_OPS | // The reverse ops flag also can be set, it will be re-calculated GTF_NODE_MASK | GTF_ALL_EFFECT | GTF_UNSIGNED)) == 0); new_op1->gtFlags = (new_op1->gtFlags & (GTF_NODE_MASK | GTF_DONT_CSE)) | // Make sure we propagate GTF_DONT_CSE flag. (op1->gtFlags & GTF_ALL_EFFECT) | (ad1->gtFlags & GTF_ALL_EFFECT); /* Retype new_op1 if it has not/become a GC ptr. */ if (varTypeIsGC(op1->TypeGet())) { noway_assert((varTypeIsGC(tree->TypeGet()) && op2->TypeGet() == TYP_I_IMPL && oper == GT_ADD) || // byref(ref + (int+int)) (varTypeIsI(tree->TypeGet()) && op2->TypeGet() == TYP_I_IMPL && oper == GT_OR)); // int(gcref | int(gcref|intval)) new_op1->gtType = tree->gtType; } else if (varTypeIsGC(ad2->TypeGet())) { // Neither ad1 nor op1 are GC. So new_op1 isnt either noway_assert(op1->gtType == TYP_I_IMPL && ad1->gtType == TYP_I_IMPL); new_op1->gtType = TYP_I_IMPL; } // If new_op1 is a new expression. Assign it a new unique value number. // vnStore is null before the ValueNumber phase has run if (vnStore != nullptr) { // We can only keep the old value number on new_op1 if both op1 and ad2 // have the same non-NoVN value numbers. Since op is commutative, comparing // only ad2 and op1 is enough. if ((op1->gtVNPair.GetLiberal() == ValueNumStore::NoVN) || (ad2->gtVNPair.GetLiberal() == ValueNumStore::NoVN) || (ad2->gtVNPair.GetLiberal() != op1->gtVNPair.GetLiberal())) { new_op1->gtVNPair.SetBoth(vnStore->VNForExpr(nullptr, new_op1->TypeGet())); } } tree->AsOp()->gtOp1 = new_op1; tree->AsOp()->gtOp2 = ad2; /* If 'new_op1' is now the same nested op, process it recursively */ if ((ad1->gtOper == oper) && !ad1->gtOverflowEx()) { fgMoveOpsLeft(new_op1); } /* If 'ad2' is now the same nested op, process it * Instead of recursion, we set up op1 and op2 for the next loop. 
*/ op1 = new_op1; op2 = ad2; } while ((op2->gtOper == oper) && !op2->gtOverflowEx()); return; } #endif /*****************************************************************************/ void Compiler::fgSetRngChkTarget(GenTree* tree, bool delay) { if (tree->OperIs(GT_BOUNDS_CHECK)) { GenTreeBoundsChk* const boundsChk = tree->AsBoundsChk(); BasicBlock* const failBlock = fgSetRngChkTargetInner(boundsChk->gtThrowKind, delay); if (failBlock != nullptr) { boundsChk->gtIndRngFailBB = failBlock; } } else if (tree->OperIs(GT_INDEX_ADDR)) { GenTreeIndexAddr* const indexAddr = tree->AsIndexAddr(); BasicBlock* const failBlock = fgSetRngChkTargetInner(SCK_RNGCHK_FAIL, delay); if (failBlock != nullptr) { indexAddr->gtIndRngFailBB = failBlock; } } else { noway_assert(tree->OperIs(GT_ARR_ELEM, GT_ARR_INDEX)); fgSetRngChkTargetInner(SCK_RNGCHK_FAIL, delay); } } BasicBlock* Compiler::fgSetRngChkTargetInner(SpecialCodeKind kind, bool delay) { if (opts.MinOpts()) { delay = false; } if (!opts.compDbgCode) { if (!delay && !compIsForInlining()) { // Create/find the appropriate "range-fail" label return fgRngChkTarget(compCurBB, kind); } } return nullptr; } /***************************************************************************** * * Expand a GT_INDEX node and fully morph the child operands * * The orginal GT_INDEX node is bashed into the GT_IND node that accesses * the array element. We expand the GT_INDEX node into a larger tree that * evaluates the array base and index. The simplest expansion is a GT_COMMA * with a GT_BOUNDS_CHECK and a GT_IND with a GTF_INX_RNGCHK flag. * For complex array or index expressions one or more GT_COMMA assignments * are inserted so that we only evaluate the array or index expressions once. * * The fully expanded tree is then morphed. This causes gtFoldExpr to * perform local constant prop and reorder the constants in the tree and * fold them. * * We then parse the resulting array element expression in order to locate * and label the constants and variables that occur in the tree. */ const int MAX_ARR_COMPLEXITY = 4; const int MAX_INDEX_COMPLEXITY = 4; GenTree* Compiler::fgMorphArrayIndex(GenTree* tree) { noway_assert(tree->gtOper == GT_INDEX); GenTreeIndex* asIndex = tree->AsIndex(); var_types elemTyp = asIndex->TypeGet(); unsigned elemSize = asIndex->gtIndElemSize; CORINFO_CLASS_HANDLE elemStructType = asIndex->gtStructElemClass; noway_assert(elemTyp != TYP_STRUCT || elemStructType != nullptr); // Fold "cns_str"[cns_index] to ushort constant // NOTE: don't do it for empty string, the operation will fail anyway if (opts.OptimizationEnabled() && asIndex->Arr()->OperIs(GT_CNS_STR) && !asIndex->Arr()->AsStrCon()->IsStringEmptyField() && asIndex->Index()->IsIntCnsFitsInI32()) { const int cnsIndex = static_cast<int>(asIndex->Index()->AsIntConCommon()->IconValue()); if (cnsIndex >= 0) { int length; const char16_t* str = info.compCompHnd->getStringLiteral(asIndex->Arr()->AsStrCon()->gtScpHnd, asIndex->Arr()->AsStrCon()->gtSconCPX, &length); if ((cnsIndex < length) && (str != nullptr)) { GenTree* cnsCharNode = gtNewIconNode(str[cnsIndex], TYP_INT); INDEBUG(cnsCharNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return cnsCharNode; } } } #ifdef FEATURE_SIMD if (supportSIMDTypes() && varTypeIsStruct(elemTyp) && structSizeMightRepresentSIMDType(elemSize)) { // If this is a SIMD type, this is the point at which we lose the type information, // so we need to set the correct type on the GT_IND. // (We don't care about the base type here, so we only check, but don't retain, the return value). 
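        // e.g. for a Vector4[] the element size is 16, so the element access becomes a
        // TYP_SIMD16 indirection instead of a TYP_STRUCT one.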
unsigned simdElemSize = 0; if (getBaseJitTypeAndSizeOfSIMDType(elemStructType, &simdElemSize) != CORINFO_TYPE_UNDEF) { assert(simdElemSize == elemSize); elemTyp = getSIMDTypeForSize(elemSize); // This is the new type of the node. tree->gtType = elemTyp; // Now set elemStructType to null so that we don't confuse value numbering. elemStructType = nullptr; } } #endif // FEATURE_SIMD // Set up the array length's offset into lenOffs // And the first element's offset into elemOffs ssize_t lenOffs; ssize_t elemOffs; if (tree->gtFlags & GTF_INX_STRING_LAYOUT) { lenOffs = OFFSETOF__CORINFO_String__stringLen; elemOffs = OFFSETOF__CORINFO_String__chars; tree->gtFlags &= ~GTF_INX_STRING_LAYOUT; // Clear this flag as it is used for GTF_IND_VOLATILE } else { // We have a standard array lenOffs = OFFSETOF__CORINFO_Array__length; elemOffs = OFFSETOF__CORINFO_Array__data; } // In minopts, we expand GT_INDEX to GT_IND(GT_INDEX_ADDR) in order to minimize the size of the IR. As minopts // compilation time is roughly proportional to the size of the IR, this helps keep compilation times down. // Furthermore, this representation typically saves on code size in minopts w.r.t. the complete expansion // performed when optimizing, as it does not require LclVar nodes (which are always stack loads/stores in // minopts). // // When we *are* optimizing, we fully expand GT_INDEX to: // 1. Evaluate the array address expression and store the result in a temp if the expression is complex or // side-effecting. // 2. Evaluate the array index expression and store the result in a temp if the expression is complex or // side-effecting. // 3. Perform an explicit bounds check: GT_BOUNDS_CHECK(index, GT_ARR_LENGTH(array)) // 4. Compute the address of the element that will be accessed: // GT_ADD(GT_ADD(array, firstElementOffset), GT_MUL(index, elementSize)) // 5. Dereference the address with a GT_IND. // // This expansion explicitly exposes the bounds check and the address calculation to the optimizer, which allows // for more straightforward bounds-check removal, CSE, etc. if (opts.MinOpts()) { GenTree* const array = fgMorphTree(asIndex->Arr()); GenTree* const index = fgMorphTree(asIndex->Index()); GenTreeIndexAddr* const indexAddr = new (this, GT_INDEX_ADDR) GenTreeIndexAddr(array, index, elemTyp, elemStructType, elemSize, static_cast<unsigned>(lenOffs), static_cast<unsigned>(elemOffs)); indexAddr->gtFlags |= (array->gtFlags | index->gtFlags) & GTF_ALL_EFFECT; // Mark the indirection node as needing a range check if necessary. 
// Note this will always be true unless JitSkipArrayBoundCheck() is used if ((indexAddr->gtFlags & GTF_INX_RNGCHK) != 0) { fgSetRngChkTarget(indexAddr); } if (!tree->TypeIs(TYP_STRUCT)) { tree->ChangeOper(GT_IND); } else { DEBUG_DESTROY_NODE(tree); tree = gtNewObjNode(elemStructType, indexAddr); INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); } GenTreeIndir* const indir = tree->AsIndir(); indir->Addr() = indexAddr; bool canCSE = indir->CanCSE(); indir->gtFlags = GTF_IND_ARR_INDEX | (indexAddr->gtFlags & GTF_ALL_EFFECT); if (!canCSE) { indir->SetDoNotCSE(); } INDEBUG(indexAddr->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return indir; } GenTree* arrRef = asIndex->Arr(); GenTree* index = asIndex->Index(); bool chkd = ((tree->gtFlags & GTF_INX_RNGCHK) != 0); // if false, range checking will be disabled bool indexNonFaulting = ((tree->gtFlags & GTF_INX_NOFAULT) != 0); // if true, mark GTF_IND_NONFAULTING bool nCSE = ((tree->gtFlags & GTF_DONT_CSE) != 0); GenTree* arrRefDefn = nullptr; // non-NULL if we need to allocate a temp for the arrRef expression GenTree* indexDefn = nullptr; // non-NULL if we need to allocate a temp for the index expression GenTree* bndsChk = nullptr; // If we're doing range checking, introduce a GT_BOUNDS_CHECK node for the address. if (chkd) { GenTree* arrRef2 = nullptr; // The second copy will be used in array address expression GenTree* index2 = nullptr; // If the arrRef or index expressions involves an assignment, a call or reads from global memory, // then we *must* allocate a temporary in which to "localize" those values, to ensure that the // same values are used in the bounds check and the actual dereference. // Also we allocate the temporary when the expresion is sufficiently complex/expensive. // // Note that if the expression is a GT_FIELD, it has not yet been morphed so its true complexity is // not exposed. Without that condition there are cases of local struct fields that were previously, // needlessly, marked as GTF_GLOB_REF, and when that was fixed, there were some regressions that // were mostly ameliorated by adding this condition. // // Likewise, allocate a temporary if the expression is a GT_LCL_FLD node. These used to be created // after fgMorphArrayIndex from GT_FIELD trees so this preserves the existing behavior. This is // perhaps a decision that should be left to CSE but FX diffs show that it is slightly better to // do this here. if ((arrRef->gtFlags & (GTF_ASG | GTF_CALL | GTF_GLOB_REF)) || gtComplexityExceeds(&arrRef, MAX_ARR_COMPLEXITY) || arrRef->OperIs(GT_FIELD, GT_LCL_FLD)) { unsigned arrRefTmpNum = lvaGrabTemp(true DEBUGARG("arr expr")); arrRefDefn = gtNewTempAssign(arrRefTmpNum, arrRef); arrRef = gtNewLclvNode(arrRefTmpNum, arrRef->TypeGet()); arrRef2 = gtNewLclvNode(arrRefTmpNum, arrRef->TypeGet()); } else { arrRef2 = gtCloneExpr(arrRef); noway_assert(arrRef2 != nullptr); } if ((index->gtFlags & (GTF_ASG | GTF_CALL | GTF_GLOB_REF)) || gtComplexityExceeds(&index, MAX_ARR_COMPLEXITY) || index->OperIs(GT_FIELD, GT_LCL_FLD)) { unsigned indexTmpNum = lvaGrabTemp(true DEBUGARG("index expr")); indexDefn = gtNewTempAssign(indexTmpNum, index); index = gtNewLclvNode(indexTmpNum, index->TypeGet()); index2 = gtNewLclvNode(indexTmpNum, index->TypeGet()); } else { index2 = gtCloneExpr(index); noway_assert(index2 != nullptr); } // Next introduce a GT_BOUNDS_CHECK node var_types bndsChkType = TYP_INT; // By default, try to use 32-bit comparison for array bounds check. 
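        // As a rough illustrative sketch (assuming a simple int[] access "a[i]" where no temps were needed),
        // the overall expansion built in this path is:
        //     COMMA(BOUNDS_CHECK(i, ARR_LENGTH(a)), IND(ADD(a, ADD(MUL(i, 4), firstElementOffset))))
        // with additional COMMA-prepended temp assignments when the array or index expressions are complex.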
#ifdef TARGET_64BIT
        // The CLI Spec allows an array to be indexed by either an int32 or a native int. In the case
        // of a 64 bit architecture this means the array index can potentially be a TYP_LONG, so for this case,
        // the comparison will have to be widened to 64 bits.
        if (index->TypeGet() == TYP_I_IMPL)
        {
            bndsChkType = TYP_I_IMPL;
        }
#endif // TARGET_64BIT

        GenTree* arrLen = gtNewArrLen(TYP_INT, arrRef, (int)lenOffs, compCurBB);

        if (bndsChkType != TYP_INT)
        {
            arrLen = gtNewCastNode(bndsChkType, arrLen, true, bndsChkType);
        }

        GenTreeBoundsChk* arrBndsChk = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(index, arrLen, SCK_RNGCHK_FAIL);

        bndsChk = arrBndsChk;

        // Now we'll switch to using the second copies for arrRef and index
        // to compute the address expression
        arrRef = arrRef2;
        index  = index2;
    }

    // Create the "addr" which is "*(arrRef + ((index * elemSize) + elemOffs))"
    GenTree* addr;

#ifdef TARGET_64BIT
    // Widen 'index' on 64-bit targets
    if (index->TypeGet() != TYP_I_IMPL)
    {
        if (index->OperGet() == GT_CNS_INT)
        {
            index->gtType = TYP_I_IMPL;
        }
        else
        {
            index = gtNewCastNode(TYP_I_IMPL, index, true, TYP_I_IMPL);
        }
    }
#endif // TARGET_64BIT

    /* Scale the index value if necessary */
    if (elemSize > 1)
    {
        GenTree* size = gtNewIconNode(elemSize, TYP_I_IMPL);

        // Fix 392756 WP7 Crossgen
        //
        // During codegen optGetArrayRefScaleAndIndex() makes the assumption that op2 of a GT_MUL node
        // is a constant and is not capable of handling CSE'ing the elemSize constant into a lclvar.
        // Hence to prevent the constant from becoming a CSE we mark it as NO_CSE.
        //
        size->gtFlags |= GTF_DONT_CSE;

        /* Multiply by the array element size */
        addr = gtNewOperNode(GT_MUL, TYP_I_IMPL, index, size);
    }
    else
    {
        addr = index;
    }

    // Be careful to only create the byref pointer when the full index expression is added to the array reference.
    // We don't want to create a partial byref address expression that doesn't include the full index offset:
    // a byref must point within the containing object. It is dangerous (especially when optimizations come into
    // play) to create a "partial" byref that doesn't point exactly to the correct object; there is risk that
    // the partial byref will not point within the object, and thus not get updated correctly during a GC.
    // This is mostly a risk in fully-interruptible code regions.
    // We can generate two types of trees for "addr":
    //
    //  1) "arrRef + (index + elemOffset)"
    //  2) "(arrRef + elemOffset) + index"
    //
    // XArch has powerful addressing modes such as [base + index*scale + offset] so it's fine with 1),
    // while for Arm it is better to make an invariant sub-tree as large as possible, which is usually
    // "(arrRef + elemOffset)" and is CSE/LoopHoisting friendly => produces better codegen.
    // 2) should still be safe from GC's point of view since both ADD operations are byref and point to
    // within the object so GC will be able to correctly track and update them.

    bool groupArrayRefWithElemOffset = false;
#ifdef TARGET_ARMARCH
    groupArrayRefWithElemOffset = true;
    // TODO: in some cases even on ARM it is better to use the 1) shape because if "index" is invariant and "arrRef" is
    // not, we at least will be able to hoist/CSE "index + elemOffset" in some cases.
// See https://github.com/dotnet/runtime/pull/61293#issuecomment-964146497 // Use 2) form only for primitive types for now - it significantly reduced number of size regressions if (!varTypeIsIntegral(elemTyp) && !varTypeIsFloating(elemTyp)) { groupArrayRefWithElemOffset = false; } #endif // First element's offset GenTree* elemOffset = gtNewIconNode(elemOffs, TYP_I_IMPL); if (groupArrayRefWithElemOffset) { GenTree* basePlusOffset = gtNewOperNode(GT_ADD, TYP_BYREF, arrRef, elemOffset); addr = gtNewOperNode(GT_ADD, TYP_BYREF, basePlusOffset, addr); } else { addr = gtNewOperNode(GT_ADD, TYP_I_IMPL, addr, elemOffset); addr = gtNewOperNode(GT_ADD, TYP_BYREF, arrRef, addr); } assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE) != 0) || (GenTree::s_gtNodeSizes[GT_IND] == TREE_NODE_SZ_SMALL)); // Change the orginal GT_INDEX node into a GT_IND node tree->SetOper(GT_IND); // If the index node is a floating-point type, notify the compiler // we'll potentially use floating point registers at the time of codegen. if (varTypeUsesFloatReg(tree->gtType)) { this->compFloatingPointUsed = true; } // We've now consumed the GTF_INX_RNGCHK and GTF_INX_NOFAULT, and the node // is no longer a GT_INDEX node. tree->gtFlags &= ~(GTF_INX_RNGCHK | GTF_INX_NOFAULT); tree->AsOp()->gtOp1 = addr; // This is an array index expression. tree->gtFlags |= GTF_IND_ARR_INDEX; // If there's a bounds check, the indir won't fault. if (bndsChk || indexNonFaulting) { tree->gtFlags |= GTF_IND_NONFAULTING; } else { tree->gtFlags |= GTF_EXCEPT; } if (nCSE) { tree->gtFlags |= GTF_DONT_CSE; } // Store information about it. GetArrayInfoMap()->Set(tree, ArrayInfo(elemTyp, elemSize, (int)elemOffs, elemStructType)); // Remember this 'indTree' that we just created, as we still need to attach the fieldSeq information to it. GenTree* indTree = tree; // Did we create a bndsChk tree? if (bndsChk) { // Use a GT_COMMA node to prepend the array bound check // tree = gtNewOperNode(GT_COMMA, elemTyp, bndsChk, tree); /* Mark the indirection node as needing a range check */ fgSetRngChkTarget(bndsChk); } if (indexDefn != nullptr) { // Use a GT_COMMA node to prepend the index assignment // tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), indexDefn, tree); } if (arrRefDefn != nullptr) { // Use a GT_COMMA node to prepend the arRef assignment // tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), arrRefDefn, tree); } JITDUMP("fgMorphArrayIndex (before remorph):\n") DISPTREE(tree) // Currently we morph the tree to perform some folding operations prior // to attaching fieldSeq info and labeling constant array index contributions // tree = fgMorphTree(tree); JITDUMP("fgMorphArrayIndex (after remorph):\n") DISPTREE(tree) // Ideally we just want to proceed to attaching fieldSeq info and labeling the // constant array index contributions, but the morphing operation may have changed // the 'tree' into something that now unconditionally throws an exception. // // In such case the gtEffectiveVal could be a new tree or it's gtOper could be modified // or it could be left unchanged. If it is unchanged then we should not return, // instead we should proceed to attaching fieldSeq info, etc... // GenTree* arrElem = tree->gtEffectiveVal(); if (fgIsCommaThrow(tree)) { if ((arrElem != indTree) || // A new tree node may have been created (!indTree->OperIs(GT_IND))) // The GT_IND may have been changed to a GT_CNS_INT { return tree; // Just return the Comma-Throw, don't try to attach the fieldSeq info, etc.. 
} } assert(!fgGlobalMorph || (arrElem->gtDebugFlags & GTF_DEBUG_NODE_MORPHED)); DBEXEC(fgGlobalMorph && (arrElem == tree), tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED) addr = arrElem->gtGetOp1(); GenTree* cnsOff = nullptr; if (addr->OperIs(GT_ADD)) { GenTree* addrOp1 = addr->gtGetOp1(); if (groupArrayRefWithElemOffset) { if (addrOp1->OperIs(GT_ADD) && addrOp1->gtGetOp2()->IsCnsIntOrI()) { assert(addrOp1->gtGetOp1()->TypeIs(TYP_REF)); cnsOff = addrOp1->gtGetOp2(); addr = addr->gtGetOp2(); // Label any constant array index contributions with #ConstantIndex and any LclVars with // GTF_VAR_ARR_INDEX addr->LabelIndex(this); } else { assert(addr->gtGetOp2()->IsCnsIntOrI()); cnsOff = addr->gtGetOp2(); addr = nullptr; } } else { assert(addr->TypeIs(TYP_BYREF)); assert(addr->gtGetOp1()->TypeIs(TYP_REF)); addr = addr->gtGetOp2(); // Look for the constant [#FirstElem] node here, or as the RHS of an ADD. if (addr->IsCnsIntOrI()) { cnsOff = addr; addr = nullptr; } else { if ((addr->OperIs(GT_ADD)) && addr->gtGetOp2()->IsCnsIntOrI()) { cnsOff = addr->gtGetOp2(); addr = addr->gtGetOp1(); } // Label any constant array index contributions with #ConstantIndex and any LclVars with // GTF_VAR_ARR_INDEX addr->LabelIndex(this); } } } else if (addr->IsCnsIntOrI()) { cnsOff = addr; } FieldSeqNode* firstElemFseq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField); if ((cnsOff != nullptr) && (cnsOff->AsIntCon()->gtIconVal == elemOffs)) { // Assign it the [#FirstElem] field sequence // cnsOff->AsIntCon()->gtFieldSeq = firstElemFseq; } else // We have folded the first element's offset with the index expression { // Build the [#ConstantIndex, #FirstElem] field sequence // FieldSeqNode* constantIndexFseq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField); FieldSeqNode* fieldSeq = GetFieldSeqStore()->Append(constantIndexFseq, firstElemFseq); if (cnsOff == nullptr) // It must have folded into a zero offset { // Record in the general zero-offset map. fgAddFieldSeqForZeroOffset(addr, fieldSeq); } else { cnsOff->AsIntCon()->gtFieldSeq = fieldSeq; } } return tree; } #ifdef TARGET_X86 /***************************************************************************** * * Wrap fixed stack arguments for varargs functions to go through varargs * cookie to access them, except for the cookie itself. * * Non-x86 platforms are allowed to access all arguments directly * so we don't need this code. 
* */ GenTree* Compiler::fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs) { /* For the fixed stack arguments of a varargs function, we need to go through the varargs cookies to access them, except for the cookie itself */ LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->lvIsParam && !varDsc->lvIsRegArg && lclNum != lvaVarargsHandleArg) { // Create a node representing the local pointing to the base of the args GenTree* ptrArg = gtNewOperNode(GT_SUB, TYP_I_IMPL, gtNewLclvNode(lvaVarargsBaseOfStkArgs, TYP_I_IMPL), gtNewIconNode(varDsc->GetStackOffset() - codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES - lclOffs)); // Access the argument through the local GenTree* tree; if (varTypeIsStruct(varType)) { CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd(); assert(typeHnd != nullptr); tree = gtNewObjNode(typeHnd, ptrArg); } else { tree = gtNewOperNode(GT_IND, varType, ptrArg); } tree->gtFlags |= GTF_IND_TGTANYWHERE; if (varDsc->IsAddressExposed()) { tree->gtFlags |= GTF_GLOB_REF; } return fgMorphTree(tree); } return NULL; } #endif /***************************************************************************** * * Transform the given GT_LCL_VAR tree for code generation. */ GenTree* Compiler::fgMorphLocalVar(GenTree* tree, bool forceRemorph) { assert(tree->gtOper == GT_LCL_VAR); unsigned lclNum = tree->AsLclVarCommon()->GetLclNum(); var_types varType = lvaGetRealType(lclNum); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->IsAddressExposed()) { tree->gtFlags |= GTF_GLOB_REF; } #ifdef TARGET_X86 if (info.compIsVarArgs) { GenTree* newTree = fgMorphStackArgForVarArgs(lclNum, varType, 0); if (newTree != nullptr) { if (newTree->OperIsBlk() && ((tree->gtFlags & GTF_VAR_DEF) == 0)) { newTree->SetOper(GT_IND); } return newTree; } } #endif // TARGET_X86 /* If not during the global morphing phase bail */ if (!fgGlobalMorph && !forceRemorph) { return tree; } bool varAddr = (tree->gtFlags & GTF_DONT_CSE) != 0; noway_assert(!(tree->gtFlags & GTF_VAR_DEF) || varAddr); // GTF_VAR_DEF should always imply varAddr if (!varAddr && varDsc->lvNormalizeOnLoad()) { // TYP_BOOL quirk: previously, the code in optAssertionIsSubrange did not handle TYP_BOOL. // Now it does, but this leads to some regressions because we lose the uniform VNs for trees // that represent the "reduced" normalize-on-load locals, i. e. LCL_VAR(small type V00), created // here with local assertions, and "expanded", i. e. CAST(small type <- LCL_VAR(int V00)). // This is a pretty fundamental problem with how normalize-on-load locals appear to the optimizer. // This quirk preserves the previous behavior. // TODO-CQ: fix the VNs for normalize-on-load locals and remove this quirk. bool isBoolQuirk = varType == TYP_BOOL; // Assertion prop can tell us to omit adding a cast here. This is // useful when the local is a small-typed parameter that is passed in a // register: in that case, the ABI specifies that the upper bits might // be invalid, but the assertion guarantees us that we have normalized // when we wrote it. if (optLocalAssertionProp && !isBoolQuirk && optAssertionIsSubrange(tree, IntegralRange::ForType(varType), apFull) != NO_ASSERTION_INDEX) { // The previous assertion can guarantee us that if this node gets // assigned a register, it will be normalized already. It is still // possible that this node ends up being in memory, in which case // normalization will still be needed, so we better have the right // type. 
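            // (For example, assuming a TYP_SHORT parameter passed in a register: without the assertion we
            // would have to load it as CAST(short <- int LCL_VAR); with it, the bare LCL_VAR use below suffices.)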
assert(tree->TypeGet() == varDsc->TypeGet()); return tree; } // Small-typed arguments and aliased locals are normalized on load. // Other small-typed locals are normalized on store. // Also, under the debugger as the debugger could write to the variable. // If this is one of the former, insert a narrowing cast on the load. // ie. Convert: var-short --> cast-short(var-int) tree->gtType = TYP_INT; fgMorphTreeDone(tree); tree = gtNewCastNode(TYP_INT, tree, false, varType); fgMorphTreeDone(tree); return tree; } return tree; } /***************************************************************************** Grab a temp for big offset morphing. This method will grab a new temp if no temp of this "type" has been created. Or it will return the same cached one if it has been created. */ unsigned Compiler::fgGetBigOffsetMorphingTemp(var_types type) { unsigned lclNum = fgBigOffsetMorphingTemps[type]; if (lclNum == BAD_VAR_NUM) { // We haven't created a temp for this kind of type. Create one now. lclNum = lvaGrabTemp(false DEBUGARG("Big Offset Morphing")); fgBigOffsetMorphingTemps[type] = lclNum; } else { // We better get the right type. noway_assert(lvaTable[lclNum].TypeGet() == type); } noway_assert(lclNum != BAD_VAR_NUM); return lclNum; } /***************************************************************************** * * Transform the given GT_FIELD tree for code generation. */ GenTree* Compiler::fgMorphField(GenTree* tree, MorphAddrContext* mac) { assert(tree->gtOper == GT_FIELD); CORINFO_FIELD_HANDLE symHnd = tree->AsField()->gtFldHnd; unsigned fldOffset = tree->AsField()->gtFldOffset; GenTree* objRef = tree->AsField()->GetFldObj(); bool fieldMayOverlap = false; bool objIsLocal = false; if (fgGlobalMorph && (objRef != nullptr) && (objRef->gtOper == GT_ADDR)) { // Make sure we've checked if 'objRef' is an address of an implicit-byref parameter. // If it is, fgMorphImplicitByRefArgs may change it do a different opcode, which the // simd field rewrites are sensitive to. fgMorphImplicitByRefArgs(objRef); } noway_assert(((objRef != nullptr) && (objRef->IsLocalAddrExpr() != nullptr)) || ((tree->gtFlags & GTF_GLOB_REF) != 0)); if (tree->AsField()->gtFldMayOverlap) { fieldMayOverlap = true; // Reset the flag because we may reuse the node. tree->AsField()->gtFldMayOverlap = false; } #ifdef FEATURE_SIMD // if this field belongs to simd struct, translate it to simd intrinsic. if (mac == nullptr) { if (IsBaselineSimdIsaSupported()) { GenTree* newTree = fgMorphFieldToSimdGetElement(tree); if (newTree != tree) { newTree = fgMorphTree(newTree); return newTree; } } } else if ((objRef != nullptr) && (objRef->OperGet() == GT_ADDR) && varTypeIsSIMD(objRef->gtGetOp1())) { GenTreeLclVarCommon* lcl = objRef->IsLocalAddrExpr(); if (lcl != nullptr) { lvaSetVarDoNotEnregister(lcl->GetLclNum() DEBUGARG(DoNotEnregisterReason::LocalField)); } } #endif // Create a default MorphAddrContext early so it doesn't go out of scope // before it is used. MorphAddrContext defMAC(MACK_Ind); /* Is this an instance data member? 
*/ if (objRef) { GenTree* addr; objIsLocal = objRef->IsLocal(); if (tree->gtFlags & GTF_IND_TLS_REF) { NO_WAY("instance field can not be a TLS ref."); } /* We'll create the expression "*(objRef + mem_offs)" */ noway_assert(varTypeIsGC(objRef->TypeGet()) || objRef->TypeGet() == TYP_I_IMPL); /* Now we have a tree like this: +--------------------+ | GT_FIELD | tree +----------+---------+ | +--------------+-------------+ |tree->AsField()->GetFldObj()| +--------------+-------------+ We want to make it like this (when fldOffset is <= MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT): +--------------------+ | GT_IND/GT_OBJ | tree +---------+----------+ | | +---------+----------+ | GT_ADD | addr +---------+----------+ | / \ / \ / \ +-------------------+ +----------------------+ | objRef | | fldOffset | | | | (when fldOffset !=0) | +-------------------+ +----------------------+ or this (when fldOffset is > MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT): +--------------------+ | GT_IND/GT_OBJ | tree +----------+---------+ | +----------+---------+ | GT_COMMA | comma2 +----------+---------+ | / \ / \ / \ / \ +---------+----------+ +---------+----------+ comma | GT_COMMA | | "+" (i.e. GT_ADD) | addr +---------+----------+ +---------+----------+ | | / \ / \ / \ / \ / \ / \ +-----+-----+ +-----+-----+ +---------+ +-----------+ asg | GT_ASG | ind | GT_IND | | tmpLcl | | fldOffset | +-----+-----+ +-----+-----+ +---------+ +-----------+ | | / \ | / \ | / \ | +-----+-----+ +-----+-----+ +-----------+ | tmpLcl | | objRef | | tmpLcl | +-----------+ +-----------+ +-----------+ */ var_types objRefType = objRef->TypeGet(); GenTree* comma = nullptr; // NULL mac means we encounter the GT_FIELD first. This denotes a dereference of the field, // and thus is equivalent to a MACK_Ind with zero offset. if (mac == nullptr) { mac = &defMAC; } // This flag is set to enable the "conservative" style of explicit null-check insertion. // This means that we insert an explicit null check whenever we create byref by adding a // constant offset to a ref, in a MACK_Addr context (meaning that the byref is not immediately // dereferenced). The alternative is "aggressive", which would not insert such checks (for // small offsets); in this plan, we would transfer some null-checking responsibility to // callee's of methods taking byref parameters. They would have to add explicit null checks // when creating derived byrefs from argument byrefs by adding constants to argument byrefs, in // contexts where the resulting derived byref is not immediately dereferenced (or if the offset is too // large). To make the "aggressive" scheme work, however, we'd also have to add explicit derived-from-null // checks for byref parameters to "external" methods implemented in C++, and in P/Invoke stubs. // This is left here to point out how to implement it. CLANG_FORMAT_COMMENT_ANCHOR; #define CONSERVATIVE_NULL_CHECK_BYREF_CREATION 1 bool addExplicitNullCheck = false; // Implicit byref locals and string literals are never null. if (fgAddrCouldBeNull(objRef)) { // If the objRef is a GT_ADDR node, it, itself, never requires null checking. The expression // whose address is being taken is either a local or static variable, whose address is necessarily // non-null, or else it is a field dereference, which will do its own bounds checking if necessary. 
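            // (For example, assuming an access shaped like "(&someLocalStruct)->fld": the ADDR of a local is
            // necessarily non-null, so only the non-ADDR objRef shapes checked below may need an explicit
            // null check.)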
if (objRef->gtOper != GT_ADDR && (mac->m_kind == MACK_Addr || mac->m_kind == MACK_Ind)) { if (!mac->m_allConstantOffsets || fgIsBigOffset(mac->m_totalOffset + fldOffset)) { addExplicitNullCheck = true; } else { // In R2R mode the field offset for some fields may change when the code // is loaded. So we can't rely on a zero offset here to suppress the null check. // // See GitHub issue #16454. bool fieldHasChangeableOffset = false; #ifdef FEATURE_READYTORUN fieldHasChangeableOffset = (tree->AsField()->gtFieldLookup.addr != nullptr); #endif #if CONSERVATIVE_NULL_CHECK_BYREF_CREATION addExplicitNullCheck = (mac->m_kind == MACK_Addr) && ((mac->m_totalOffset + fldOffset > 0) || fieldHasChangeableOffset); #else addExplicitNullCheck = (objRef->gtType == TYP_BYREF && mac->m_kind == MACK_Addr && ((mac->m_totalOffset + fldOffset > 0) || fieldHasChangeableOffset)); #endif } } } if (addExplicitNullCheck) { #ifdef DEBUG if (verbose) { printf("Before explicit null check morphing:\n"); gtDispTree(tree); } #endif // // Create the "comma" subtree // GenTree* asg = nullptr; GenTree* nullchk; unsigned lclNum; if (objRef->gtOper != GT_LCL_VAR) { lclNum = fgGetBigOffsetMorphingTemp(genActualType(objRef->TypeGet())); // Create the "asg" node asg = gtNewTempAssign(lclNum, objRef); } else { lclNum = objRef->AsLclVarCommon()->GetLclNum(); } GenTree* lclVar = gtNewLclvNode(lclNum, objRefType); nullchk = gtNewNullCheck(lclVar, compCurBB); nullchk->gtFlags |= GTF_DONT_CSE; // Don't try to create a CSE for these TYP_BYTE indirections if (asg) { // Create the "comma" node. comma = gtNewOperNode(GT_COMMA, TYP_VOID, // We don't want to return anything from this "comma" node. // Set the type to TYP_VOID, so we can select "cmp" instruction // instead of "mov" instruction later on. asg, nullchk); } else { comma = nullchk; } addr = gtNewLclvNode(lclNum, objRefType); // Use "tmpLcl" to create "addr" node. } else { addr = objRef; } #ifdef FEATURE_READYTORUN if (tree->AsField()->gtFieldLookup.addr != nullptr) { GenTree* offsetNode = nullptr; if (tree->AsField()->gtFieldLookup.accessType == IAT_PVALUE) { offsetNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)tree->AsField()->gtFieldLookup.addr, GTF_ICON_CONST_PTR, true); #ifdef DEBUG offsetNode->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)symHnd; #endif } else { noway_assert(!"unexpected accessType for R2R field access"); } var_types addType = (objRefType == TYP_I_IMPL) ? TYP_I_IMPL : TYP_BYREF; addr = gtNewOperNode(GT_ADD, addType, addr, offsetNode); } #endif if (fldOffset != 0) { // Generate the "addr" node. /* Add the member offset to the object's address */ FieldSeqNode* fieldSeq = fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd); addr = gtNewOperNode(GT_ADD, (var_types)(objRefType == TYP_I_IMPL ? TYP_I_IMPL : TYP_BYREF), addr, gtNewIconHandleNode(fldOffset, GTF_ICON_FIELD_OFF, fieldSeq)); } // Now let's set the "tree" as a GT_IND tree. tree->SetOper(GT_IND); tree->AsOp()->gtOp1 = addr; tree->SetIndirExceptionFlags(this); if (addExplicitNullCheck) { // // Create "comma2" node and link it to "tree". // GenTree* comma2; comma2 = gtNewOperNode(GT_COMMA, addr->TypeGet(), // The type of "comma2" node is the same as the type of "addr" node. 
comma, addr); tree->AsOp()->gtOp1 = comma2; } #ifdef DEBUG if (verbose) { if (addExplicitNullCheck) { printf("After adding explicit null check:\n"); gtDispTree(tree); } } #endif } else /* This is a static data member */ { if (tree->gtFlags & GTF_IND_TLS_REF) { // Thread Local Storage static field reference // // Field ref is a TLS 'Thread-Local-Storage' reference // // Build this tree: IND(*) # // | // ADD(I_IMPL) // / \. // / CNS(fldOffset) // / // / // / // IND(I_IMPL) == [Base of this DLL's TLS] // | // ADD(I_IMPL) // / \. // / CNS(IdValue*4) or MUL // / / \. // IND(I_IMPL) / CNS(4) // | / // CNS(TLS_HDL,0x2C) IND // | // CNS(pIdAddr) // // # Denotes the orginal node // void** pIdAddr = nullptr; unsigned IdValue = info.compCompHnd->getFieldThreadLocalStoreID(symHnd, (void**)&pIdAddr); // // If we can we access the TLS DLL index ID value directly // then pIdAddr will be NULL and // IdValue will be the actual TLS DLL index ID // GenTree* dllRef = nullptr; if (pIdAddr == nullptr) { if (IdValue != 0) { dllRef = gtNewIconNode(IdValue * 4, TYP_I_IMPL); } } else { dllRef = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)pIdAddr, GTF_ICON_CONST_PTR, true); // Next we multiply by 4 dllRef = gtNewOperNode(GT_MUL, TYP_I_IMPL, dllRef, gtNewIconNode(4, TYP_I_IMPL)); } #define WIN32_TLS_SLOTS (0x2C) // Offset from fs:[0] where the pointer to the slots resides // Mark this ICON as a TLS_HDL, codegen will use FS:[cns] GenTree* tlsRef = gtNewIconHandleNode(WIN32_TLS_SLOTS, GTF_ICON_TLS_HDL); // Translate GTF_FLD_INITCLASS to GTF_ICON_INITCLASS if ((tree->gtFlags & GTF_FLD_INITCLASS) != 0) { tree->gtFlags &= ~GTF_FLD_INITCLASS; tlsRef->gtFlags |= GTF_ICON_INITCLASS; } tlsRef = gtNewOperNode(GT_IND, TYP_I_IMPL, tlsRef); if (dllRef != nullptr) { /* Add the dllRef */ tlsRef = gtNewOperNode(GT_ADD, TYP_I_IMPL, tlsRef, dllRef); } /* indirect to have tlsRef point at the base of the DLLs Thread Local Storage */ tlsRef = gtNewOperNode(GT_IND, TYP_I_IMPL, tlsRef); if (fldOffset != 0) { FieldSeqNode* fieldSeq = fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd); GenTree* fldOffsetNode = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, fldOffset, fieldSeq); /* Add the TLS static field offset to the address */ tlsRef = gtNewOperNode(GT_ADD, TYP_I_IMPL, tlsRef, fldOffsetNode); } // Final indirect to get to actual value of TLS static field tree->SetOper(GT_IND); tree->AsOp()->gtOp1 = tlsRef; noway_assert(tree->gtFlags & GTF_IND_TLS_REF); } else { assert(!fieldMayOverlap); // Normal static field reference // // If we can we access the static's address directly // then pFldAddr will be NULL and // fldAddr will be the actual address of the static field // void** pFldAddr = nullptr; void* fldAddr = info.compCompHnd->getFieldAddress(symHnd, (void**)&pFldAddr); // We should always be able to access this static field address directly // assert(pFldAddr == nullptr); // For boxed statics, this direct address will be for the box. We have already added // the indirection for the field itself and attached the sequence, in importation. bool isBoxedStatic = gtIsStaticFieldPtrToBoxedStruct(tree->TypeGet(), symHnd); FieldSeqNode* fldSeq = !isBoxedStatic ? GetFieldSeqStore()->CreateSingleton(symHnd) : FieldSeqStore::NotAField(); // TODO-CQ: enable this optimization for 32 bit targets. 
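            // As an illustrative (non-exhaustive) example: assuming a field such as
            //     static readonly string s_greeting = "hello";
            // in a class that is already initialized, getStaticFieldCurrentClass returns a non-speculative
            // class handle below, so the load of the field can be marked invariant, non-faulting and non-null.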
bool isStaticReadOnlyInited = false; #ifdef TARGET_64BIT if (tree->TypeIs(TYP_REF) && !isBoxedStatic) { bool pIsSpeculative = true; if (info.compCompHnd->getStaticFieldCurrentClass(symHnd, &pIsSpeculative) != NO_CLASS_HANDLE) { isStaticReadOnlyInited = !pIsSpeculative; } } #endif // TARGET_64BIT // TODO: choices made below have mostly historical reasons and // should be unified to always use the IND(<address>) form. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT bool preferIndir = isBoxedStatic || isStaticReadOnlyInited || (IMAGE_REL_BASED_REL32 != eeGetRelocTypeHint(fldAddr)); #else // !TARGET_64BIT bool preferIndir = isBoxedStatic; #endif // !TARGET_64BIT if (preferIndir) { GenTreeFlags handleKind = GTF_EMPTY; if (isBoxedStatic) { handleKind = GTF_ICON_STATIC_BOX_PTR; } else if (isStaticReadOnlyInited) { handleKind = GTF_ICON_CONST_PTR; } else { handleKind = GTF_ICON_STATIC_HDL; } GenTree* addr = gtNewIconHandleNode((size_t)fldAddr, handleKind, fldSeq); // Translate GTF_FLD_INITCLASS to GTF_ICON_INITCLASS, if we need to. if (((tree->gtFlags & GTF_FLD_INITCLASS) != 0) && !isStaticReadOnlyInited) { tree->gtFlags &= ~GTF_FLD_INITCLASS; addr->gtFlags |= GTF_ICON_INITCLASS; } tree->SetOper(GT_IND); tree->AsOp()->gtOp1 = addr; if (isBoxedStatic) { // The box for the static cannot be null, and is logically invariant, since it // represents (a base for) the static's address. tree->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL); } else if (isStaticReadOnlyInited) { JITDUMP("Marking initialized static read-only field '%s' as invariant.\n", eeGetFieldName(symHnd)); // Static readonly field is not null at this point (see getStaticFieldCurrentClass impl). tree->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL); } return fgMorphSmpOp(tree); } else { // Only volatile or classinit could be set, and they map over noway_assert((tree->gtFlags & ~(GTF_FLD_VOLATILE | GTF_FLD_INITCLASS | GTF_COMMON_MASK)) == 0); static_assert_no_msg(GTF_FLD_VOLATILE == GTF_CLS_VAR_VOLATILE); static_assert_no_msg(GTF_FLD_INITCLASS == GTF_CLS_VAR_INITCLASS); tree->SetOper(GT_CLS_VAR); tree->AsClsVar()->gtClsVarHnd = symHnd; tree->AsClsVar()->gtFieldSeq = fldSeq; } return tree; } } noway_assert(tree->gtOper == GT_IND); if (fldOffset == 0) { GenTree* addr = tree->AsOp()->gtOp1; // 'addr' may be a GT_COMMA. Skip over any comma nodes addr = addr->gtEffectiveVal(); #ifdef DEBUG if (verbose) { printf("\nBefore calling fgAddFieldSeqForZeroOffset:\n"); gtDispTree(tree); } #endif // We expect 'addr' to be an address at this point. assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF); // Since we don't make a constant zero to attach the field sequence to, associate it with the "addr" node. FieldSeqNode* fieldSeq = fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd); fgAddFieldSeqForZeroOffset(addr, fieldSeq); } // Pass down the current mac; if non null we are computing an address GenTree* result = fgMorphSmpOp(tree, mac); #ifdef DEBUG if (verbose) { printf("\nFinal value of Compiler::fgMorphField after calling fgMorphSmpOp:\n"); gtDispTree(result); } #endif return result; } //------------------------------------------------------------------------------ // fgMorphCallInline: attempt to inline a call // // Arguments: // call - call expression to inline, inline candidate // inlineResult - result tracking and reporting // // Notes: // Attempts to inline the call. 
// // If successful, callee's IR is inserted in place of the call, and // is marked with an InlineContext. // // If unsuccessful, the transformations done in anticipation of a // possible inline are undone, and the candidate flag on the call // is cleared. void Compiler::fgMorphCallInline(GenTreeCall* call, InlineResult* inlineResult) { bool inliningFailed = false; // Is this call an inline candidate? if (call->IsInlineCandidate()) { InlineContext* createdContext = nullptr; // Attempt the inline fgMorphCallInlineHelper(call, inlineResult, &createdContext); // We should have made up our minds one way or another.... assert(inlineResult->IsDecided()); // If we failed to inline, we have a bit of work to do to cleanup if (inlineResult->IsFailure()) { if (createdContext != nullptr) { // We created a context before we got to the failure, so mark // it as failed in the tree. createdContext->SetFailed(inlineResult); } else { #ifdef DEBUG // In debug we always put all inline attempts into the inline tree. InlineContext* ctx = m_inlineStrategy->NewContext(call->gtInlineCandidateInfo->inlinersContext, fgMorphStmt, call); ctx->SetFailed(inlineResult); #endif } inliningFailed = true; // Clear the Inline Candidate flag so we can ensure later we tried // inlining all candidates. // call->gtFlags &= ~GTF_CALL_INLINE_CANDIDATE; } } else { // This wasn't an inline candidate. So it must be a GDV candidate. assert(call->IsGuardedDevirtualizationCandidate()); // We already know we can't inline this call, so don't even bother to try. inliningFailed = true; } // If we failed to inline (or didn't even try), do some cleanup. if (inliningFailed) { if (call->gtReturnType != TYP_VOID) { JITDUMP("Inlining [%06u] failed, so bashing " FMT_STMT " to NOP\n", dspTreeID(call), fgMorphStmt->GetID()); // Detach the GT_CALL tree from the original statement by // hanging a "nothing" node to it. Later the "nothing" node will be removed // and the original GT_CALL tree will be picked up by the GT_RET_EXPR node. noway_assert(fgMorphStmt->GetRootNode() == call); fgMorphStmt->SetRootNode(gtNewNothingNode()); } } } //------------------------------------------------------------------------------ // fgMorphCallInlineHelper: Helper to attempt to inline a call // // Arguments: // call - call expression to inline, inline candidate // result - result to set to success or failure // createdContext - The context that was created if the inline attempt got to the inliner. // // Notes: // Attempts to inline the call. // // If successful, callee's IR is inserted in place of the call, and // is marked with an InlineContext. // // If unsuccessful, the transformations done in anticipation of a // possible inline are undone, and the candidate flag on the call // is cleared. // // If a context was created because we got to the importer then it is output by this function. // If the inline succeeded, this context will already be marked as successful. If it failed and // a context is returned, then it will not have been marked as success or failed. void Compiler::fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result, InlineContext** createdContext) { // Don't expect any surprises here. assert(result->IsCandidate()); if (lvaCount >= MAX_LV_NUM_COUNT_FOR_INLINING) { // For now, attributing this to call site, though it's really // more of a budget issue (lvaCount currently includes all // caller and prospective callee locals). We still might be // able to inline other callees into this caller, or inline // this callee in other callers. 
result->NoteFatal(InlineObservation::CALLSITE_TOO_MANY_LOCALS); return; } if (call->IsVirtual()) { result->NoteFatal(InlineObservation::CALLSITE_IS_VIRTUAL); return; } // Re-check this because guarded devirtualization may allow these through. if (gtIsRecursiveCall(call) && call->IsImplicitTailCall()) { result->NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL); return; } // impMarkInlineCandidate() is expected not to mark tail prefixed calls // and recursive tail calls as inline candidates. noway_assert(!call->IsTailPrefixedCall()); noway_assert(!call->IsImplicitTailCall() || !gtIsRecursiveCall(call)); // // Calling inlinee's compiler to inline the method. // unsigned startVars = lvaCount; #ifdef DEBUG if (verbose) { printf("Expanding INLINE_CANDIDATE in statement "); printStmtID(fgMorphStmt); printf(" in " FMT_BB ":\n", compCurBB->bbNum); gtDispStmt(fgMorphStmt); if (call->IsImplicitTailCall()) { printf("Note: candidate is implicit tail call\n"); } } #endif impInlineRoot()->m_inlineStrategy->NoteAttempt(result); // // Invoke the compiler to inline the call. // fgInvokeInlineeCompiler(call, result, createdContext); if (result->IsFailure()) { // Undo some changes made in anticipation of inlining... // Zero out the used locals memset(lvaTable + startVars, 0, (lvaCount - startVars) * sizeof(*lvaTable)); for (unsigned i = startVars; i < lvaCount; i++) { new (&lvaTable[i], jitstd::placement_t()) LclVarDsc(); // call the constructor. } lvaCount = startVars; #ifdef DEBUG if (verbose) { // printf("Inlining failed. Restore lvaCount to %d.\n", lvaCount); } #endif return; } #ifdef DEBUG if (verbose) { // printf("After inlining lvaCount=%d.\n", lvaCount); } #endif } //------------------------------------------------------------------------ // fgCanFastTailCall: Check to see if this tail call can be optimized as epilog+jmp. // // Arguments: // callee - The callee to check // failReason - If this method returns false, the reason why. Can be nullptr. // // Return Value: // Returns true or false based on whether the callee can be fastTailCalled // // Notes: // This function is target specific and each target will make the fastTailCall // decision differently. See the notes below. // // This function calls fgInitArgInfo() to initialize the arg info table, which // is used to analyze the argument. This function can alter the call arguments // by adding argument IR nodes for non-standard arguments. // // Windows Amd64: // A fast tail call can be made whenever the number of callee arguments // is less than or equal to the number of caller arguments, or we have four // or fewer callee arguments. This is because, on Windows AMD64, each // argument uses exactly one register or one 8-byte stack slot. Thus, we only // need to count arguments, and not be concerned with the size of each // incoming or outgoing argument. 
// // Can fast tail call examples (amd64 Windows): // // -- Callee will have all register arguments -- // caller(int, int, int, int) // callee(int, int, float, int) // // -- Callee requires stack space that is equal or less than the caller -- // caller(struct, struct, struct, struct, struct, struct) // callee(int, int, int, int, int, int) // // -- Callee requires stack space that is less than the caller -- // caller(struct, double, struct, float, struct, struct) // callee(int, int, int, int, int) // // -- Callee will have all register arguments -- // caller(int) // callee(int, int, int, int) // // Cannot fast tail call examples (amd64 Windows): // // -- Callee requires stack space that is larger than the caller -- // caller(struct, double, struct, float, struct, struct) // callee(int, int, int, int, int, double, double, double) // // -- Callee has a byref struct argument -- // caller(int, int, int) // callee(struct(size 3 bytes)) // // Unix Amd64 && Arm64: // A fastTailCall decision can be made whenever the callee's stack space is // less than or equal to the caller's stack space. There are many permutations // of when the caller and callee have different stack sizes if there are // structs being passed to either the caller or callee. // // Exceptions: // If the callee has a 9 to 16 byte struct argument and the callee has // stack arguments, the decision will be to not fast tail call. This is // because before fgMorphArgs is done, the struct is unknown whether it // will be placed on the stack or enregistered. Therefore, the conservative // decision of do not fast tail call is taken. This limitations should be // removed if/when fgMorphArgs no longer depends on fgCanFastTailCall. // // Can fast tail call examples (amd64 Unix): // // -- Callee will have all register arguments -- // caller(int, int, int, int) // callee(int, int, float, int) // // -- Callee requires stack space that is equal to the caller -- // caller({ long, long }, { int, int }, { int }, { int }, { int }, { int }) -- 6 int register arguments, 16 byte // stack // space // callee(int, int, int, int, int, int, int, int) -- 6 int register arguments, 16 byte stack space // // -- Callee requires stack space that is less than the caller -- // caller({ long, long }, int, { long, long }, int, { long, long }, { long, long }) 6 int register arguments, 32 byte // stack // space // callee(int, int, int, int, int, int, { long, long } ) // 6 int register arguments, 16 byte stack space // // -- Callee will have all register arguments -- // caller(int) // callee(int, int, int, int) // // Cannot fast tail call examples (amd64 Unix): // // -- Callee requires stack space that is larger than the caller -- // caller(float, float, float, float, float, float, float, float) -- 8 float register arguments // callee(int, int, int, int, int, int, int, int) -- 6 int register arguments, 16 byte stack space // // -- Callee has structs which cannot be enregistered (Implementation Limitation) -- // caller(float, float, float, float, float, float, float, float, { double, double, double }) -- 8 float register // arguments, 24 byte stack space // callee({ double, double, double }) -- 24 bytes stack space // // -- Callee requires stack space and has a struct argument >8 bytes and <16 bytes (Implementation Limitation) -- // caller(int, int, int, int, int, int, { double, double, double }) -- 6 int register arguments, 24 byte stack space // callee(int, int, int, int, int, int, { int, int }) -- 6 int registers, 16 byte stack space // // -- Caller requires stack 
space and nCalleeArgs > nCallerArgs (Bug) -- // caller({ double, double, double, double, double, double }) // 48 byte stack // callee(int, int) -- 2 int registers bool Compiler::fgCanFastTailCall(GenTreeCall* callee, const char** failReason) { #if FEATURE_FASTTAILCALL // To reach here means that the return types of the caller and callee are tail call compatible. // In the case of structs that can be returned in a register, compRetNativeType is set to the actual return type. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (callee->IsTailPrefixedCall()) { var_types retType = info.compRetType; assert(impTailCallRetTypeCompatible(false, retType, info.compMethodInfo->args.retTypeClass, info.compCallConv, (var_types)callee->gtReturnType, callee->gtRetClsHnd, callee->GetUnmanagedCallConv())); } #endif assert(!callee->AreArgsComplete()); fgInitArgInfo(callee); fgArgInfo* argInfo = callee->fgArgInfo; unsigned calleeArgStackSize = 0; unsigned callerArgStackSize = info.compArgStackSize; for (unsigned index = 0; index < argInfo->ArgCount(); ++index) { fgArgTabEntry* arg = argInfo->GetArgEntry(index, false); calleeArgStackSize = roundUp(calleeArgStackSize, arg->GetByteAlignment()); calleeArgStackSize += arg->GetStackByteSize(); } calleeArgStackSize = GetOutgoingArgByteSize(calleeArgStackSize); auto reportFastTailCallDecision = [&](const char* thisFailReason) { if (failReason != nullptr) { *failReason = thisFailReason; } #ifdef DEBUG if ((JitConfig.JitReportFastTailCallDecisions()) == 1) { if (callee->gtCallType != CT_INDIRECT) { const char* methodName; methodName = eeGetMethodFullName(callee->gtCallMethHnd); printf("[Fast tailcall decision]: Caller: %s\n[Fast tailcall decision]: Callee: %s -- Decision: ", info.compFullName, methodName); } else { printf("[Fast tailcall decision]: Caller: %s\n[Fast tailcall decision]: Callee: IndirectCall -- " "Decision: ", info.compFullName); } if (thisFailReason == nullptr) { printf("Will fast tailcall"); } else { printf("Will not fast tailcall (%s)", thisFailReason); } printf(" (CallerArgStackSize: %d, CalleeArgStackSize: %d)\n\n", callerArgStackSize, calleeArgStackSize); } else { if (thisFailReason == nullptr) { JITDUMP("[Fast tailcall decision]: Will fast tailcall\n"); } else { JITDUMP("[Fast tailcall decision]: Will not fast tailcall (%s)\n", thisFailReason); } } #endif // DEBUG }; if (!opts.compFastTailCalls) { reportFastTailCallDecision("Configuration doesn't allow fast tail calls"); return false; } if (callee->IsStressTailCall()) { reportFastTailCallDecision("Fast tail calls are not performed under tail call stress"); return false; } // Note on vararg methods: // If the caller is vararg method, we don't know the number of arguments passed by caller's caller. // But we can be sure that in-coming arg area of vararg caller would be sufficient to hold its // fixed args. Therefore, we can allow a vararg method to fast tail call other methods as long as // out-going area required for callee is bounded by caller's fixed argument space. // // Note that callee being a vararg method is not a problem since we can account the params being passed. // // We will currently decide to not fast tail call on Windows armarch if the caller or callee is a vararg // method. This is due to the ABI differences for native vararg methods for these platforms. There is // work required to shuffle arguments to the correct locations. 
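    // As a rough worked example of the stack-size comparison below (Windows x64, where each argument takes
    // one register or one 8-byte stack slot): a callee with six integer arguments needs two stack slots, so
    // calleeArgStackSize is 16 bytes, and a caller with only 8 bytes of incoming argument space cannot
    // fast tail call it.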
CLANG_FORMAT_COMMENT_ANCHOR; if (TargetOS::IsWindows && TargetArchitecture::IsArmArch && (info.compIsVarArgs || callee->IsVarargs())) { reportFastTailCallDecision("Fast tail calls with varargs not supported on Windows ARM/ARM64"); return false; } if (compLocallocUsed) { reportFastTailCallDecision("Localloc used"); return false; } #ifdef TARGET_AMD64 // Needed for Jit64 compat. // In future, enabling fast tail calls from methods that need GS cookie // check would require codegen side work to emit GS cookie check before a // tail call. if (getNeedsGSSecurityCookie()) { reportFastTailCallDecision("GS Security cookie check required"); return false; } #endif // If the NextCallReturnAddress intrinsic is used we should do normal calls. if (info.compHasNextCallRetAddr) { reportFastTailCallDecision("Uses NextCallReturnAddress intrinsic"); return false; } if (callee->HasRetBufArg()) // RetBuf { // If callee has RetBuf param, caller too must have it. // Otherwise go the slow route. if (info.compRetBuffArg == BAD_VAR_NUM) { reportFastTailCallDecision("Callee has RetBuf but caller does not."); return false; } } // For a fast tail call the caller will use its incoming arg stack space to place // arguments, so if the callee requires more arg stack space than is available here // the fast tail call cannot be performed. This is common to all platforms. // Note that the GC'ness of on stack args need not match since the arg setup area is marked // as non-interruptible for fast tail calls. if (calleeArgStackSize > callerArgStackSize) { reportFastTailCallDecision("Not enough incoming arg space"); return false; } // For Windows some struct parameters are copied on the local frame // and then passed by reference. We cannot fast tail call in these situation // as we need to keep our frame around. if (fgCallHasMustCopyByrefParameter(callee)) { reportFastTailCallDecision("Callee has a byref parameter"); return false; } reportFastTailCallDecision(nullptr); return true; #else // FEATURE_FASTTAILCALL if (failReason) *failReason = "Fast tailcalls are not supported on this platform"; return false; #endif } //------------------------------------------------------------------------ // fgCallHasMustCopyByrefParameter: Check to see if this call has a byref parameter that // requires a struct copy in the caller. // // Arguments: // callee - The callee to check // // Return Value: // Returns true or false based on whether this call has a byref parameter that // requires a struct copy in the caller. #if FEATURE_FASTTAILCALL bool Compiler::fgCallHasMustCopyByrefParameter(GenTreeCall* callee) { fgArgInfo* argInfo = callee->fgArgInfo; bool hasMustCopyByrefParameter = false; for (unsigned index = 0; index < argInfo->ArgCount(); ++index) { fgArgTabEntry* arg = argInfo->GetArgEntry(index, false); if (arg->isStruct) { if (arg->passedByRef) { // Generally a byref arg will block tail calling, as we have to // make a local copy of the struct for the callee. hasMustCopyByrefParameter = true; // If we're optimizing, we may be able to pass our caller's byref to our callee, // and so still be able to avoid a struct copy. if (opts.OptimizationEnabled()) { // First, see if this arg is an implicit byref param. GenTreeLclVar* const lcl = arg->GetNode()->IsImplicitByrefParameterValue(this); if (lcl != nullptr) { // Yes, the arg is an implicit byref param. 
const unsigned lclNum = lcl->GetLclNum(); LclVarDsc* const varDsc = lvaGetDesc(lcl); // The param must not be promoted; if we've promoted, then the arg will be // a local struct assembled from the promoted fields. if (varDsc->lvPromoted) { JITDUMP("Arg [%06u] is promoted implicit byref V%02u, so no tail call\n", dspTreeID(arg->GetNode()), lclNum); } else { JITDUMP("Arg [%06u] is unpromoted implicit byref V%02u, seeing if we can still tail call\n", dspTreeID(arg->GetNode()), lclNum); // We have to worry about introducing aliases if we bypass copying // the struct at the call. We'll do some limited analysis to see if we // can rule this out. const unsigned argLimit = 6; // If this is the only appearance of the byref in the method, then // aliasing is not possible. // // If no other call arg refers to this byref, and no other arg is // a pointer which could refer to this byref, we can optimize. // // We only check this for calls with small numbers of arguments, // as the analysis cost will be quadratic. // const unsigned totalAppearances = varDsc->lvRefCnt(RCS_EARLY); const unsigned callAppearances = (unsigned)varDsc->lvRefCntWtd(RCS_EARLY); assert(totalAppearances >= callAppearances); if (totalAppearances == 1) { JITDUMP("... yes, arg is the only appearance of V%02u\n", lclNum); hasMustCopyByrefParameter = false; } else if (totalAppearances > callAppearances) { // lvRefCntWtd tracks the number of appearances of the arg at call sites. // If this number doesn't match the regular ref count, there is // a non-call appearance, and we must be conservative. // JITDUMP("... no, arg has %u non-call appearance(s)\n", totalAppearances - callAppearances); } else if (argInfo->ArgCount() <= argLimit) { JITDUMP("... all %u appearance(s) are as implicit byref args to calls.\n" "... Running alias analysis on this call's args\n", totalAppearances); GenTree* interferingArg = nullptr; for (unsigned index2 = 0; index2 < argInfo->ArgCount(); ++index2) { if (index2 == index) { continue; } fgArgTabEntry* const arg2 = argInfo->GetArgEntry(index2, false); JITDUMP("... checking other arg [%06u]...\n", dspTreeID(arg2->GetNode())); DISPTREE(arg2->GetNode()); // Do we pass 'lcl' more than once to the callee? if (arg2->isStruct && arg2->passedByRef) { GenTreeLclVarCommon* const lcl2 = arg2->GetNode()->IsImplicitByrefParameterValue(this); if ((lcl2 != nullptr) && (lclNum == lcl2->GetLclNum())) { // not copying would introduce aliased implicit byref structs // in the callee ... we can't optimize. interferingArg = arg2->GetNode(); break; } else { JITDUMP("... arg refers to different implicit byref V%02u\n", lcl2->GetLclNum()); continue; } } // Do we pass a byref pointer which might point within 'lcl'? // // We can assume the 'lcl' is unaliased on entry to the // method, so the only way we can have an aliasing byref pointer at // the call is if 'lcl' is address taken/exposed in the method. // // Note even though 'lcl' is not promoted, we are in the middle // of the promote->rewrite->undo->(morph)->demote cycle, and so // might see references to promoted fields of 'lcl' that haven't yet // been demoted (see fgMarkDemotedImplicitByRefArgs). // // So, we also need to scan all 'lcl's fields, if any, to see if they // are exposed. // // When looking for aliases from other args, we check for both TYP_BYREF // and TYP_I_IMPL typed args here. 
Conceptually anything that points into // an implicit byref parameter should be TYP_BYREF, as these parameters could // refer to boxed heap locations (say if the method is invoked by reflection) // but there are some stack only structs (like typed references) where // the importer/runtime code uses TYP_I_IMPL, and fgInitArgInfo will // transiently retype all simple address-of implicit parameter args as // TYP_I_IMPL. // if ((arg2->argType == TYP_BYREF) || (arg2->argType == TYP_I_IMPL)) { JITDUMP("...arg is a byref, must run an alias check\n"); bool checkExposure = true; bool hasExposure = false; // See if there is any way arg could refer to a parameter struct. GenTree* arg2Node = arg2->GetNode(); if (arg2Node->OperIs(GT_LCL_VAR)) { GenTreeLclVarCommon* arg2LclNode = arg2Node->AsLclVarCommon(); assert(arg2LclNode->GetLclNum() != lclNum); LclVarDsc* arg2Dsc = lvaGetDesc(arg2LclNode); // Other params can't alias implicit byref params if (arg2Dsc->lvIsParam) { checkExposure = false; } } // Because we're checking TYP_I_IMPL above, at least // screen out obvious things that can't cause aliases. else if (arg2Node->IsIntegralConst()) { checkExposure = false; } if (checkExposure) { JITDUMP( "... not sure where byref arg points, checking if V%02u is exposed\n", lclNum); // arg2 might alias arg, see if we've exposed // arg somewhere in the method. if (varDsc->lvHasLdAddrOp || varDsc->IsAddressExposed()) { // Struct as a whole is exposed, can't optimize JITDUMP("... V%02u is exposed\n", lclNum); hasExposure = true; } else if (varDsc->lvFieldLclStart != 0) { // This is the promoted/undone struct case. // // The field start is actually the local number of the promoted local, // use it to enumerate the fields. const unsigned promotedLcl = varDsc->lvFieldLclStart; LclVarDsc* const promotedVarDsc = lvaGetDesc(promotedLcl); JITDUMP("...promoted-unpromoted case -- also checking exposure of " "fields of V%02u\n", promotedLcl); for (unsigned fieldIndex = 0; fieldIndex < promotedVarDsc->lvFieldCnt; fieldIndex++) { LclVarDsc* fieldDsc = lvaGetDesc(promotedVarDsc->lvFieldLclStart + fieldIndex); if (fieldDsc->lvHasLdAddrOp || fieldDsc->IsAddressExposed()) { // Promoted and not yet demoted field is exposed, can't optimize JITDUMP("... field V%02u is exposed\n", promotedVarDsc->lvFieldLclStart + fieldIndex); hasExposure = true; break; } } } } if (hasExposure) { interferingArg = arg2->GetNode(); break; } } else { JITDUMP("...arg is not a byref or implicit byref (%s)\n", varTypeName(arg2->GetNode()->TypeGet())); } } if (interferingArg != nullptr) { JITDUMP("... no, arg [%06u] may alias with V%02u\n", dspTreeID(interferingArg), lclNum); } else { JITDUMP("... yes, no other arg in call can alias V%02u\n", lclNum); hasMustCopyByrefParameter = false; } } else { JITDUMP(" ... no, call has %u > %u args, alias analysis deemed too costly\n", argInfo->ArgCount(), argLimit); } } } } if (hasMustCopyByrefParameter) { // This arg requires a struct copy. No reason to keep scanning the remaining args. break; } } } } return hasMustCopyByrefParameter; } #endif //------------------------------------------------------------------------ // fgMorphPotentialTailCall: Attempt to morph a call that the importer has // identified as a potential tailcall to an actual tailcall and return the // placeholder node to use in this case. // // Arguments: // call - The call to morph. // // Return Value: // Returns a node to use if the call was morphed into a tailcall. 
If this // function returns a node the call is done being morphed and the new node // should be used. Otherwise the call will have been demoted to a regular call // and should go through normal morph. // // Notes: // This is called only for calls that the importer has already identified as // potential tailcalls. It will do profitability and legality checks and // classify which kind of tailcall we are able to (or should) do, along with // modifying the trees to perform that kind of tailcall. // GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) { // It should either be an explicit (i.e. tail prefixed) or an implicit tail call assert(call->IsTailPrefixedCall() ^ call->IsImplicitTailCall()); // It cannot be an inline candidate assert(!call->IsInlineCandidate()); auto failTailCall = [&](const char* reason, unsigned lclNum = BAD_VAR_NUM) { #ifdef DEBUG if (verbose) { printf("\nRejecting tail call in morph for call "); printTreeID(call); printf(": %s", reason); if (lclNum != BAD_VAR_NUM) { printf(" V%02u", lclNum); } printf("\n"); } #endif // for non user funcs, we have no handles to report info.compCompHnd->reportTailCallDecision(nullptr, (call->gtCallType == CT_USER_FUNC) ? call->gtCallMethHnd : nullptr, call->IsTailPrefixedCall(), TAILCALL_FAIL, reason); // We have checked the candidate so demote. call->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; #if FEATURE_TAILCALL_OPT call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL; #endif }; if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) { failTailCall("Might turn into an intrinsic"); return nullptr; } if (call->IsNoReturn() && !call->IsTailPrefixedCall()) { // Such tail calls always throw an exception and we won't be able to see current // Caller() in the stacktrace. failTailCall("Never returns"); return nullptr; } #ifdef DEBUG if (opts.compGcChecks && (info.compRetType == TYP_REF)) { failTailCall("COMPlus_JitGCChecks or stress might have interposed a call to CORINFO_HELP_CHECK_OBJ, " "invalidating tailcall opportunity"); return nullptr; } #endif // We have to ensure to pass the incoming retValBuf as the // outgoing one. Using a temp will not do as this function will // not regain control to do the copy. This can happen when inlining // a tailcall which also has a potential tailcall in it: the IL looks // like we can do a tailcall, but the trees generated use a temp for the inlinee's // result. TODO-CQ: Fix this. if (info.compRetBuffArg != BAD_VAR_NUM) { noway_assert(call->TypeGet() == TYP_VOID); GenTree* retValBuf = call->gtCallArgs->GetNode(); if (retValBuf->gtOper != GT_LCL_VAR || retValBuf->AsLclVarCommon()->GetLclNum() != info.compRetBuffArg) { failTailCall("Need to copy return buffer"); return nullptr; } } // We are still not sure whether it can be a tail call. Because, when converting // a call to an implicit tail call, we must check that there are no locals with // their address taken. If this is the case, we have to assume that the address // has been leaked and the current stack frame must live until after the final // call. // Verify that none of vars has lvHasLdAddrOp or IsAddressExposed() bit set. Note // that lvHasLdAddrOp is much more conservative. We cannot just base it on // IsAddressExposed() alone since it is not guaranteed to be set on all VarDscs // during morph stage. The reason for also checking IsAddressExposed() is that in case // of vararg methods user args are marked as addr exposed but not lvHasLdAddrOp. 
// The combination of lvHasLdAddrOp and IsAddressExposed() though conservative allows us // never to be incorrect. // // TODO-Throughput: have a compiler level flag to indicate whether method has vars whose // address is taken. Such a flag could be set whenever lvHasLdAddrOp or IsAddressExposed() // is set. This avoids the need for iterating through all lcl vars of the current // method. Right now throughout the code base we are not consistently using 'set' // method to set lvHasLdAddrOp and IsAddressExposed() flags. bool isImplicitOrStressTailCall = call->IsImplicitTailCall() || call->IsStressTailCall(); if (isImplicitOrStressTailCall && compLocallocUsed) { failTailCall("Localloc used"); return nullptr; } bool hasStructParam = false; for (unsigned varNum = 0; varNum < lvaCount; varNum++) { LclVarDsc* varDsc = lvaGetDesc(varNum); // If the method is marked as an explicit tail call we will skip the // following three hazard checks. // We still must check for any struct parameters and set 'hasStructParam' // so that we won't transform the recursive tail call into a loop. // if (isImplicitOrStressTailCall) { if (varDsc->lvHasLdAddrOp && !lvaIsImplicitByRefLocal(varNum)) { failTailCall("Local address taken", varNum); return nullptr; } if (varDsc->IsAddressExposed()) { if (lvaIsImplicitByRefLocal(varNum)) { // The address of the implicit-byref is a non-address use of the pointer parameter. } else if (varDsc->lvIsStructField && lvaIsImplicitByRefLocal(varDsc->lvParentLcl)) { // The address of the implicit-byref's field is likewise a non-address use of the pointer // parameter. } else if (varDsc->lvPromoted && (lvaTable[varDsc->lvFieldLclStart].lvParentLcl != varNum)) { // This temp was used for struct promotion bookkeeping. It will not be used, and will have // its ref count and address-taken flag reset in fgMarkDemotedImplicitByRefArgs. assert(lvaIsImplicitByRefLocal(lvaTable[varDsc->lvFieldLclStart].lvParentLcl)); assert(fgGlobalMorph); } else { failTailCall("Local address taken", varNum); return nullptr; } } if (varDsc->lvPromoted && varDsc->lvIsParam && !lvaIsImplicitByRefLocal(varNum)) { failTailCall("Has Struct Promoted Param", varNum); return nullptr; } if (varDsc->lvPinned) { // A tail call removes the method from the stack, which means the pinning // goes away for the callee. We can't allow that. failTailCall("Has Pinned Vars", varNum); return nullptr; } } if (varTypeIsStruct(varDsc->TypeGet()) && varDsc->lvIsParam) { hasStructParam = true; // This prevents transforming a recursive tail call into a loop // but doesn't prevent tail call optimization so we need to // look at the rest of parameters. } } if (!fgCheckStmtAfterTailCall()) { failTailCall("Unexpected statements after the tail call"); return nullptr; } const char* failReason = nullptr; bool canFastTailCall = fgCanFastTailCall(call, &failReason); CORINFO_TAILCALL_HELPERS tailCallHelpers; bool tailCallViaJitHelper = false; if (!canFastTailCall) { if (call->IsImplicitTailCall()) { // Implicit or opportunistic tail calls are always dispatched via fast tail call // mechanism and never via tail call helper for perf. failTailCall(failReason); return nullptr; } assert(call->IsTailPrefixedCall()); assert(call->tailCallInfo != nullptr); // We do not currently handle non-standard args except for VSD stubs. 
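    // Such extra args live in registers that are not preserved across the helper-based
    // dispatch, so bail out below if any were added to a non-VSD call.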
if (!call->IsVirtualStub() && call->HasNonStandardAddedArgs(this)) { failTailCall( "Method with non-standard args passed in callee trash register cannot be tail called via helper"); return nullptr; } // On x86 we have a faster mechanism than the general one which we use // in almost all cases. See fgCanTailCallViaJitHelper for more information. if (fgCanTailCallViaJitHelper()) { tailCallViaJitHelper = true; } else { // Make sure we can get the helpers. We do this last as the runtime // will likely be required to generate these. CORINFO_RESOLVED_TOKEN* token = nullptr; CORINFO_SIG_INFO* sig = call->tailCallInfo->GetSig(); unsigned flags = 0; if (!call->tailCallInfo->IsCalli()) { token = call->tailCallInfo->GetToken(); if (call->tailCallInfo->IsCallvirt()) { flags |= CORINFO_TAILCALL_IS_CALLVIRT; } } if (call->gtCallThisArg != nullptr) { var_types thisArgType = call->gtCallThisArg->GetNode()->TypeGet(); if (thisArgType != TYP_REF) { flags |= CORINFO_TAILCALL_THIS_ARG_IS_BYREF; } } if (!info.compCompHnd->getTailCallHelpers(token, sig, (CORINFO_GET_TAILCALL_HELPERS_FLAGS)flags, &tailCallHelpers)) { failTailCall("Tail call help not available"); return nullptr; } } } // Check if we can make the tailcall a loop. bool fastTailCallToLoop = false; #if FEATURE_TAILCALL_OPT // TODO-CQ: enable the transformation when the method has a struct parameter that can be passed in a register // or return type is a struct that can be passed in a register. // // TODO-CQ: if the method being compiled requires generic context reported in gc-info (either through // hidden generic context param or through keep alive thisptr), then while transforming a recursive // call to such a method requires that the generic context stored on stack slot be updated. Right now, // fgMorphRecursiveFastTailCallIntoLoop() is not handling update of generic context while transforming // a recursive call into a loop. Another option is to modify gtIsRecursiveCall() to check that the // generic type parameters of both caller and callee generic method are the same. if (opts.compTailCallLoopOpt && canFastTailCall && gtIsRecursiveCall(call) && !lvaReportParamTypeArg() && !lvaKeepAliveAndReportThis() && !call->IsVirtual() && !hasStructParam && !varTypeIsStruct(call->TypeGet())) { fastTailCallToLoop = true; } #endif // Ok -- now we are committed to performing a tailcall. Report the decision. CorInfoTailCall tailCallResult; if (fastTailCallToLoop) { tailCallResult = TAILCALL_RECURSIVE; } else if (canFastTailCall) { tailCallResult = TAILCALL_OPTIMIZED; } else { tailCallResult = TAILCALL_HELPER; } info.compCompHnd->reportTailCallDecision(nullptr, (call->gtCallType == CT_USER_FUNC) ? call->gtCallMethHnd : nullptr, call->IsTailPrefixedCall(), tailCallResult, nullptr); // Are we currently planning to expand the gtControlExpr as an early virtual call target? // if (call->IsExpandedEarly() && call->IsVirtualVtable()) { // It isn't alway profitable to expand a virtual call early // // We alway expand the TAILCALL_HELPER type late. // And we exapnd late when we have an optimized tail call // and the this pointer needs to be evaluated into a temp. // if (tailCallResult == TAILCALL_HELPER) { // We will alway expand this late in lower instead. 
// (see LowerTailCallViaJitHelper as it needs some work // for us to be able to expand this earlier in morph) // call->ClearExpandedEarly(); } else if ((tailCallResult == TAILCALL_OPTIMIZED) && ((call->gtCallThisArg->GetNode()->gtFlags & GTF_SIDE_EFFECT) != 0)) { // We generate better code when we expand this late in lower instead. // call->ClearExpandedEarly(); } } // Now actually morph the call. compTailCallUsed = true; // This will prevent inlining this call. call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL; if (tailCallViaJitHelper) { call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL_VIA_JIT_HELPER; } #if FEATURE_TAILCALL_OPT if (fastTailCallToLoop) { call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL_TO_LOOP; } #endif // Mark that this is no longer a pending tailcall. We need to do this before // we call fgMorphCall again (which happens in the fast tailcall case) to // avoid recursing back into this method. call->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; #if FEATURE_TAILCALL_OPT call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL; #endif #ifdef DEBUG if (verbose) { printf("\nGTF_CALL_M_TAILCALL bit set for call "); printTreeID(call); printf("\n"); if (fastTailCallToLoop) { printf("\nGTF_CALL_M_TAILCALL_TO_LOOP bit set for call "); printTreeID(call); printf("\n"); } } #endif // For R2R we might need a different entry point for this call if we are doing a tailcall. // The reason is that the normal delay load helper uses the return address to find the indirection // cell in xarch, but now the JIT is expected to leave the indirection cell in REG_R2R_INDIRECT_PARAM: // We optimize delegate invocations manually in the JIT so skip this for those. if (call->IsR2RRelativeIndir() && canFastTailCall && !fastTailCallToLoop && !call->IsDelegateInvoke()) { info.compCompHnd->updateEntryPointForTailCall(&call->gtEntryPoint); #ifdef TARGET_XARCH // We have already computed arg info to make the fast tailcall decision, but on X64 we now // have to pass the indirection cell, so redo arg info. call->ResetArgInfo(); #endif } // If this block has a flow successor, make suitable updates. // BasicBlock* const nextBlock = compCurBB->GetUniqueSucc(); if (nextBlock == nullptr) { // No unique successor. compCurBB should be a return. // assert(compCurBB->bbJumpKind == BBJ_RETURN); } else { // Flow no longer reaches nextBlock from here. // fgRemoveRefPred(nextBlock, compCurBB); // Adjust profile weights. // // Note if this is a tail call to loop, further updates // are needed once we install the loop edge. // if (compCurBB->hasProfileWeight() && nextBlock->hasProfileWeight()) { // Since we have linear flow we can update the next block weight. // weight_t const blockWeight = compCurBB->bbWeight; weight_t const nextWeight = nextBlock->bbWeight; weight_t const newNextWeight = nextWeight - blockWeight; // If the math would result in a negative weight then there's // no local repair we can do; just leave things inconsistent. // if (newNextWeight >= 0) { // Note if we'd already morphed the IR in nextblock we might // have done something profile sensitive that we should arguably reconsider. 
// JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n", nextBlock->bbNum, nextWeight, newNextWeight); nextBlock->setBBProfileWeight(newNextWeight); } else { JITDUMP("Not reducing profile weight of " FMT_BB " as its weight " FMT_WT " is less than direct flow pred " FMT_BB " weight " FMT_WT "\n", nextBlock->bbNum, nextWeight, compCurBB->bbNum, blockWeight); } // If nextBlock is not a BBJ_RETURN, it should have a unique successor that // is a BBJ_RETURN, as we allow a little bit of flow after a tail call. // if (nextBlock->bbJumpKind != BBJ_RETURN) { BasicBlock* retBlock = nextBlock->GetUniqueSucc(); // Check if we have a sequence of GT_ASG blocks where the same variable is assigned // to temp locals over and over. // Also allow casts on the RHSs of the assignments, and blocks with GT_NOPs. // // { GT_ASG(t_0, GT_CALL(...)) } // { GT_ASG(t_1, t0) } (with casts on rhs potentially) // ... // { GT_ASG(t_n, t_(n - 1)) } // { GT_RET t_n } // if (retBlock->bbJumpKind != BBJ_RETURN) { // Make sure the block has a single statement assert(nextBlock->firstStmt() == nextBlock->lastStmt()); // And the root node is "ASG(LCL_VAR, LCL_VAR)" GenTree* asgNode = nextBlock->firstStmt()->GetRootNode(); assert(asgNode->OperIs(GT_ASG)); unsigned lcl = asgNode->gtGetOp1()->AsLclVarCommon()->GetLclNum(); while (retBlock->bbJumpKind != BBJ_RETURN) { #ifdef DEBUG Statement* nonEmptyStmt = nullptr; for (Statement* const stmt : retBlock->Statements()) { // Ignore NOP statements if (!stmt->GetRootNode()->OperIs(GT_NOP)) { // Only a single non-NOP statement is allowed assert(nonEmptyStmt == nullptr); nonEmptyStmt = stmt; } } if (nonEmptyStmt != nullptr) { asgNode = nonEmptyStmt->GetRootNode(); if (!asgNode->OperIs(GT_NOP)) { assert(asgNode->OperIs(GT_ASG)); GenTree* rhs = asgNode->gtGetOp2(); while (rhs->OperIs(GT_CAST)) { assert(!rhs->gtOverflow()); rhs = rhs->gtGetOp1(); } assert(lcl == rhs->AsLclVarCommon()->GetLclNum()); lcl = asgNode->gtGetOp1()->AsLclVarCommon()->GetLclNum(); } } #endif retBlock = retBlock->GetUniqueSucc(); } } assert(retBlock->bbJumpKind == BBJ_RETURN); if (retBlock->hasProfileWeight()) { // Do similar updates here. // weight_t const nextNextWeight = retBlock->bbWeight; weight_t const newNextNextWeight = nextNextWeight - blockWeight; // If the math would result in an negative weight then there's // no local repair we can do; just leave things inconsistent. // if (newNextNextWeight >= 0) { JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n", retBlock->bbNum, nextNextWeight, newNextNextWeight); retBlock->setBBProfileWeight(newNextNextWeight); } else { JITDUMP("Not reducing profile weight of " FMT_BB " as its weight " FMT_WT " is less than direct flow pred " FMT_BB " weight " FMT_WT "\n", retBlock->bbNum, nextNextWeight, compCurBB->bbNum, blockWeight); } } } } } #if !FEATURE_TAILCALL_OPT_SHARED_RETURN // We enable shared-ret tail call optimization for recursive calls even if // FEATURE_TAILCALL_OPT_SHARED_RETURN is not defined. if (gtIsRecursiveCall(call)) #endif { // Many tailcalls will have call and ret in the same block, and thus be // BBJ_RETURN, but if the call falls through to a ret, and we are doing a // tailcall, change it here. compCurBB->bbJumpKind = BBJ_RETURN; } GenTree* stmtExpr = fgMorphStmt->GetRootNode(); #ifdef DEBUG // Tail call needs to be in one of the following IR forms // Either a call stmt or // GT_RETURN(GT_CALL(..)) or GT_RETURN(GT_CAST(GT_CALL(..))) // var = GT_CALL(..) 
or var = (GT_CAST(GT_CALL(..))) // GT_COMMA(GT_CALL(..), GT_NOP) or GT_COMMA(GT_CAST(GT_CALL(..)), GT_NOP) // In the above, // GT_CASTS may be nested. genTreeOps stmtOper = stmtExpr->gtOper; if (stmtOper == GT_CALL) { assert(stmtExpr == call); } else { assert(stmtOper == GT_RETURN || stmtOper == GT_ASG || stmtOper == GT_COMMA); GenTree* treeWithCall; if (stmtOper == GT_RETURN) { treeWithCall = stmtExpr->gtGetOp1(); } else if (stmtOper == GT_COMMA) { // Second operation must be nop. assert(stmtExpr->gtGetOp2()->IsNothingNode()); treeWithCall = stmtExpr->gtGetOp1(); } else { treeWithCall = stmtExpr->gtGetOp2(); } // Peel off casts while (treeWithCall->gtOper == GT_CAST) { assert(!treeWithCall->gtOverflow()); treeWithCall = treeWithCall->gtGetOp1(); } assert(treeWithCall == call); } #endif // Store the call type for later to introduce the correct placeholder. var_types origCallType = call->TypeGet(); GenTree* result; if (!canFastTailCall && !tailCallViaJitHelper) { // For tailcall via CORINFO_TAILCALL_HELPERS we transform into regular // calls with (to the JIT) regular control flow so we do not need to do // much special handling. result = fgMorphTailCallViaHelpers(call, tailCallHelpers); } else { // Otherwise we will transform into something that does not return. For // fast tailcalls a "jump" and for tailcall via JIT helper a call to a // JIT helper that does not return. So peel off everything after the // call. Statement* nextMorphStmt = fgMorphStmt->GetNextStmt(); JITDUMP("Remove all stmts after the call.\n"); while (nextMorphStmt != nullptr) { Statement* stmtToRemove = nextMorphStmt; nextMorphStmt = stmtToRemove->GetNextStmt(); fgRemoveStmt(compCurBB, stmtToRemove); } bool isRootReplaced = false; GenTree* root = fgMorphStmt->GetRootNode(); if (root != call) { JITDUMP("Replace root node [%06d] with [%06d] tail call node.\n", dspTreeID(root), dspTreeID(call)); isRootReplaced = true; fgMorphStmt->SetRootNode(call); } // Avoid potential extra work for the return (for example, vzeroupper) call->gtType = TYP_VOID; // The runtime requires that we perform a null check on the `this` argument before // tail calling to a virtual dispatch stub. This requirement is a consequence of limitations // in the runtime's ability to map an AV to a NullReferenceException if // the AV occurs in a dispatch stub that has unmanaged caller. if (call->IsVirtualStub()) { call->gtFlags |= GTF_CALL_NULLCHECK; } // Do some target-specific transformations (before we process the args, // etc.) for the JIT helper case. if (tailCallViaJitHelper) { fgMorphTailCallViaJitHelper(call); // Force re-evaluating the argInfo. fgMorphTailCallViaJitHelper will modify the // argument list, invalidating the argInfo. call->fgArgInfo = nullptr; } // Tail call via JIT helper: The VM can't use return address hijacking // if we're not going to return and the helper doesn't have enough info // to safely poll, so we poll before the tail call, if the block isn't // already safe. Since tail call via helper is a slow mechanism it // doen't matter whether we emit GC poll. his is done to be in parity // with Jit64. Also this avoids GC info size increase if all most all // methods are expected to be tail calls (e.g. F#). // // Note that we can avoid emitting GC-poll if we know that the current // BB is dominated by a Gc-SafePoint block. But we don't have dominator // info at this point. One option is to just add a place holder node for // GC-poll (e.g. GT_GCPOLL) here and remove it in lowering if the block // is dominated by a GC-SafePoint. 
For now it not clear whether // optimizing slow tail calls is worth the effort. As a low cost check, // we check whether the first and current basic blocks are // GC-SafePoints. // // Fast Tail call as epilog+jmp - No need to insert GC-poll. Instead, // fgSetBlockOrder() is going to mark the method as fully interruptible // if the block containing this tail call is reachable without executing // any call. BasicBlock* curBlock = compCurBB; if (canFastTailCall || (fgFirstBB->bbFlags & BBF_GC_SAFE_POINT) || (compCurBB->bbFlags & BBF_GC_SAFE_POINT) || (fgCreateGCPoll(GCPOLL_INLINE, compCurBB) == curBlock)) { // We didn't insert a poll block, so we need to morph the call now // (Normally it will get morphed when we get to the split poll block) GenTree* temp = fgMorphCall(call); noway_assert(temp == call); } // Fast tail call: in case of fast tail calls, we need a jmp epilog and // hence mark it as BBJ_RETURN with BBF_JMP flag set. noway_assert(compCurBB->bbJumpKind == BBJ_RETURN); if (canFastTailCall) { compCurBB->bbFlags |= BBF_HAS_JMP; } else { // We call CORINFO_HELP_TAILCALL which does not return, so we will // not need epilogue. compCurBB->bbJumpKind = BBJ_THROW; } if (isRootReplaced) { // We have replaced the root node of this stmt and deleted the rest, // but we still have the deleted, dead nodes on the `fgMorph*` stack // if the root node was an `ASG`, `RET` or `CAST`. // Return a zero con node to exit morphing of the old trees without asserts // and forbid POST_ORDER morphing doing something wrong with our call. var_types callType; if (varTypeIsStruct(origCallType)) { CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd; Compiler::structPassingKind howToReturnStruct; callType = getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct); assert((howToReturnStruct != SPK_Unknown) && (howToReturnStruct != SPK_ByReference)); if (howToReturnStruct == SPK_ByValue) { callType = TYP_I_IMPL; } else if (howToReturnStruct == SPK_ByValueAsHfa || varTypeIsSIMD(callType)) { callType = TYP_FLOAT; } assert((callType != TYP_UNKNOWN) && !varTypeIsStruct(callType)); } else { callType = origCallType; } assert((callType != TYP_UNKNOWN) && !varTypeIsStruct(callType)); callType = genActualType(callType); GenTree* zero = gtNewZeroConNode(callType); result = fgMorphTree(zero); } else { result = call; } } return result; } //------------------------------------------------------------------------ // fgMorphTailCallViaHelpers: Transform the given GT_CALL tree for tailcall code // generation. // // Arguments: // call - The call to transform // helpers - The tailcall helpers provided by the runtime. // // Return Value: // Returns the transformed node. // // Notes: // This transforms // GT_CALL // {callTarget} // {this} // {args} // into // GT_COMMA // GT_CALL StoreArgsStub // {callTarget} (depending on flags provided by the runtime) // {this} (as a regular arg) // {args} // GT_COMMA // GT_CALL Dispatcher // GT_ADDR ReturnAddress // {CallTargetStub} // GT_ADDR ReturnValue // GT_LCL ReturnValue // whenever the call node returns a value. If the call node does not return a // value the last comma will not be there. // GenTree* Compiler::fgMorphTailCallViaHelpers(GenTreeCall* call, CORINFO_TAILCALL_HELPERS& help) { // R2R requires different handling but we don't support tailcall via // helpers in R2R yet, so just leave it for now. 
    // TODO: R2R: TailCallViaHelper
    assert(!opts.IsReadyToRun());

    JITDUMP("fgMorphTailCallViaHelpers (before):\n");
    DISPTREE(call);

    // Don't support tail calling helper methods
    assert(call->gtCallType != CT_HELPER);

    // We come this route only for tail prefixed calls that cannot be dispatched as
    // fast tail calls
    assert(!call->IsImplicitTailCall());

    // We want to use the following assert, but it can modify the IR in some cases, so we
    // can't do that in an assert.
    // assert(!fgCanFastTailCall(call, nullptr));

    // We might or might not have called fgInitArgInfo before this point: in
    // builds with FEATURE_FASTTAILCALL we will have called it when checking if
    // we could do a fast tailcall, so it is possible we have added extra IR
    // for non-standard args that we must get rid of. Get rid of that IR here
    // and do this first as it will 'expose' the retbuf as the first arg, which
    // we rely upon in fgCreateCallDispatcherAndGetResult.
    call->ResetArgInfo();

    GenTree* callDispatcherAndGetResult = fgCreateCallDispatcherAndGetResult(call, help.hCallTarget, help.hDispatcher);

    // Change the call to a call to the StoreArgs stub.
    if (call->HasRetBufArg())
    {
        JITDUMP("Removing retbuf");
        call->gtCallArgs = call->gtCallArgs->GetNext();
        call->gtCallMoreFlags &= ~GTF_CALL_M_RETBUFFARG;
    }

    const bool stubNeedsTargetFnPtr = (help.flags & CORINFO_TAILCALL_STORE_TARGET) != 0;

    GenTree* doBeforeStoreArgsStub = nullptr;
    GenTree* thisPtrStubArg        = nullptr;

    // Put 'this' in normal param list
    if (call->gtCallThisArg != nullptr)
    {
        JITDUMP("Moving this pointer into arg list\n");
        GenTree* objp    = call->gtCallThisArg->GetNode();
        GenTree* thisPtr = nullptr;
        call->gtCallThisArg = nullptr;

        // JIT will need one or two copies of "this" in the following cases:
        //   1) the call needs null check;
        //   2) StoreArgs stub needs the target function pointer address and if the call is virtual
        //      the stub also needs "this" in order to evaluate the target.

        const bool callNeedsNullCheck = call->NeedsNullCheck();
        const bool stubNeedsThisPtr   = stubNeedsTargetFnPtr && call->IsVirtual();

        // TODO-Review: The following transformation is implemented under the assumption that
        // both conditions can be true. However, I could not construct such an example
        // where a virtual tail call would require a null check. In case the conditions
        // are mutually exclusive, the following could be simplified.

        if (callNeedsNullCheck || stubNeedsThisPtr)
        {
            // Clone "this" if "this" has no side effects.
            if ((objp->gtFlags & GTF_SIDE_EFFECT) == 0)
            {
                thisPtr = gtClone(objp, true);
            }

            // Create a temp and spill "this" to the temp if "this" has side effects or "this" was too complex to clone.
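            // A sketch of the trees built below when a temp is needed ("tmpThis" is an illustrative name):
            //   doBeforeStoreArgsStub = ASG(tmpThis, objp)
            //                           ... wrapped as COMMA(ASG(tmpThis, objp), NULLCHECK(tmpThis)) if a null
            //                           check is also required
            //   thisPtr               = LCL_VAR tmpThis
            //   thisPtrStubArg        = LCL_VAR tmpThis (only when the stub needs "this")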
if (thisPtr == nullptr) { const unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr")); // tmp = "this" doBeforeStoreArgsStub = gtNewTempAssign(lclNum, objp); if (callNeedsNullCheck) { // COMMA(tmp = "this", deref(tmp)) GenTree* tmp = gtNewLclvNode(lclNum, objp->TypeGet()); GenTree* nullcheck = gtNewNullCheck(tmp, compCurBB); doBeforeStoreArgsStub = gtNewOperNode(GT_COMMA, TYP_VOID, doBeforeStoreArgsStub, nullcheck); } thisPtr = gtNewLclvNode(lclNum, objp->TypeGet()); if (stubNeedsThisPtr) { thisPtrStubArg = gtNewLclvNode(lclNum, objp->TypeGet()); } } else { if (callNeedsNullCheck) { // deref("this") doBeforeStoreArgsStub = gtNewNullCheck(objp, compCurBB); if (stubNeedsThisPtr) { thisPtrStubArg = gtClone(objp, true); } } else { assert(stubNeedsThisPtr); thisPtrStubArg = objp; } } call->gtFlags &= ~GTF_CALL_NULLCHECK; assert((thisPtrStubArg != nullptr) == stubNeedsThisPtr); } else { thisPtr = objp; } // During rationalization tmp="this" and null check will be materialized // in the right execution order. assert(thisPtr != nullptr); call->gtCallArgs = gtPrependNewCallArg(thisPtr, call->gtCallArgs); } // We may need to pass the target, for instance for calli or generic methods // where we pass instantiating stub. if (stubNeedsTargetFnPtr) { JITDUMP("Adding target since VM requested it\n"); GenTree* target; if (!call->IsVirtual()) { if (call->gtCallType == CT_INDIRECT) { noway_assert(call->gtCallAddr != nullptr); target = call->gtCallAddr; } else { CORINFO_CONST_LOOKUP addrInfo; info.compCompHnd->getFunctionEntryPoint(call->gtCallMethHnd, &addrInfo); CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(addrInfo.accessType != IAT_PPVALUE && addrInfo.accessType != IAT_RELPVALUE); if (addrInfo.accessType == IAT_VALUE) { handle = addrInfo.handle; } else if (addrInfo.accessType == IAT_PVALUE) { pIndirection = addrInfo.addr; } target = gtNewIconEmbHndNode(handle, pIndirection, GTF_ICON_FTN_ADDR, call->gtCallMethHnd); } } else { assert(!call->tailCallInfo->GetSig()->hasTypeArg()); CORINFO_CALL_INFO callInfo; unsigned flags = CORINFO_CALLINFO_LDFTN; if (call->tailCallInfo->IsCallvirt()) { flags |= CORINFO_CALLINFO_CALLVIRT; } eeGetCallInfo(call->tailCallInfo->GetToken(), nullptr, (CORINFO_CALLINFO_FLAGS)flags, &callInfo); target = getVirtMethodPointerTree(thisPtrStubArg, call->tailCallInfo->GetToken(), &callInfo); } // Insert target as last arg GenTreeCall::Use** newArgSlot = &call->gtCallArgs; while (*newArgSlot != nullptr) { newArgSlot = &(*newArgSlot)->NextRef(); } *newArgSlot = gtNewCallArgs(target); } // This is now a direct call to the store args stub and not a tailcall. call->gtCallType = CT_USER_FUNC; call->gtCallMethHnd = help.hStoreArgs; call->gtFlags &= ~GTF_CALL_VIRT_KIND_MASK; call->gtCallMoreFlags &= ~(GTF_CALL_M_TAILCALL | GTF_CALL_M_DELEGATE_INV | GTF_CALL_M_WRAPPER_DELEGATE_INV); // The store-args stub returns no value. 
call->gtRetClsHnd = nullptr; call->gtType = TYP_VOID; call->gtReturnType = TYP_VOID; GenTree* callStoreArgsStub = call; if (doBeforeStoreArgsStub != nullptr) { callStoreArgsStub = gtNewOperNode(GT_COMMA, TYP_VOID, doBeforeStoreArgsStub, callStoreArgsStub); } GenTree* finalTree = gtNewOperNode(GT_COMMA, callDispatcherAndGetResult->TypeGet(), callStoreArgsStub, callDispatcherAndGetResult); finalTree = fgMorphTree(finalTree); JITDUMP("fgMorphTailCallViaHelpers (after):\n"); DISPTREE(finalTree); return finalTree; } //------------------------------------------------------------------------ // fgCreateCallDispatcherAndGetResult: Given a call // CALL // {callTarget} // {retbuf} // {this} // {args} // create a similarly typed node that calls the tailcall dispatcher and returns // the result, as in the following: // COMMA // CALL TailCallDispatcher // ADDR ReturnAddress // &CallTargetFunc // ADDR RetValue // RetValue // If the call has type TYP_VOID, only create the CALL node. // // Arguments: // origCall - the call // callTargetStubHnd - the handle of the CallTarget function (this is a special // IL stub created by the runtime) // dispatcherHnd - the handle of the tailcall dispatcher function // // Return Value: // A node that can be used in place of the original call. // GenTree* Compiler::fgCreateCallDispatcherAndGetResult(GenTreeCall* origCall, CORINFO_METHOD_HANDLE callTargetStubHnd, CORINFO_METHOD_HANDLE dispatcherHnd) { GenTreeCall* callDispatcherNode = gtNewCallNode(CT_USER_FUNC, dispatcherHnd, TYP_VOID, nullptr, fgMorphStmt->GetDebugInfo()); // The dispatcher has signature // void DispatchTailCalls(void* callersRetAddrSlot, void* callTarget, void* retValue) // Add return value arg. GenTree* retValArg; GenTree* retVal = nullptr; unsigned int newRetLcl = BAD_VAR_NUM; GenTree* copyToRetBufNode = nullptr; if (origCall->HasRetBufArg()) { JITDUMP("Transferring retbuf\n"); GenTree* retBufArg = origCall->gtCallArgs->GetNode(); assert(info.compRetBuffArg != BAD_VAR_NUM); assert(retBufArg->OperIsLocal()); assert(retBufArg->AsLclVarCommon()->GetLclNum() == info.compRetBuffArg); // Caller return buffer argument retBufArg can point to GC heap while the dispatcher expects // the return value argument retValArg to point to the stack. // We use a temporary stack allocated return buffer to hold the value during the dispatcher call // and copy the value back to the caller return buffer after that. 
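    // In tree form (sketch; "tmpRetBuf" is an illustrative name):
    //   retValArg        = ADDR(LCL_VAR tmpRetBuf)
    //   copyToRetBufNode = copy block OBJ(callerRetBufAddr) = LCL_VAR tmpRetBuf, executed after the dispatcher call.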
unsigned int tmpRetBufNum = lvaGrabTemp(true DEBUGARG("substitute local for return buffer")); constexpr bool unsafeValueClsCheck = false; lvaSetStruct(tmpRetBufNum, origCall->gtRetClsHnd, unsafeValueClsCheck); lvaSetVarAddrExposed(tmpRetBufNum DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF)); var_types tmpRetBufType = lvaGetDesc(tmpRetBufNum)->TypeGet(); retValArg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(tmpRetBufNum, tmpRetBufType)); var_types callerRetBufType = lvaGetDesc(info.compRetBuffArg)->TypeGet(); GenTree* dstAddr = gtNewLclvNode(info.compRetBuffArg, callerRetBufType); GenTree* dst = gtNewObjNode(info.compMethodInfo->args.retTypeClass, dstAddr); GenTree* src = gtNewLclvNode(tmpRetBufNum, tmpRetBufType); constexpr bool isVolatile = false; constexpr bool isCopyBlock = true; copyToRetBufNode = gtNewBlkOpNode(dst, src, isVolatile, isCopyBlock); if (origCall->gtType != TYP_VOID) { retVal = gtClone(retBufArg); } } else if (origCall->gtType != TYP_VOID) { JITDUMP("Creating a new temp for the return value\n"); newRetLcl = lvaGrabTemp(false DEBUGARG("Return value for tail call dispatcher")); if (varTypeIsStruct(origCall->gtType)) { lvaSetStruct(newRetLcl, origCall->gtRetClsHnd, false); } else { // Since we pass a reference to the return value to the dispatcher // we need to use the real return type so we can normalize it on // load when we return it. lvaTable[newRetLcl].lvType = (var_types)origCall->gtReturnType; } lvaSetVarAddrExposed(newRetLcl DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF)); retValArg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(newRetLcl, genActualType(lvaTable[newRetLcl].lvType))); retVal = gtNewLclvNode(newRetLcl, genActualType(lvaTable[newRetLcl].lvType)); if (varTypeIsStruct(origCall->gtType)) { retVal = impFixupStructReturnType(retVal, origCall->gtRetClsHnd, origCall->GetUnmanagedCallConv()); } } else { JITDUMP("No return value so using null pointer as arg\n"); retValArg = gtNewZeroConNode(TYP_I_IMPL); } callDispatcherNode->gtCallArgs = gtPrependNewCallArg(retValArg, callDispatcherNode->gtCallArgs); // Add callTarget callDispatcherNode->gtCallArgs = gtPrependNewCallArg(new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, callTargetStubHnd), callDispatcherNode->gtCallArgs); // Add the caller's return address slot. if (lvaRetAddrVar == BAD_VAR_NUM) { lvaRetAddrVar = lvaGrabTemp(false DEBUGARG("Return address")); lvaTable[lvaRetAddrVar].lvType = TYP_I_IMPL; lvaSetVarAddrExposed(lvaRetAddrVar DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF)); } GenTree* retAddrSlot = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaRetAddrVar, TYP_I_IMPL)); callDispatcherNode->gtCallArgs = gtPrependNewCallArg(retAddrSlot, callDispatcherNode->gtCallArgs); GenTree* finalTree = callDispatcherNode; if (copyToRetBufNode != nullptr) { finalTree = gtNewOperNode(GT_COMMA, TYP_VOID, callDispatcherNode, copyToRetBufNode); } if (origCall->gtType == TYP_VOID) { return finalTree; } assert(retVal != nullptr); finalTree = gtNewOperNode(GT_COMMA, origCall->TypeGet(), finalTree, retVal); // The JIT seems to want to CSE this comma and messes up multi-reg ret // values in the process. Just avoid CSE'ing this tree entirely in that // case. 
if (origCall->HasMultiRegRetVal()) { finalTree->gtFlags |= GTF_DONT_CSE; } return finalTree; } //------------------------------------------------------------------------ // getLookupTree: get a lookup tree // // Arguments: // pResolvedToken - resolved token of the call // pLookup - the lookup to get the tree for // handleFlags - flags to set on the result node // compileTimeHandle - compile-time handle corresponding to the lookup // // Return Value: // A node representing the lookup tree // GenTree* Compiler::getLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle) { if (!pLookup->lookupKind.needsRuntimeLookup) { // No runtime lookup is required. // Access is direct or memory-indirect (of a fixed address) reference CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE); if (pLookup->constLookup.accessType == IAT_VALUE) { handle = pLookup->constLookup.handle; } else if (pLookup->constLookup.accessType == IAT_PVALUE) { pIndirection = pLookup->constLookup.addr; } return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle); } return getRuntimeLookupTree(pResolvedToken, pLookup, compileTimeHandle); } //------------------------------------------------------------------------ // getRuntimeLookupTree: get a tree for a runtime lookup // // Arguments: // pResolvedToken - resolved token of the call // pLookup - the lookup to get the tree for // compileTimeHandle - compile-time handle corresponding to the lookup // // Return Value: // A node representing the runtime lookup tree // GenTree* Compiler::getRuntimeLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle) { assert(!compIsForInlining()); CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup; // If pRuntimeLookup->indirections is equal to CORINFO_USEHELPER, it specifies that a run-time helper should be // used; otherwise, it specifies the number of indirections via pRuntimeLookup->offsets array. if ((pRuntimeLookup->indirections == CORINFO_USEHELPER) || pRuntimeLookup->testForNull || pRuntimeLookup->testForFixup) { // If the first condition is true, runtime lookup tree is available only via the run-time helper function. // TODO-CQ If the second or third condition is true, we are always using the slow path since we can't // introduce control flow at this point. See impRuntimeLookupToTree for the logic to avoid calling the helper. // The long-term solution is to introduce a new node representing a runtime lookup, create instances // of that node both in the importer and here, and expand the node in lower (introducing control flow if // necessary). 
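    // The slow path below emits a single helper call that performs the entire lookup at run time,
    // passing the generic context tree for the current method (see getRuntimeContextTree).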
return gtNewRuntimeLookupHelperCallNode(pRuntimeLookup, getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind), compileTimeHandle); } GenTree* result = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind); ArrayStack<GenTree*> stmts(getAllocator(CMK_ArrayStack)); auto cloneTree = [&](GenTree** tree DEBUGARG(const char* reason)) -> GenTree* { if (!((*tree)->gtFlags & GTF_GLOB_EFFECT)) { GenTree* clone = gtClone(*tree, true); if (clone) { return clone; } } unsigned temp = lvaGrabTemp(true DEBUGARG(reason)); stmts.Push(gtNewTempAssign(temp, *tree)); *tree = gtNewLclvNode(temp, lvaGetActualType(temp)); return gtNewLclvNode(temp, lvaGetActualType(temp)); }; // Apply repeated indirections for (WORD i = 0; i < pRuntimeLookup->indirections; i++) { GenTree* preInd = nullptr; if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset)) { preInd = cloneTree(&result DEBUGARG("getRuntimeLookupTree indirectOffset")); } if (i != 0) { result = gtNewOperNode(GT_IND, TYP_I_IMPL, result); result->gtFlags |= GTF_IND_NONFAULTING; result->gtFlags |= GTF_IND_INVARIANT; } if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset)) { result = gtNewOperNode(GT_ADD, TYP_I_IMPL, preInd, result); } if (pRuntimeLookup->offsets[i] != 0) { result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL)); } } assert(!pRuntimeLookup->testForNull); if (pRuntimeLookup->indirections > 0) { assert(!pRuntimeLookup->testForFixup); result = gtNewOperNode(GT_IND, TYP_I_IMPL, result); result->gtFlags |= GTF_IND_NONFAULTING; } // Produces GT_COMMA(stmt1, GT_COMMA(stmt2, ... GT_COMMA(stmtN, result))) while (!stmts.Empty()) { result = gtNewOperNode(GT_COMMA, TYP_I_IMPL, stmts.Pop(), result); } DISPTREE(result); return result; } //------------------------------------------------------------------------ // getVirtMethodPointerTree: get a tree for a virtual method pointer // // Arguments: // thisPtr - tree representing `this` pointer // pResolvedToken - pointer to the resolved token of the method // pCallInfo - pointer to call info // // Return Value: // A node representing the virtual method pointer GenTree* Compiler::getVirtMethodPointerTree(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { GenTree* exactTypeDesc = getTokenHandleTree(pResolvedToken, true); GenTree* exactMethodDesc = getTokenHandleTree(pResolvedToken, false); GenTreeCall::Use* helpArgs = gtNewCallArgs(thisPtr, exactTypeDesc, exactMethodDesc); return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs); } //------------------------------------------------------------------------ // getTokenHandleTree: get a handle tree for a token // // Arguments: // pResolvedToken - token to get a handle for // parent - whether parent should be imported // // Return Value: // A node representing the virtual method pointer GenTree* Compiler::getTokenHandleTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool parent) { CORINFO_GENERICHANDLE_RESULT embedInfo; info.compCompHnd->embedGenericHandle(pResolvedToken, parent, &embedInfo); GenTree* result = getLookupTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token), embedInfo.compileTimeHandle); // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node. 
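    // The wrapper records the compile-time handle and its kind alongside the lookup tree so that
    // later phases can still recognize what the lookup produces.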
if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup) { result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result); } return result; } /***************************************************************************** * * Transform the given GT_CALL tree for tail call via JIT helper. */ void Compiler::fgMorphTailCallViaJitHelper(GenTreeCall* call) { JITDUMP("fgMorphTailCallViaJitHelper (before):\n"); DISPTREE(call); // For the helper-assisted tail calls, we need to push all the arguments // into a single list, and then add a few extra at the beginning or end. // // For x86, the tailcall helper is defined as: // // JIT_TailCall(<function args>, int numberOfOldStackArgsWords, int numberOfNewStackArgsWords, int flags, void* // callTarget) // // Note that the special arguments are on the stack, whereas the function arguments follow // the normal convention: there might be register arguments in ECX and EDX. The stack will // look like (highest address at the top): // first normal stack argument // ... // last normal stack argument // numberOfOldStackArgs // numberOfNewStackArgs // flags // callTarget // // Each special arg is 4 bytes. // // 'flags' is a bitmask where: // 1 == restore callee-save registers (EDI,ESI,EBX). The JIT always saves all // callee-saved registers for tailcall functions. Note that the helper assumes // that the callee-saved registers live immediately below EBP, and must have been // pushed in this order: EDI, ESI, EBX. // 2 == call target is a virtual stub dispatch. // // The x86 tail call helper lives in VM\i386\jithelp.asm. See that function for more details // on the custom calling convention. // Check for PInvoke call types that we don't handle in codegen yet. assert(!call->IsUnmanaged()); assert(call->IsVirtual() || (call->gtCallType != CT_INDIRECT) || (call->gtCallCookie == nullptr)); // Don't support tail calling helper methods assert(call->gtCallType != CT_HELPER); // We come this route only for tail prefixed calls that cannot be dispatched as // fast tail calls assert(!call->IsImplicitTailCall()); // We want to use the following assert, but it can modify the IR in some cases, so we // can't do that in an assert. // assert(!fgCanFastTailCall(call, nullptr)); // First move the 'this' pointer (if any) onto the regular arg list. We do this because // we are going to prepend special arguments onto the argument list (for non-x86 platforms), // and thus shift where the 'this' pointer will be passed to a later argument slot. In // addition, for all platforms, we are going to change the call into a helper call. Our code // generation code for handling calls to helpers does not handle 'this' pointers. So, when we // do this transformation, we must explicitly create a null 'this' pointer check, if required, // since special 'this' pointer handling will no longer kick in. // // Some call types, such as virtual vtable calls, require creating a call address expression // that involves the "this" pointer. Lowering will sometimes create an embedded statement // to create a temporary that is assigned to the "this" pointer expression, and then use // that temp to create the call address expression. This temp creation embedded statement // will occur immediately before the "this" pointer argument, and then will be used for both // the "this" pointer argument as well as the call address expression. In the normal ordering, // the embedded statement establishing the "this" pointer temp will execute before both uses // of the temp. 
However, for tail calls via a helper, we move the "this" pointer onto the // normal call argument list, and insert a placeholder which will hold the call address // expression. For non-x86, things are ok, because the order of execution of these is not // altered. However, for x86, the call address expression is inserted as the *last* argument // in the argument list, *after* the "this" pointer. It will be put on the stack, and be // evaluated first. To ensure we don't end up with out-of-order temp definition and use, // for those cases where call lowering creates an embedded form temp of "this", we will // create a temp here, early, that will later get morphed correctly. if (call->gtCallThisArg != nullptr) { GenTree* thisPtr = nullptr; GenTree* objp = call->gtCallThisArg->GetNode(); call->gtCallThisArg = nullptr; if ((call->IsDelegateInvoke() || call->IsVirtualVtable()) && !objp->OperIs(GT_LCL_VAR)) { // tmp = "this" unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr")); GenTree* asg = gtNewTempAssign(lclNum, objp); // COMMA(tmp = "this", tmp) var_types vt = objp->TypeGet(); GenTree* tmp = gtNewLclvNode(lclNum, vt); thisPtr = gtNewOperNode(GT_COMMA, vt, asg, tmp); objp = thisPtr; } if (call->NeedsNullCheck()) { // clone "this" if "this" has no side effects. if ((thisPtr == nullptr) && !(objp->gtFlags & GTF_SIDE_EFFECT)) { thisPtr = gtClone(objp, true); } var_types vt = objp->TypeGet(); if (thisPtr == nullptr) { // create a temp if either "this" has side effects or "this" is too complex to clone. // tmp = "this" unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr")); GenTree* asg = gtNewTempAssign(lclNum, objp); // COMMA(tmp = "this", deref(tmp)) GenTree* tmp = gtNewLclvNode(lclNum, vt); GenTree* nullcheck = gtNewNullCheck(tmp, compCurBB); asg = gtNewOperNode(GT_COMMA, TYP_VOID, asg, nullcheck); // COMMA(COMMA(tmp = "this", deref(tmp)), tmp) thisPtr = gtNewOperNode(GT_COMMA, vt, asg, gtNewLclvNode(lclNum, vt)); } else { // thisPtr = COMMA(deref("this"), "this") GenTree* nullcheck = gtNewNullCheck(thisPtr, compCurBB); thisPtr = gtNewOperNode(GT_COMMA, vt, nullcheck, gtClone(objp, true)); } call->gtFlags &= ~GTF_CALL_NULLCHECK; } else { thisPtr = objp; } // TODO-Cleanup: we leave it as a virtual stub call to // use logic in `LowerVirtualStubCall`, clear GTF_CALL_VIRT_KIND_MASK here // and change `LowerCall` to recognize it as a direct call. // During rationalization tmp="this" and null check will // materialize as embedded stmts in right execution order. assert(thisPtr != nullptr); call->gtCallArgs = gtPrependNewCallArg(thisPtr, call->gtCallArgs); } // Find the end of the argument list. ppArg will point at the last pointer; setting *ppArg will // append to the list. GenTreeCall::Use** ppArg = &call->gtCallArgs; for (GenTreeCall::Use& use : call->Args()) { ppArg = &use.NextRef(); } assert(ppArg != nullptr); assert(*ppArg == nullptr); unsigned nOldStkArgsWords = (compArgSize - (codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES)) / REGSIZE_BYTES; GenTree* arg3 = gtNewIconNode((ssize_t)nOldStkArgsWords, TYP_I_IMPL); *ppArg = gtNewCallArgs(arg3); // numberOfOldStackArgs ppArg = &((*ppArg)->NextRef()); // Inject a placeholder for the count of outgoing stack arguments that the Lowering phase will generate. // The constant will be replaced. GenTree* arg2 = gtNewIconNode(9, TYP_I_IMPL); *ppArg = gtNewCallArgs(arg2); // numberOfNewStackArgs ppArg = &((*ppArg)->NextRef()); // Inject a placeholder for the flags. // The constant will be replaced. 
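    // The special args appended so far are numberOfOldStackArgs and numberOfNewStackArgs; the flags
    // and the real call target placeholders below complete the layout described at the top of this function.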
GenTree* arg1 = gtNewIconNode(8, TYP_I_IMPL); *ppArg = gtNewCallArgs(arg1); ppArg = &((*ppArg)->NextRef()); // Inject a placeholder for the real call target that the Lowering phase will generate. // The constant will be replaced. GenTree* arg0 = gtNewIconNode(7, TYP_I_IMPL); *ppArg = gtNewCallArgs(arg0); // It is now a varargs tail call. call->gtCallMoreFlags |= GTF_CALL_M_VARARGS; call->gtFlags &= ~GTF_CALL_POP_ARGS; // The function is responsible for doing explicit null check when it is necessary. assert(!call->NeedsNullCheck()); JITDUMP("fgMorphTailCallViaJitHelper (after):\n"); DISPTREE(call); } //------------------------------------------------------------------------ // fgGetStubAddrArg: Return the virtual stub address for the given call. // // Notes: // the JIT must place the address of the stub used to load the call target, // the "stub indirection cell", in special call argument with special register. // // Arguments: // call - a call that needs virtual stub dispatching. // // Return Value: // addr tree with set resister requirements. // GenTree* Compiler::fgGetStubAddrArg(GenTreeCall* call) { assert(call->IsVirtualStub()); GenTree* stubAddrArg; if (call->gtCallType == CT_INDIRECT) { stubAddrArg = gtClone(call->gtCallAddr, true); } else { assert(call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT); ssize_t addr = ssize_t(call->gtStubCallStubAddr); stubAddrArg = gtNewIconHandleNode(addr, GTF_ICON_FTN_ADDR); #ifdef DEBUG stubAddrArg->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd; #endif } assert(stubAddrArg != nullptr); stubAddrArg->SetRegNum(virtualStubParamInfo->GetReg()); return stubAddrArg; } //------------------------------------------------------------------------------ // fgGetArgTabEntryParameterLclNum : Get the lcl num for the parameter that // corresponds to the argument to a recursive call. // // Notes: // Due to non-standard args this is not just fgArgTabEntry::argNum. // For example, in R2R compilations we will have added a non-standard // arg for the R2R indirection cell. // // Arguments: // argTabEntry - the arg // unsigned Compiler::fgGetArgTabEntryParameterLclNum(GenTreeCall* call, fgArgTabEntry* argTabEntry) { fgArgInfo* argInfo = call->fgArgInfo; unsigned argCount = argInfo->ArgCount(); fgArgTabEntry** argTable = argInfo->ArgTable(); unsigned numToRemove = 0; for (unsigned i = 0; i < argCount; i++) { fgArgTabEntry* arg = argTable[i]; // Late added args add extra args that do not map to IL parameters and that we should not reassign. if (!arg->isNonStandard() || !arg->isNonStandardArgAddedLate()) continue; if (arg->argNum < argTabEntry->argNum) numToRemove++; } return argTabEntry->argNum - numToRemove; } //------------------------------------------------------------------------------ // fgMorphRecursiveFastTailCallIntoLoop : Transform a recursive fast tail call into a loop. // // // Arguments: // block - basic block ending with a recursive fast tail call // recursiveTailCall - recursive tail call to transform // // Notes: // The legality of the transformation is ensured by the checks in endsWithTailCallConvertibleToLoop. void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall) { assert(recursiveTailCall->IsTailCallConvertibleToLoop()); Statement* lastStmt = block->lastStmt(); assert(recursiveTailCall == lastStmt->GetRootNode()); // Transform recursive tail call into a loop. 
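    // The transformation proceeds as follows:
    //   1. Hoist the setup statement for the 'this' argument (if any) above the call.
    //   2. Assign arguments to temps where needed, then assign the temps to the caller's parameters, so that
    //      parameters are not overwritten before arguments that depend on them are evaluated.
    //   3. Re-emit the zero-initialization normally done in the prolog, when required.
    //   4. Remove the call statement and turn the block into a BBJ_ALWAYS jump back to the method entry
    //      (fgEntryBB for OSR, otherwise the block after the scratch entry block).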
Statement* earlyArgInsertionPoint = lastStmt; const DebugInfo& callDI = lastStmt->GetDebugInfo(); // Hoist arg setup statement for the 'this' argument. GenTreeCall::Use* thisArg = recursiveTailCall->gtCallThisArg; if ((thisArg != nullptr) && !thisArg->GetNode()->IsNothingNode() && !thisArg->GetNode()->IsArgPlaceHolderNode()) { Statement* thisArgStmt = gtNewStmt(thisArg->GetNode(), callDI); fgInsertStmtBefore(block, earlyArgInsertionPoint, thisArgStmt); } // All arguments whose trees may involve caller parameter local variables need to be assigned to temps first; // then the temps need to be assigned to the method parameters. This is done so that the caller // parameters are not re-assigned before call arguments depending on them are evaluated. // tmpAssignmentInsertionPoint and paramAssignmentInsertionPoint keep track of // where the next temp or parameter assignment should be inserted. // In the example below the first call argument (arg1 - 1) needs to be assigned to a temp first // while the second call argument (const 1) doesn't. // Basic block before tail recursion elimination: // ***** BB04, stmt 1 (top level) // [000037] ------------ * stmtExpr void (top level) (IL 0x00A...0x013) // [000033] --C - G------ - \--* call void RecursiveMethod // [000030] ------------ | / --* const int - 1 // [000031] ------------arg0 in rcx + --* +int // [000029] ------------ | \--* lclVar int V00 arg1 // [000032] ------------arg1 in rdx \--* const int 1 // // // Basic block after tail recursion elimination : // ***** BB04, stmt 1 (top level) // [000051] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? ) // [000030] ------------ | / --* const int - 1 // [000031] ------------ | / --* +int // [000029] ------------ | | \--* lclVar int V00 arg1 // [000050] - A---------- \--* = int // [000049] D------N---- \--* lclVar int V02 tmp0 // // ***** BB04, stmt 2 (top level) // [000055] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? ) // [000052] ------------ | / --* lclVar int V02 tmp0 // [000054] - A---------- \--* = int // [000053] D------N---- \--* lclVar int V00 arg0 // ***** BB04, stmt 3 (top level) // [000058] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? ) // [000032] ------------ | / --* const int 1 // [000057] - A---------- \--* = int // [000056] D------N---- \--* lclVar int V01 arg1 Statement* tmpAssignmentInsertionPoint = lastStmt; Statement* paramAssignmentInsertionPoint = lastStmt; // Process early args. They may contain both setup statements for late args and actual args. // Early args don't include 'this' arg. We need to account for that so that the call to gtArgEntryByArgNum // below has the correct second argument. int earlyArgIndex = (thisArg == nullptr) ? 0 : 1; for (GenTreeCall::Use& use : recursiveTailCall->Args()) { GenTree* earlyArg = use.GetNode(); if (!earlyArg->IsNothingNode() && !earlyArg->IsArgPlaceHolderNode()) { if ((earlyArg->gtFlags & GTF_LATE_ARG) != 0) { // This is a setup node so we need to hoist it. Statement* earlyArgStmt = gtNewStmt(earlyArg, callDI); fgInsertStmtBefore(block, earlyArgInsertionPoint, earlyArgStmt); } else { // This is an actual argument that needs to be assigned to the corresponding caller parameter. 
fgArgTabEntry* curArgTabEntry = gtArgEntryByArgNum(recursiveTailCall, earlyArgIndex); // Late-added non-standard args are extra args that are not passed as locals, so skip those if (!curArgTabEntry->isNonStandard() || !curArgTabEntry->isNonStandardArgAddedLate()) { Statement* paramAssignStmt = fgAssignRecursiveCallArgToCallerParam(earlyArg, curArgTabEntry, fgGetArgTabEntryParameterLclNum(recursiveTailCall, curArgTabEntry), block, callDI, tmpAssignmentInsertionPoint, paramAssignmentInsertionPoint); if ((tmpAssignmentInsertionPoint == lastStmt) && (paramAssignStmt != nullptr)) { // All temp assignments will happen before the first param assignment. tmpAssignmentInsertionPoint = paramAssignStmt; } } } } earlyArgIndex++; } // Process late args. int lateArgIndex = 0; for (GenTreeCall::Use& use : recursiveTailCall->LateArgs()) { // A late argument is an actual argument that needs to be assigned to the corresponding caller's parameter. GenTree* lateArg = use.GetNode(); fgArgTabEntry* curArgTabEntry = gtArgEntryByLateArgIndex(recursiveTailCall, lateArgIndex); // Late-added non-standard args are extra args that are not passed as locals, so skip those if (!curArgTabEntry->isNonStandard() || !curArgTabEntry->isNonStandardArgAddedLate()) { Statement* paramAssignStmt = fgAssignRecursiveCallArgToCallerParam(lateArg, curArgTabEntry, fgGetArgTabEntryParameterLclNum(recursiveTailCall, curArgTabEntry), block, callDI, tmpAssignmentInsertionPoint, paramAssignmentInsertionPoint); if ((tmpAssignmentInsertionPoint == lastStmt) && (paramAssignStmt != nullptr)) { // All temp assignments will happen before the first param assignment. tmpAssignmentInsertionPoint = paramAssignStmt; } } lateArgIndex++; } // If the method has starg.s 0 or ldarga.s 0 a special local (lvaArg0Var) is created so that // compThisArg stays immutable. Normally it's assigned in fgFirstBBScratch block. Since that // block won't be in the loop (it's assumed to have no predecessors), we need to update the special local here. if (!info.compIsStatic && (lvaArg0Var != info.compThisArg)) { var_types thisType = lvaTable[info.compThisArg].TypeGet(); GenTree* arg0 = gtNewLclvNode(lvaArg0Var, thisType); GenTree* arg0Assignment = gtNewAssignNode(arg0, gtNewLclvNode(info.compThisArg, thisType)); Statement* arg0AssignmentStmt = gtNewStmt(arg0Assignment, callDI); fgInsertStmtBefore(block, paramAssignmentInsertionPoint, arg0AssignmentStmt); } // If compInitMem is set, we may need to zero-initialize some locals. Normally it's done in the prolog // but this loop can't include the prolog. Since we don't have liveness information, we insert zero-initialization // for all non-parameter IL locals as well as temp structs with GC fields. // Liveness phase will remove unnecessary initializations. 
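    // For each local that qualifies, this emits either "lcl = 0" (scalars) or an init block
    // "BLK(lcl) = 0" run through fgMorphInitBlock (structs), inserted just before the call statement
    // that is removed below.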
if (info.compInitMem || compSuppressedZeroInit) { unsigned varNum; LclVarDsc* varDsc; for (varNum = 0, varDsc = lvaTable; varNum < lvaCount; varNum++, varDsc++) { #if FEATURE_FIXED_OUT_ARGS if (varNum == lvaOutgoingArgSpaceVar) { continue; } #endif // FEATURE_FIXED_OUT_ARGS if (!varDsc->lvIsParam) { var_types lclType = varDsc->TypeGet(); bool isUserLocal = (varNum < info.compLocalsCount); bool structWithGCFields = ((lclType == TYP_STRUCT) && varDsc->GetLayout()->HasGCPtr()); bool hadSuppressedInit = varDsc->lvSuppressedZeroInit; if ((info.compInitMem && (isUserLocal || structWithGCFields)) || hadSuppressedInit) { GenTree* lcl = gtNewLclvNode(varNum, lclType); GenTree* init = nullptr; if (varTypeIsStruct(lclType)) { const bool isVolatile = false; const bool isCopyBlock = false; init = gtNewBlkOpNode(lcl, gtNewIconNode(0), isVolatile, isCopyBlock); init = fgMorphInitBlock(init); } else { GenTree* zero = gtNewZeroConNode(genActualType(lclType)); init = gtNewAssignNode(lcl, zero); } Statement* initStmt = gtNewStmt(init, callDI); fgInsertStmtBefore(block, lastStmt, initStmt); } } } } // Remove the call fgRemoveStmt(block, lastStmt); // Set the loop edge. if (opts.IsOSR()) { // Todo: this may not look like a viable loop header. // Might need the moral equivalent of a scratch BB. block->bbJumpDest = fgEntryBB; } else { // Ensure we have a scratch block and then target the next // block. Loop detection needs to see a pred out of the loop, // so mark the scratch block BBF_DONT_REMOVE to prevent empty // block removal on it. fgEnsureFirstBBisScratch(); fgFirstBB->bbFlags |= BBF_DONT_REMOVE; block->bbJumpDest = fgFirstBB->bbNext; } // Finish hooking things up. block->bbJumpKind = BBJ_ALWAYS; fgAddRefPred(block->bbJumpDest, block); block->bbFlags &= ~BBF_HAS_JMP; } //------------------------------------------------------------------------------ // fgAssignRecursiveCallArgToCallerParam : Assign argument to a recursive call to the corresponding caller parameter. // // // Arguments: // arg - argument to assign // argTabEntry - argument table entry corresponding to arg // lclParamNum - the lcl num of the parameter // block --- basic block the call is in // callILOffset - IL offset of the call // tmpAssignmentInsertionPoint - tree before which temp assignment should be inserted (if necessary) // paramAssignmentInsertionPoint - tree before which parameter assignment should be inserted // // Return Value: // parameter assignment statement if one was inserted; nullptr otherwise. Statement* Compiler::fgAssignRecursiveCallArgToCallerParam(GenTree* arg, fgArgTabEntry* argTabEntry, unsigned lclParamNum, BasicBlock* block, const DebugInfo& callDI, Statement* tmpAssignmentInsertionPoint, Statement* paramAssignmentInsertionPoint) { // Call arguments should be assigned to temps first and then the temps should be assigned to parameters because // some argument trees may reference parameters directly. GenTree* argInTemp = nullptr; bool needToAssignParameter = true; // TODO-CQ: enable calls with struct arguments passed in registers. noway_assert(!varTypeIsStruct(arg->TypeGet())); if ((argTabEntry->isTmp) || arg->IsCnsIntOrI() || arg->IsCnsFltOrDbl()) { // The argument is already assigned to a temp or is a const. argInTemp = arg; } else if (arg->OperGet() == GT_LCL_VAR) { unsigned lclNum = arg->AsLclVar()->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (!varDsc->lvIsParam) { // The argument is a non-parameter local so it doesn't need to be assigned to a temp. 
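// Note (illustrative): e.g. if the recursive call passes caller parameter V01 unchanged in
// the V01 position, neither a temp nor a parameter assignment is needed (the
// lclNum == lclParamNum case below); a temp is only required when the argument tree may
// still read caller parameters that an earlier parameter assignment could overwrite.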
argInTemp = arg; } else if (lclNum == lclParamNum) { // The argument is the same parameter local that we were about to assign so // we can skip the assignment. needToAssignParameter = false; } } // TODO: We don't need temp assignments if we can prove that the argument tree doesn't involve // any caller parameters. Some common cases are handled above but we may be able to eliminate // more temp assignments. Statement* paramAssignStmt = nullptr; if (needToAssignParameter) { if (argInTemp == nullptr) { // The argument is not assigned to a temp. We need to create a new temp and insert an assignment. // TODO: we can avoid a temp assignment if we can prove that the argument tree // doesn't involve any caller parameters. unsigned tmpNum = lvaGrabTemp(true DEBUGARG("arg temp")); lvaTable[tmpNum].lvType = arg->gtType; GenTree* tempSrc = arg; GenTree* tempDest = gtNewLclvNode(tmpNum, tempSrc->gtType); GenTree* tmpAssignNode = gtNewAssignNode(tempDest, tempSrc); Statement* tmpAssignStmt = gtNewStmt(tmpAssignNode, callDI); fgInsertStmtBefore(block, tmpAssignmentInsertionPoint, tmpAssignStmt); argInTemp = gtNewLclvNode(tmpNum, tempSrc->gtType); } // Now assign the temp to the parameter. const LclVarDsc* paramDsc = lvaGetDesc(lclParamNum); assert(paramDsc->lvIsParam); GenTree* paramDest = gtNewLclvNode(lclParamNum, paramDsc->lvType); GenTree* paramAssignNode = gtNewAssignNode(paramDest, argInTemp); paramAssignStmt = gtNewStmt(paramAssignNode, callDI); fgInsertStmtBefore(block, paramAssignmentInsertionPoint, paramAssignStmt); } return paramAssignStmt; } /***************************************************************************** * * Transform the given GT_CALL tree for code generation. */ GenTree* Compiler::fgMorphCall(GenTreeCall* call) { if (call->CanTailCall()) { GenTree* newNode = fgMorphPotentialTailCall(call); if (newNode != nullptr) { return newNode; } assert(!call->CanTailCall()); #if FEATURE_MULTIREG_RET if (fgGlobalMorph && call->HasMultiRegRetVal() && varTypeIsStruct(call->TypeGet())) { // The tail call has been rejected so we must finish the work deferred // by impFixupCallStructReturn for multi-reg-returning calls and transform // ret call // into // temp = call // ret temp // Force re-evaluating the argInfo as the return argument has changed. call->ResetArgInfo(); // Create a new temp. unsigned tmpNum = lvaGrabTemp(false DEBUGARG("Return value temp for multi-reg return (rejected tail call).")); lvaTable[tmpNum].lvIsMultiRegRet = true; CORINFO_CLASS_HANDLE structHandle = call->gtRetClsHnd; assert(structHandle != NO_CLASS_HANDLE); const bool unsafeValueClsCheck = false; lvaSetStruct(tmpNum, structHandle, unsafeValueClsCheck); var_types structType = lvaTable[tmpNum].lvType; GenTree* dst = gtNewLclvNode(tmpNum, structType); GenTree* assg = gtNewAssignNode(dst, call); assg = fgMorphTree(assg); // Create the assignment statement and insert it before the current statement. Statement* assgStmt = gtNewStmt(assg, compCurStmt->GetDebugInfo()); fgInsertStmtBefore(compCurBB, compCurStmt, assgStmt); // Return the temp. 
GenTree* result = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType); result->gtFlags |= GTF_DONT_CSE; compCurBB->bbFlags |= BBF_HAS_CALL; // This block has a call #ifdef DEBUG if (verbose) { printf("\nInserting assignment of a multi-reg call result to a temp:\n"); gtDispStmt(assgStmt); } result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif // DEBUG return result; } #endif } if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) == 0 && (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_VIRTUAL_FUNC_PTR) #ifdef FEATURE_READYTORUN || call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR) #endif ) && (call == fgMorphStmt->GetRootNode())) { // This is call to CORINFO_HELP_VIRTUAL_FUNC_PTR with ignored result. // Transform it into a null check. GenTree* thisPtr = call->gtCallArgs->GetNode(); GenTree* nullCheck = gtNewNullCheck(thisPtr, compCurBB); return fgMorphTree(nullCheck); } noway_assert(call->gtOper == GT_CALL); // // Only count calls once (only in the global morph phase) // if (fgGlobalMorph) { if (call->gtCallType == CT_INDIRECT) { optCallCount++; optIndirectCallCount++; } else if (call->gtCallType == CT_USER_FUNC) { optCallCount++; if (call->IsVirtual()) { optIndirectCallCount++; } } } // Couldn't inline - remember that this BB contains method calls // Mark the block as a GC safe point for the call if possible. // In the event the call indicates the block isn't a GC safe point // and the call is unmanaged with a GC transition suppression request // then insert a GC poll. CLANG_FORMAT_COMMENT_ANCHOR; if (IsGcSafePoint(call)) { compCurBB->bbFlags |= BBF_GC_SAFE_POINT; } // Regardless of the state of the basic block with respect to GC safe point, // we will always insert a GC Poll for scenarios involving a suppressed GC // transition. Only mark the block for GC Poll insertion on the first morph. if (fgGlobalMorph && call->IsUnmanaged() && call->IsSuppressGCTransition()) { compCurBB->bbFlags |= (BBF_HAS_SUPPRESSGC_CALL | BBF_GC_SAFE_POINT); optMethodFlags |= OMF_NEEDS_GCPOLLS; } // Morph Type.op_Equality, Type.op_Inequality, and Enum.HasFlag // // We need to do these before the arguments are morphed if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)) { // See if this is foldable GenTree* optTree = gtFoldExprCall(call); // If we optimized, morph the result if (optTree != call) { return fgMorphTree(optTree); } } compCurBB->bbFlags |= BBF_HAS_CALL; // This block has a call /* Process the "normal" argument list */ call = fgMorphArgs(call); noway_assert(call->gtOper == GT_CALL); // Should we expand this virtual method call target early here? // if (call->IsExpandedEarly() && call->IsVirtualVtable()) { // We only expand the Vtable Call target once in the global morph phase if (fgGlobalMorph) { assert(call->gtControlExpr == nullptr); // We only call this method and assign gtControlExpr once call->gtControlExpr = fgExpandVirtualVtableCallTarget(call); } // We always have to morph or re-morph the control expr // call->gtControlExpr = fgMorphTree(call->gtControlExpr); // Propagate any gtFlags into the call call->gtFlags |= call->gtControlExpr->gtFlags; } // Morph stelem.ref helper call to store a null value, into a store into an array without the helper. // This needs to be done after the arguments are morphed to ensure constant propagation has already taken place. 
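// Note (illustrative): a helper call of the shape "CORINFO_HELP_ARRADDR_ST(arr, index, 0)"
// is rewritten below into roughly "ASG(INDEX_REF(arr, index), 0)"; storing a null reference
// can never fail the array covariance check, so the helper is unnecessary. Any GT_ASG setup
// nodes that fgMorphArgs created for spilled arguments are kept alive under a leading COMMA.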
if (opts.OptimizationEnabled() && (call->gtCallType == CT_HELPER) && (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_ARRADDR_ST))) { GenTree* value = gtArgEntryByArgNum(call, 2)->GetNode(); if (value->IsIntegralConst(0)) { assert(value->OperGet() == GT_CNS_INT); GenTree* arr = gtArgEntryByArgNum(call, 0)->GetNode(); GenTree* index = gtArgEntryByArgNum(call, 1)->GetNode(); // Either or both of the array and index arguments may have been spilled to temps by `fgMorphArgs`. Copy // the spill trees as well if necessary. GenTreeOp* argSetup = nullptr; for (GenTreeCall::Use& use : call->Args()) { GenTree* const arg = use.GetNode(); if (arg->OperGet() != GT_ASG) { continue; } assert(arg != arr); assert(arg != index); arg->gtFlags &= ~GTF_LATE_ARG; GenTree* op1 = argSetup; if (op1 == nullptr) { op1 = gtNewNothingNode(); #if DEBUG op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif // DEBUG } argSetup = new (this, GT_COMMA) GenTreeOp(GT_COMMA, TYP_VOID, op1, arg); #if DEBUG argSetup->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif // DEBUG } #ifdef DEBUG auto resetMorphedFlag = [](GenTree** slot, fgWalkData* data) -> fgWalkResult { (*slot)->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; return WALK_CONTINUE; }; fgWalkTreePost(&arr, resetMorphedFlag); fgWalkTreePost(&index, resetMorphedFlag); fgWalkTreePost(&value, resetMorphedFlag); #endif // DEBUG GenTree* const arrIndexNode = gtNewIndexRef(TYP_REF, arr, index); GenTree* const arrStore = gtNewAssignNode(arrIndexNode, value); GenTree* result = fgMorphTree(arrStore); if (argSetup != nullptr) { result = new (this, GT_COMMA) GenTreeOp(GT_COMMA, TYP_VOID, argSetup, result); #if DEBUG result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif // DEBUG } return result; } } if (call->IsNoReturn()) { // // If we know that the call does not return then we can set fgRemoveRestOfBlock // to remove all subsequent statements and change the call's basic block to BBJ_THROW. // As a result the compiler won't need to preserve live registers across the call. // // This isn't need for tail calls as there shouldn't be any code after the call anyway. // Besides, the tail call code is part of the epilog and converting the block to // BBJ_THROW would result in the tail call being dropped as the epilog is generated // only for BBJ_RETURN blocks. 
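// Note (illustrative): a typical example is a call to a throw helper such as
// CORINFO_HELP_THROW; once fgRemoveRestOfBlock is set, the statements after the call are
// dropped and the block is later converted to BBJ_THROW, so codegen does not have to keep
// registers live across the call.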
// if (!call->IsTailCall()) { fgRemoveRestOfBlock = true; } } return call; } /***************************************************************************** * * Expand and return the call target address for a VirtualCall * The code here should match that generated by LowerVirtualVtableCall */ GenTree* Compiler::fgExpandVirtualVtableCallTarget(GenTreeCall* call) { GenTree* result; JITDUMP("Expanding virtual call target for %d.%s:\n", call->gtTreeID, GenTree::OpName(call->gtOper)); noway_assert(call->gtCallType == CT_USER_FUNC); // get a reference to the thisPtr being passed fgArgTabEntry* thisArgTabEntry = gtArgEntryByArgNum(call, 0); GenTree* thisPtr = thisArgTabEntry->GetNode(); // fgMorphArgs must enforce this invariant by creating a temp // assert(thisPtr->OperIsLocal()); // Make a copy of the thisPtr by cloning // thisPtr = gtClone(thisPtr, true); noway_assert(thisPtr != nullptr); // Get hold of the vtable offset unsigned vtabOffsOfIndirection; unsigned vtabOffsAfterIndirection; bool isRelative; info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection, &vtabOffsAfterIndirection, &isRelative); // Dereference the this pointer to obtain the method table, it is called vtab below GenTree* vtab; assert(VPTR_OFFS == 0); // We have to add this value to the thisPtr to get the methodTable vtab = gtNewOperNode(GT_IND, TYP_I_IMPL, thisPtr); vtab->gtFlags |= GTF_IND_INVARIANT; // Get the appropriate vtable chunk if (vtabOffsOfIndirection != CORINFO_VIRTUALCALL_NO_CHUNK) { // Note this isRelative code path is currently never executed // as the VM doesn't ever return: isRelative == true // if (isRelative) { // MethodTable offset is a relative pointer. // // Additional temporary variable is used to store virtual table pointer. // Address of method is obtained by the next computations: // // Save relative offset to tmp (vtab is virtual table pointer, vtabOffsOfIndirection is offset of // vtable-1st-level-indirection): // tmp = vtab // // Save address of method to result (vtabOffsAfterIndirection is offset of vtable-2nd-level-indirection): // result = [tmp + vtabOffsOfIndirection + vtabOffsAfterIndirection + [tmp + vtabOffsOfIndirection]] // // // When isRelative is true we need to setup two temporary variables // var1 = vtab // var2 = var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [var1 + vtabOffsOfIndirection] // result = [var2] + var2 // unsigned varNum1 = lvaGrabTemp(true DEBUGARG("var1 - vtab")); unsigned varNum2 = lvaGrabTemp(true DEBUGARG("var2 - relative")); GenTree* asgVar1 = gtNewTempAssign(varNum1, vtab); // var1 = vtab // [tmp + vtabOffsOfIndirection] GenTree* tmpTree1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, gtNewLclvNode(varNum1, TYP_I_IMPL), gtNewIconNode(vtabOffsOfIndirection, TYP_I_IMPL)); tmpTree1 = gtNewOperNode(GT_IND, TYP_I_IMPL, tmpTree1, false); tmpTree1->gtFlags |= GTF_IND_NONFAULTING; tmpTree1->gtFlags |= GTF_IND_INVARIANT; // var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection GenTree* tmpTree2 = gtNewOperNode(GT_ADD, TYP_I_IMPL, gtNewLclvNode(varNum1, TYP_I_IMPL), gtNewIconNode(vtabOffsOfIndirection + vtabOffsAfterIndirection, TYP_I_IMPL)); // var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [var1 + vtabOffsOfIndirection] tmpTree2 = gtNewOperNode(GT_ADD, TYP_I_IMPL, tmpTree2, tmpTree1); GenTree* asgVar2 = gtNewTempAssign(varNum2, tmpTree2); // var2 = <expression> // This last indirection is not invariant, but is non-faulting result = gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewLclvNode(varNum2, TYP_I_IMPL), false); // [var2] 
result->gtFlags |= GTF_IND_NONFAULTING; result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewLclvNode(varNum2, TYP_I_IMPL)); // [var2] + var2 // Now stitch together the two assignment and the calculation of result into a single tree GenTree* commaTree = gtNewOperNode(GT_COMMA, TYP_I_IMPL, asgVar2, result); result = gtNewOperNode(GT_COMMA, TYP_I_IMPL, asgVar1, commaTree); } else { // result = [vtab + vtabOffsOfIndirection] result = gtNewOperNode(GT_ADD, TYP_I_IMPL, vtab, gtNewIconNode(vtabOffsOfIndirection, TYP_I_IMPL)); result = gtNewOperNode(GT_IND, TYP_I_IMPL, result, false); result->gtFlags |= GTF_IND_NONFAULTING; result->gtFlags |= GTF_IND_INVARIANT; } } else { result = vtab; assert(!isRelative); } if (!isRelative) { // Load the function address // result = [result + vtabOffsAfterIndirection] result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewIconNode(vtabOffsAfterIndirection, TYP_I_IMPL)); // This last indirection is not invariant, but is non-faulting result = gtNewOperNode(GT_IND, TYP_I_IMPL, result, false); result->gtFlags |= GTF_IND_NONFAULTING; } return result; } /***************************************************************************** * * Transform the given constant tree for code generation. */ GenTree* Compiler::fgMorphConst(GenTree* tree) { assert(tree->OperIsConst()); /* Clear any exception flags or other unnecessary flags * that may have been set before folding this node to a constant */ tree->gtFlags &= ~(GTF_ALL_EFFECT | GTF_REVERSE_OPS); if (!tree->OperIs(GT_CNS_STR)) { return tree; } if (tree->AsStrCon()->IsStringEmptyField()) { LPVOID pValue; InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue); return fgMorphTree(gtNewStringLiteralNode(iat, pValue)); } // TODO-CQ: Do this for compCurBB->isRunRarely(). Doing that currently will // guarantee slow performance for that block. Instead cache the return value // of CORINFO_HELP_STRCNS and go to cache first giving reasonable perf. bool useLazyStrCns = false; if (compCurBB->bbJumpKind == BBJ_THROW) { useLazyStrCns = true; } else if (fgGlobalMorph && compCurStmt->GetRootNode()->IsCall()) { // Quick check: if the root node of the current statement happens to be a noreturn call. GenTreeCall* call = compCurStmt->GetRootNode()->AsCall(); useLazyStrCns = call->IsNoReturn() || fgIsThrow(call); } if (useLazyStrCns) { CorInfoHelpFunc helper = info.compCompHnd->getLazyStringLiteralHelper(tree->AsStrCon()->gtScpHnd); if (helper != CORINFO_HELP_UNDEF) { // For un-important blocks, we want to construct the string lazily GenTreeCall::Use* args; if (helper == CORINFO_HELP_STRCNS_CURRENT_MODULE) { args = gtNewCallArgs(gtNewIconNode(RidFromToken(tree->AsStrCon()->gtSconCPX), TYP_INT)); } else { args = gtNewCallArgs(gtNewIconNode(RidFromToken(tree->AsStrCon()->gtSconCPX), TYP_INT), gtNewIconEmbScpHndNode(tree->AsStrCon()->gtScpHnd)); } tree = gtNewHelperCallNode(helper, TYP_REF, args); return fgMorphTree(tree); } } assert(tree->AsStrCon()->gtScpHnd == info.compScopeHnd || !IsUninitialized(tree->AsStrCon()->gtScpHnd)); LPVOID pValue; InfoAccessType iat = info.compCompHnd->constructStringLiteral(tree->AsStrCon()->gtScpHnd, tree->AsStrCon()->gtSconCPX, &pValue); tree = gtNewStringLiteralNode(iat, pValue); return fgMorphTree(tree); } //------------------------------------------------------------------------ // fgMorphTryFoldObjAsLclVar: try to fold an Obj node as a LclVar. // // Arguments: // obj - the obj node. 
// destroyNodes -- destroy nodes that are optimized away // // Return value: // GenTreeLclVar if the obj can be replaced by it, null otherwise. // // Notes: // TODO-CQ: currently this transformation is done only under copy block, // but it is benefitial to do for each OBJ node. However, `PUT_ARG_STACK` // for some platforms does not expect struct `LCL_VAR` as a source, so // it needs more work. // GenTreeLclVar* Compiler::fgMorphTryFoldObjAsLclVar(GenTreeObj* obj, bool destroyNodes) { if (opts.OptimizationEnabled()) { GenTree* op1 = obj->Addr(); assert(!op1->OperIs(GT_LCL_VAR_ADDR) && "missed an opt opportunity"); if (op1->OperIs(GT_ADDR)) { GenTreeUnOp* addr = op1->AsUnOp(); GenTree* addrOp = addr->gtGetOp1(); if (addrOp->TypeIs(obj->TypeGet()) && addrOp->OperIs(GT_LCL_VAR)) { GenTreeLclVar* lclVar = addrOp->AsLclVar(); ClassLayout* lclVarLayout = lvaGetDesc(lclVar)->GetLayout(); ClassLayout* objLayout = obj->GetLayout(); if (ClassLayout::AreCompatible(lclVarLayout, objLayout)) { #ifdef DEBUG CORINFO_CLASS_HANDLE objClsHandle = obj->GetLayout()->GetClassHandle(); assert(objClsHandle != NO_CLASS_HANDLE); if (verbose) { CORINFO_CLASS_HANDLE lclClsHnd = gtGetStructHandle(lclVar); printf("fold OBJ(ADDR(X)) [%06u] into X [%06u], ", dspTreeID(obj), dspTreeID(lclVar)); printf("with %s handles\n", ((lclClsHnd == objClsHandle) ? "matching" : "different")); } #endif // Keep the DONT_CSE flag in sync // (as the addr always marks it for its op1) lclVar->gtFlags &= ~GTF_DONT_CSE; lclVar->gtFlags |= (obj->gtFlags & GTF_DONT_CSE); if (destroyNodes) { DEBUG_DESTROY_NODE(obj); DEBUG_DESTROY_NODE(addr); } return lclVar; } } } } return nullptr; } /***************************************************************************** * * Transform the given GTK_LEAF tree for code generation. */ GenTree* Compiler::fgMorphLeaf(GenTree* tree) { assert(tree->OperKind() & GTK_LEAF); if (tree->gtOper == GT_LCL_VAR) { const bool forceRemorph = false; return fgMorphLocalVar(tree, forceRemorph); } else if (tree->gtOper == GT_LCL_FLD) { if (lvaGetDesc(tree->AsLclFld())->IsAddressExposed()) { tree->gtFlags |= GTF_GLOB_REF; } #ifdef TARGET_X86 if (info.compIsVarArgs) { GenTree* newTree = fgMorphStackArgForVarArgs(tree->AsLclFld()->GetLclNum(), tree->TypeGet(), tree->AsLclFld()->GetLclOffs()); if (newTree != nullptr) { if (newTree->OperIsBlk() && ((tree->gtFlags & GTF_VAR_DEF) == 0)) { newTree->SetOper(GT_IND); } return newTree; } } #endif // TARGET_X86 } else if (tree->gtOper == GT_FTN_ADDR) { GenTreeFptrVal* fptrValTree = tree->AsFptrVal(); // A function pointer address is being used. Let the VM know if this is the // target of a Delegate or a raw function pointer. bool isUnsafeFunctionPointer = !fptrValTree->gtFptrDelegateTarget; CORINFO_CONST_LOOKUP addrInfo; #ifdef FEATURE_READYTORUN if (fptrValTree->gtEntryPoint.addr != nullptr) { addrInfo = fptrValTree->gtEntryPoint; } else #endif { info.compCompHnd->getFunctionFixedEntryPoint(fptrValTree->gtFptrMethod, isUnsafeFunctionPointer, &addrInfo); } GenTree* indNode = nullptr; switch (addrInfo.accessType) { case IAT_PPVALUE: indNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)addrInfo.handle, GTF_ICON_CONST_PTR, true); // Add the second indirection indNode = gtNewOperNode(GT_IND, TYP_I_IMPL, indNode); // This indirection won't cause an exception. indNode->gtFlags |= GTF_IND_NONFAULTING; // This indirection also is invariant. 
indNode->gtFlags |= GTF_IND_INVARIANT; break; case IAT_PVALUE: indNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)addrInfo.handle, GTF_ICON_FTN_ADDR, true); break; case IAT_VALUE: // Refer to gtNewIconHandleNode() as the template for constructing a constant handle // tree->SetOper(GT_CNS_INT); tree->AsIntConCommon()->SetIconValue(ssize_t(addrInfo.handle)); tree->gtFlags |= GTF_ICON_FTN_ADDR; break; default: noway_assert(!"Unknown addrInfo.accessType"); } if (indNode != nullptr) { DEBUG_DESTROY_NODE(tree); tree = fgMorphTree(indNode); } } return tree; } void Compiler::fgAssignSetVarDef(GenTree* tree) { GenTreeLclVarCommon* lclVarCmnTree; bool isEntire = false; if (tree->DefinesLocal(this, &lclVarCmnTree, &isEntire)) { if (isEntire) { lclVarCmnTree->gtFlags |= GTF_VAR_DEF; } else { // We consider partial definitions to be modeled as uses followed by definitions. // This captures the idea that precedings defs are not necessarily made redundant // by this definition. lclVarCmnTree->gtFlags |= (GTF_VAR_DEF | GTF_VAR_USEASG); } } } //------------------------------------------------------------------------ // fgMorphOneAsgBlockOp: Attempt to replace a block assignment with a scalar assignment // // Arguments: // tree - The block assignment to be possibly morphed // // Return Value: // The modified tree if successful, nullptr otherwise. // // Assumptions: // 'tree' must be a block assignment. // // Notes: // If successful, this method always returns the incoming tree, modifying only // its arguments. // GenTree* Compiler::fgMorphOneAsgBlockOp(GenTree* tree) { // This must be a block assignment. noway_assert(tree->OperIsBlkOp()); var_types asgType = tree->TypeGet(); GenTree* asg = tree; GenTree* dest = asg->gtGetOp1(); GenTree* src = asg->gtGetOp2(); unsigned destVarNum = BAD_VAR_NUM; LclVarDsc* destVarDsc = nullptr; GenTree* destLclVarTree = nullptr; bool isCopyBlock = asg->OperIsCopyBlkOp(); bool isInitBlock = !isCopyBlock; unsigned size = 0; CORINFO_CLASS_HANDLE clsHnd = NO_CLASS_HANDLE; if (dest->gtEffectiveVal()->OperIsBlk()) { GenTreeBlk* lhsBlk = dest->gtEffectiveVal()->AsBlk(); size = lhsBlk->Size(); if (impIsAddressInLocal(lhsBlk->Addr(), &destLclVarTree)) { destVarNum = destLclVarTree->AsLclVarCommon()->GetLclNum(); destVarDsc = lvaGetDesc(destVarNum); } if (lhsBlk->OperGet() == GT_OBJ) { clsHnd = lhsBlk->AsObj()->GetLayout()->GetClassHandle(); } } else { // Is this an enregisterable struct that is already a simple assignment? // This can happen if we are re-morphing. // Note that we won't do this straightaway if this is a SIMD type, since it // may be a promoted lclVar (sometimes we promote the individual float fields of // fixed-size SIMD). 
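// Note (illustrative): on a re-morph the copy may already have been retyped to a scalar,
// e.g. "ASG long (IND long(ADDR(LCL_VAR struct V02)), ...)" for an 8-byte struct (V02 is an
// example number); in that case the code below only re-canonicalizes the operands rather
// than rebuilding block nodes.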
if (dest->OperGet() == GT_IND) { noway_assert(asgType != TYP_STRUCT); if (varTypeIsStruct(asgType)) { destLclVarTree = fgIsIndirOfAddrOfLocal(dest); } if (isCopyBlock && destLclVarTree == nullptr && !src->OperIs(GT_LCL_VAR)) { fgMorphBlockOperand(src, asgType, genTypeSize(asgType), false /*isBlkReqd*/); dest->gtFlags |= GTF_DONT_CSE; return tree; } } else { noway_assert(dest->OperIsLocal()); destLclVarTree = dest; } if (destLclVarTree != nullptr) { destVarNum = destLclVarTree->AsLclVarCommon()->GetLclNum(); destVarDsc = lvaGetDesc(destVarNum); if (asgType == TYP_STRUCT) { clsHnd = destVarDsc->GetStructHnd(); size = destVarDsc->lvExactSize; } } if (asgType != TYP_STRUCT) { size = genTypeSize(asgType); } } if (size == 0) { return nullptr; } if ((destVarDsc != nullptr) && varTypeIsStruct(destLclVarTree) && destVarDsc->lvPromoted) { // Let fgMorphCopyBlock handle it. return nullptr; } if (src->IsCall() || src->OperIsSIMD()) { // Can't take ADDR from these nodes, let fgMorphCopyBlock handle it, #11413. return nullptr; } if ((destVarDsc != nullptr) && !varTypeIsStruct(destVarDsc->TypeGet())) { // // See if we can do a simple transformation: // // GT_ASG <TYP_size> // / \. // GT_IND GT_IND or CNS_INT // | | // [dest] [src] // if (asgType == TYP_STRUCT) { // It is possible to use `initobj` to init a primitive type on the stack, // like `ldloca.s 1; initobj 1B000003` where `V01` has type `ref`; // in this case we generate `ASG struct(BLK<8> struct(ADDR byref(LCL_VAR ref)), 0)` // and this code path transforms it into `ASG ref(LCL_VARref, 0)` because it is not a real // struct assignment. if (size == REGSIZE_BYTES) { if (clsHnd == NO_CLASS_HANDLE) { // A register-sized cpblk can be treated as an integer asignment. asgType = TYP_I_IMPL; } else { BYTE gcPtr; info.compCompHnd->getClassGClayout(clsHnd, &gcPtr); asgType = getJitGCType(gcPtr); } } else { switch (size) { case 1: asgType = TYP_BYTE; break; case 2: asgType = TYP_SHORT; break; #ifdef TARGET_64BIT case 4: asgType = TYP_INT; break; #endif // TARGET_64BIT } } } } GenTree* srcLclVarTree = nullptr; LclVarDsc* srcVarDsc = nullptr; if (isCopyBlock) { if (src->OperGet() == GT_LCL_VAR) { srcLclVarTree = src; srcVarDsc = lvaGetDesc(src->AsLclVarCommon()); } else if (src->OperIsIndir() && impIsAddressInLocal(src->AsOp()->gtOp1, &srcLclVarTree)) { srcVarDsc = lvaGetDesc(srcLclVarTree->AsLclVarCommon()); } if ((srcVarDsc != nullptr) && varTypeIsStruct(srcLclVarTree) && srcVarDsc->lvPromoted) { // Let fgMorphCopyBlock handle it. return nullptr; } } if (asgType != TYP_STRUCT) { noway_assert((size <= REGSIZE_BYTES) || varTypeIsSIMD(asgType)); // For initBlk, a non constant source is not going to allow us to fiddle // with the bits to create a single assigment. // Nor do we (for now) support transforming an InitBlock of SIMD type, unless // it is a direct assignment to a lclVar and the value is zero. if (isInitBlock) { if (!src->IsConstInitVal()) { return nullptr; } if (varTypeIsSIMD(asgType) && (!src->IsIntegralConst(0) || (destVarDsc == nullptr))) { return nullptr; } } if (destVarDsc != nullptr) { // Kill everything about dest if (optLocalAssertionProp) { if (optAssertionCount > 0) { fgKillDependentAssertions(destVarNum DEBUGARG(tree)); } } // A previous incarnation of this code also required the local not to be // address-exposed(=taken). That seems orthogonal to the decision of whether // to do field-wise assignments: being address-exposed will cause it to be // "dependently" promoted, so it will be in the right memory location. 
One possible // further reason for avoiding field-wise stores is that the struct might have alignment-induced // holes, whose contents could be meaningful in unsafe code. If we decide that's a valid // concern, then we could compromise, and say that address-exposed + fields do not completely cover the // memory of the struct prevent field-wise assignments. Same situation exists for the "src" decision. if (varTypeIsStruct(destLclVarTree) && destVarDsc->lvPromoted) { // Let fgMorphInitBlock handle it. (Since we'll need to do field-var-wise assignments.) return nullptr; } else if (!varTypeIsFloating(destLclVarTree->TypeGet()) && (size == genTypeSize(destVarDsc))) { // Use the dest local var directly, as well as its type. dest = destLclVarTree; asgType = destVarDsc->lvType; // If the block operation had been a write to a local var of a small int type, // of the exact size of the small int type, and the var is NormalizeOnStore, // we would have labeled it GTF_VAR_USEASG, because the block operation wouldn't // have done that normalization. If we're now making it into an assignment, // the NormalizeOnStore will work, and it can be a full def. if (destVarDsc->lvNormalizeOnStore()) { dest->gtFlags &= (~GTF_VAR_USEASG); } } else { // Could be a non-promoted struct, or a floating point type local, or // an int subject to a partial write. Don't enregister. lvaSetVarDoNotEnregister(destVarNum DEBUGARG(DoNotEnregisterReason::OneAsgRetyping)); // Mark the local var tree as a definition point of the local. destLclVarTree->gtFlags |= GTF_VAR_DEF; if (size < destVarDsc->lvExactSize) { // If it's not a full-width assignment.... destLclVarTree->gtFlags |= GTF_VAR_USEASG; } if (dest == destLclVarTree) { GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest); dest = gtNewIndir(asgType, addr); } } } // Check to ensure we don't have a reducible *(& ... ) if (dest->OperIsIndir() && dest->AsIndir()->Addr()->OperGet() == GT_ADDR) { // If dest is an Indir or Block, and it has a child that is a Addr node // GenTree* addrNode = dest->AsIndir()->Addr(); // known to be a GT_ADDR // Can we just remove the Ind(Addr(destOp)) and operate directly on 'destOp'? // GenTree* destOp = addrNode->gtGetOp1(); var_types destOpType = destOp->TypeGet(); // We can if we have a primitive integer type and the sizes are exactly the same. // if ((varTypeIsIntegralOrI(destOp) && (size == genTypeSize(destOpType)))) { dest = destOp; asgType = destOpType; } } if (dest->gtEffectiveVal()->OperIsIndir()) { // If we have no information about the destination, we have to assume it could // live anywhere (not just in the GC heap). // Mark the GT_IND node so that we use the correct write barrier helper in case // the field is a GC ref. if (!fgIsIndirOfAddrOfLocal(dest)) { dest->gtFlags |= (GTF_GLOB_REF | GTF_IND_TGTANYWHERE); tree->gtFlags |= GTF_GLOB_REF; } dest->SetIndirExceptionFlags(this); tree->gtFlags |= (dest->gtFlags & GTF_EXCEPT); } if (isCopyBlock) { if (srcVarDsc != nullptr) { // Handled above. assert(!varTypeIsStruct(srcLclVarTree) || !srcVarDsc->lvPromoted); if (!varTypeIsFloating(srcLclVarTree->TypeGet()) && size == genTypeSize(genActualType(srcLclVarTree->TypeGet()))) { // Use the src local var directly. src = srcLclVarTree; } else { // The source argument of the copyblk can potentially be accessed only through indir(addr(lclVar)) // or indir(lclVarAddr) so it must be on the stack. 
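// Note (illustrative): this path is taken when the source local's type does not match the
// scalar copy type (e.g. reading a float local as TYP_INT), so the local is marked
// DoNotEnregister and, if needed, re-read through "IND <asgType> (ADDR(LCL_VAR))" below;
// the reinterpreting indirection is only valid if the variable lives on the stack.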
unsigned lclVarNum = srcLclVarTree->AsLclVarCommon()->GetLclNum(); lvaSetVarDoNotEnregister(lclVarNum DEBUGARG(DoNotEnregisterReason::OneAsgRetyping)); GenTree* srcAddr; if (src == srcLclVarTree) { srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src); src = gtNewOperNode(GT_IND, asgType, srcAddr); } else { assert(src->OperIsIndir()); } } } if (src->OperIsIndir()) { if (!fgIsIndirOfAddrOfLocal(src)) { // If we have no information about the src, we have to assume it could // live anywhere (not just in the GC heap). // Mark the GT_IND node so that we use the correct write barrier helper in case // the field is a GC ref. src->gtFlags |= (GTF_GLOB_REF | GTF_IND_TGTANYWHERE); } src->SetIndirExceptionFlags(this); } } else // InitBlk { #ifdef FEATURE_SIMD if (varTypeIsSIMD(asgType)) { assert(!isCopyBlock); // Else we would have returned the tree above. noway_assert(src->IsIntegralConst(0)); noway_assert(destVarDsc != nullptr); src = gtNewSIMDNode(asgType, src, SIMDIntrinsicInit, destVarDsc->GetSimdBaseJitType(), size); } else #endif { if (src->OperIsInitVal()) { src = src->gtGetOp1(); } assert(src->IsCnsIntOrI()); // This will mutate the integer constant, in place, to be the correct // value for the type we are using in the assignment. src->AsIntCon()->FixupInitBlkValue(asgType); } } // Ensure that the dest is setup appropriately. if (dest->gtEffectiveVal()->OperIsIndir()) { dest = fgMorphBlockOperand(dest, asgType, size, false /*isBlkReqd*/); } // Ensure that the rhs is setup appropriately. if (isCopyBlock) { src = fgMorphBlockOperand(src, asgType, size, false /*isBlkReqd*/); } // Set the lhs and rhs on the assignment. if (dest != tree->AsOp()->gtOp1) { asg->AsOp()->gtOp1 = dest; } if (src != asg->AsOp()->gtOp2) { asg->AsOp()->gtOp2 = src; } asg->ChangeType(asgType); dest->gtFlags |= GTF_DONT_CSE; asg->gtFlags &= ~GTF_EXCEPT; asg->gtFlags |= ((dest->gtFlags | src->gtFlags) & GTF_ALL_EFFECT); // Un-set GTF_REVERSE_OPS, and it will be set later if appropriate. asg->gtFlags &= ~GTF_REVERSE_OPS; #ifdef DEBUG if (verbose) { printf("fgMorphOneAsgBlock (after):\n"); gtDispTree(tree); } #endif return tree; } return nullptr; } //------------------------------------------------------------------------ // fgMorphPromoteLocalInitBlock: Attempts to promote a local block init tree // to a tree of promoted field initialization assignments. // // Arguments: // destLclNode - The destination LclVar node // initVal - The initialization value // blockSize - The amount of bytes to initialize // // Return Value: // A tree that performs field by field initialization of the destination // struct variable if various conditions are met, nullptr otherwise. 
// // Notes: // This transforms a single block initialization assignment like: // // * ASG struct (init) // +--* BLK(12) struct // | \--* ADDR long // | \--* LCL_VAR struct(P) V02 loc0 // | \--* int V02.a (offs=0x00) -> V06 tmp3 // | \--* ubyte V02.c (offs=0x04) -> V07 tmp4 // | \--* float V02.d (offs=0x08) -> V08 tmp5 // \--* INIT_VAL int // \--* CNS_INT int 42 // // into a COMMA tree of assignments that initialize each promoted struct // field: // // * COMMA void // +--* COMMA void // | +--* ASG int // | | +--* LCL_VAR int V06 tmp3 // | | \--* CNS_INT int 0x2A2A2A2A // | \--* ASG ubyte // | +--* LCL_VAR ubyte V07 tmp4 // | \--* CNS_INT int 42 // \--* ASG float // +--* LCL_VAR float V08 tmp5 // \--* CNS_DBL float 1.5113661732714390e-13 // GenTree* Compiler::fgMorphPromoteLocalInitBlock(GenTreeLclVar* destLclNode, GenTree* initVal, unsigned blockSize) { assert(destLclNode->OperIs(GT_LCL_VAR)); LclVarDsc* destLclVar = lvaGetDesc(destLclNode); assert(varTypeIsStruct(destLclVar->TypeGet())); assert(destLclVar->lvPromoted); if (blockSize == 0) { JITDUMP(" size is zero or unknown.\n"); return nullptr; } if (destLclVar->IsAddressExposed() && destLclVar->lvContainsHoles) { JITDUMP(" dest is address exposed and contains holes.\n"); return nullptr; } if (destLclVar->lvCustomLayout && destLclVar->lvContainsHoles) { // TODO-1stClassStructs: there are no reasons for this pessimization, delete it. JITDUMP(" dest has custom layout and contains holes.\n"); return nullptr; } if (destLclVar->lvExactSize != blockSize) { JITDUMP(" dest size mismatch.\n"); return nullptr; } if (!initVal->OperIs(GT_CNS_INT)) { JITDUMP(" source is not constant.\n"); return nullptr; } const int64_t initPattern = (initVal->AsIntCon()->IconValue() & 0xFF) * 0x0101010101010101LL; if (initPattern != 0) { for (unsigned i = 0; i < destLclVar->lvFieldCnt; ++i) { LclVarDsc* fieldDesc = lvaGetDesc(destLclVar->lvFieldLclStart + i); if (varTypeIsSIMD(fieldDesc->TypeGet()) || varTypeIsGC(fieldDesc->TypeGet())) { // Cannot initialize GC or SIMD types with a non-zero constant. // The former is completly bogus. The later restriction could be // lifted by supporting non-zero SIMD constants or by generating // field initialization code that converts an integer constant to // the appropiate SIMD value. Unlikely to be very useful, though. JITDUMP(" dest contains GC and/or SIMD fields and source constant is not 0.\n"); return nullptr; } } } JITDUMP(" using field by field initialization.\n"); GenTree* tree = nullptr; for (unsigned i = 0; i < destLclVar->lvFieldCnt; ++i) { unsigned fieldLclNum = destLclVar->lvFieldLclStart + i; LclVarDsc* fieldDesc = lvaGetDesc(fieldLclNum); GenTree* dest = gtNewLclvNode(fieldLclNum, fieldDesc->TypeGet()); // If it had been labeled a "USEASG", assignments to the individual promoted fields are not. dest->gtFlags |= (destLclNode->gtFlags & ~(GTF_NODE_MASK | GTF_VAR_USEASG)); GenTree* src; switch (dest->TypeGet()) { case TYP_BOOL: case TYP_BYTE: case TYP_UBYTE: case TYP_SHORT: case TYP_USHORT: // Promoted fields are expected to be "normalize on load". If that changes then // we may need to adjust this code to widen the constant correctly. 
assert(fieldDesc->lvNormalizeOnLoad()); FALLTHROUGH; case TYP_INT: { int64_t mask = (int64_t(1) << (genTypeSize(dest->TypeGet()) * 8)) - 1; src = gtNewIconNode(static_cast<int32_t>(initPattern & mask)); break; } case TYP_LONG: src = gtNewLconNode(initPattern); break; case TYP_FLOAT: float floatPattern; memcpy(&floatPattern, &initPattern, sizeof(floatPattern)); src = gtNewDconNode(floatPattern, dest->TypeGet()); break; case TYP_DOUBLE: double doublePattern; memcpy(&doublePattern, &initPattern, sizeof(doublePattern)); src = gtNewDconNode(doublePattern, dest->TypeGet()); break; case TYP_REF: case TYP_BYREF: #ifdef FEATURE_SIMD case TYP_SIMD8: case TYP_SIMD12: case TYP_SIMD16: case TYP_SIMD32: #endif // FEATURE_SIMD assert(initPattern == 0); src = gtNewIconNode(0, dest->TypeGet()); break; default: unreached(); } GenTree* asg = gtNewAssignNode(dest, src); if (optLocalAssertionProp) { optAssertionGen(asg); } if (tree != nullptr) { tree = gtNewOperNode(GT_COMMA, TYP_VOID, tree, asg); } else { tree = asg; } } return tree; } //------------------------------------------------------------------------ // fgMorphGetStructAddr: Gets the address of a struct object // // Arguments: // pTree - the parent's pointer to the struct object node // clsHnd - the class handle for the struct type // isRValue - true if this is a source (not dest) // // Return Value: // Returns the address of the struct value, possibly modifying the existing tree to // sink the address below any comma nodes (this is to canonicalize for value numbering). // If this is a source, it will morph it to an GT_IND before taking its address, // since it may not be remorphed (and we don't want blk nodes as rvalues). GenTree* Compiler::fgMorphGetStructAddr(GenTree** pTree, CORINFO_CLASS_HANDLE clsHnd, bool isRValue) { GenTree* addr; GenTree* tree = *pTree; // If this is an indirection, we can return its op1, unless it's a GTF_IND_ARR_INDEX, in which case we // need to hang onto that for the purposes of value numbering. if (tree->OperIsIndir()) { if ((tree->gtFlags & GTF_IND_ARR_INDEX) == 0) { addr = tree->AsOp()->gtOp1; } else { if (isRValue && tree->OperIsBlk()) { tree->ChangeOper(GT_IND); } addr = gtNewOperNode(GT_ADDR, TYP_BYREF, tree); } } else if (tree->gtOper == GT_COMMA) { // If this is a comma, we're going to "sink" the GT_ADDR below it. 
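// Note (illustrative): taking the address of "COMMA(sideEffect, <struct value>)" produces
// "COMMA byref (sideEffect, <address of struct value>)": the recursive call below rewrites
// gtOp2 in place into its address and the COMMA itself is retyped to TYP_BYREF, which keeps
// the address in a canonical position for value numbering.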
(void)fgMorphGetStructAddr(&(tree->AsOp()->gtOp2), clsHnd, isRValue); tree->gtType = TYP_BYREF; addr = tree; } else { switch (tree->gtOper) { case GT_LCL_FLD: case GT_LCL_VAR: case GT_INDEX: case GT_FIELD: case GT_ARR_ELEM: addr = gtNewOperNode(GT_ADDR, TYP_BYREF, tree); break; case GT_INDEX_ADDR: addr = tree; break; default: { // TODO: Consider using lvaGrabTemp and gtNewTempAssign instead, since we're // not going to use "temp" GenTree* temp = fgInsertCommaFormTemp(pTree, clsHnd); unsigned lclNum = temp->gtEffectiveVal()->AsLclVar()->GetLclNum(); lvaSetVarDoNotEnregister(lclNum DEBUG_ARG(DoNotEnregisterReason::VMNeedsStackAddr)); addr = fgMorphGetStructAddr(pTree, clsHnd, isRValue); break; } } } *pTree = addr; return addr; } //------------------------------------------------------------------------ // fgMorphBlockOperand: Canonicalize an operand of a block assignment // // Arguments: // tree - The block operand // asgType - The type of the assignment // blockWidth - The size of the block // isBlkReqd - true iff this operand must remain a block node // // Return Value: // Returns the morphed block operand // // Notes: // This does the following: // - Ensures that a struct operand is a block node or lclVar. // - Ensures that any COMMAs are above ADDR nodes. // Although 'tree' WAS an operand of a block assignment, the assignment // may have been retyped to be a scalar assignment. GenTree* Compiler::fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigned blockWidth, bool isBlkReqd) { GenTree* effectiveVal = tree->gtEffectiveVal(); if (asgType != TYP_STRUCT) { if (effectiveVal->OperIsIndir()) { if (!isBlkReqd) { GenTree* addr = effectiveVal->AsIndir()->Addr(); if ((addr->OperGet() == GT_ADDR) && (addr->gtGetOp1()->TypeGet() == asgType)) { effectiveVal = addr->gtGetOp1(); } else if (effectiveVal->OperIsBlk()) { effectiveVal->SetOper(GT_IND); } } effectiveVal->gtType = asgType; } else if (effectiveVal->TypeGet() != asgType) { if (effectiveVal->IsCall()) { #ifdef DEBUG GenTreeCall* call = effectiveVal->AsCall(); assert(call->TypeGet() == TYP_STRUCT); assert(blockWidth == info.compCompHnd->getClassSize(call->gtRetClsHnd)); #endif } else { GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, effectiveVal); effectiveVal = gtNewIndir(asgType, addr); } } } else { GenTreeIndir* indirTree = nullptr; GenTreeLclVarCommon* lclNode = nullptr; bool needsIndirection = true; if (effectiveVal->OperIsIndir()) { indirTree = effectiveVal->AsIndir(); GenTree* addr = effectiveVal->AsIndir()->Addr(); if ((addr->OperGet() == GT_ADDR) && (addr->gtGetOp1()->OperGet() == GT_LCL_VAR)) { lclNode = addr->gtGetOp1()->AsLclVarCommon(); } } else if (effectiveVal->OperGet() == GT_LCL_VAR) { lclNode = effectiveVal->AsLclVarCommon(); } else if (effectiveVal->IsCall()) { needsIndirection = false; #ifdef DEBUG GenTreeCall* call = effectiveVal->AsCall(); assert(call->TypeGet() == TYP_STRUCT); assert(blockWidth == info.compCompHnd->getClassSize(call->gtRetClsHnd)); #endif } #ifdef TARGET_ARM64 else if (effectiveVal->OperIsHWIntrinsic()) { needsIndirection = false; #ifdef DEBUG GenTreeHWIntrinsic* intrinsic = effectiveVal->AsHWIntrinsic(); assert(intrinsic->TypeGet() == TYP_STRUCT); assert(HWIntrinsicInfo::IsMultiReg(intrinsic->GetHWIntrinsicId())); #endif } #endif // TARGET_ARM64 if (lclNode != nullptr) { const LclVarDsc* varDsc = lvaGetDesc(lclNode); if (varTypeIsStruct(varDsc) && (varDsc->lvExactSize == blockWidth) && (varDsc->lvType == asgType)) { if (effectiveVal != lclNode) { JITDUMP("Replacing block node [%06d] with lclVar 
V%02u\n", dspTreeID(tree), lclNode->GetLclNum()); effectiveVal = lclNode; } needsIndirection = false; } else { // This may be a lclVar that was determined to be address-exposed. effectiveVal->gtFlags |= (lclNode->gtFlags & GTF_ALL_EFFECT); } } if (needsIndirection) { if (indirTree != nullptr) { // If we have an indirection and a block is required, it should already be a block. assert(indirTree->OperIsBlk() || !isBlkReqd); effectiveVal->gtType = asgType; } else { GenTree* newTree; GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, effectiveVal); if (isBlkReqd) { CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleIfPresent(effectiveVal); if (clsHnd == NO_CLASS_HANDLE) { newTree = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, addr, typGetBlkLayout(blockWidth)); } else { newTree = gtNewObjNode(clsHnd, addr); gtSetObjGcInfo(newTree->AsObj()); } } else { newTree = gtNewIndir(asgType, addr); } effectiveVal = newTree; } } } assert(effectiveVal->TypeIs(asgType) || (varTypeIsSIMD(asgType) && varTypeIsStruct(effectiveVal))); tree = effectiveVal; return tree; } //------------------------------------------------------------------------ // fgMorphCanUseLclFldForCopy: check if we can access LclVar2 using LclVar1's fields. // // Arguments: // lclNum1 - a promoted lclVar that is used in fieldwise assignment; // lclNum2 - the local variable on the other side of ASG, can be BAD_VAR_NUM. // // Return Value: // True if the second local is valid and has the same struct handle as the first, // false otherwise. // // Notes: // This check is needed to avoid accessing LCL_VARs with incorrect // CORINFO_FIELD_HANDLE that would confuse VN optimizations. // bool Compiler::fgMorphCanUseLclFldForCopy(unsigned lclNum1, unsigned lclNum2) { assert(lclNum1 != BAD_VAR_NUM); if (lclNum2 == BAD_VAR_NUM) { return false; } const LclVarDsc* varDsc1 = lvaGetDesc(lclNum1); const LclVarDsc* varDsc2 = lvaGetDesc(lclNum2); assert(varTypeIsStruct(varDsc1)); if (!varTypeIsStruct(varDsc2)) { return false; } CORINFO_CLASS_HANDLE struct1 = varDsc1->GetStructHnd(); CORINFO_CLASS_HANDLE struct2 = varDsc2->GetStructHnd(); assert(struct1 != NO_CLASS_HANDLE); assert(struct2 != NO_CLASS_HANDLE); if (struct1 != struct2) { return false; } return true; } // insert conversions and normalize to make tree amenable to register // FP architectures GenTree* Compiler::fgMorphForRegisterFP(GenTree* tree) { if (tree->OperIsArithmetic()) { if (varTypeIsFloating(tree)) { GenTree* op1 = tree->AsOp()->gtOp1; GenTree* op2 = tree->gtGetOp2(); assert(varTypeIsFloating(op1->TypeGet()) && varTypeIsFloating(op2->TypeGet())); if (op1->TypeGet() != tree->TypeGet()) { tree->AsOp()->gtOp1 = gtNewCastNode(tree->TypeGet(), op1, false, tree->TypeGet()); } if (op2->TypeGet() != tree->TypeGet()) { tree->AsOp()->gtOp2 = gtNewCastNode(tree->TypeGet(), op2, false, tree->TypeGet()); } } } else if (tree->OperIsCompare()) { GenTree* op1 = tree->AsOp()->gtOp1; if (varTypeIsFloating(op1)) { GenTree* op2 = tree->gtGetOp2(); assert(varTypeIsFloating(op2)); if (op1->TypeGet() != op2->TypeGet()) { // both had better be floating, just one bigger than other if (op1->TypeGet() == TYP_FLOAT) { assert(op2->TypeGet() == TYP_DOUBLE); tree->AsOp()->gtOp1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE); } else if (op2->TypeGet() == TYP_FLOAT) { assert(op1->TypeGet() == TYP_DOUBLE); tree->AsOp()->gtOp2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE); } } } } return tree; } #ifdef FEATURE_SIMD 
//-------------------------------------------------------------------------------------------------------------- // getSIMDStructFromField: // Checking whether the field belongs to a simd struct or not. If it is, return the GenTree* for // the struct node, also base type, field index and simd size. If it is not, just return nullptr. // Usually if the tree node is from a simd lclvar which is not used in any SIMD intrinsic, then we // should return nullptr, since in this case we should treat SIMD struct as a regular struct. // However if no matter what, you just want get simd struct node, you can set the ignoreUsedInSIMDIntrinsic // as true. Then there will be no IsUsedInSIMDIntrinsic checking, and it will return SIMD struct node // if the struct is a SIMD struct. // // Arguments: // tree - GentreePtr. This node will be checked to see this is a field which belongs to a simd // struct used for simd intrinsic or not. // simdBaseJitTypeOut - CorInfoType pointer, if the tree node is the tree we want, we set *simdBaseJitTypeOut // to simd lclvar's base JIT type. // indexOut - unsigned pointer, if the tree is used for simd intrinsic, we will set *indexOut // equals to the index number of this field. // simdSizeOut - unsigned pointer, if the tree is used for simd intrinsic, set the *simdSizeOut // equals to the simd struct size which this tree belongs to. // ignoreUsedInSIMDIntrinsic - bool. If this is set to true, then this function will ignore // the UsedInSIMDIntrinsic check. // // return value: // A GenTree* which points the simd lclvar tree belongs to. If the tree is not the simd // instrinic related field, return nullptr. // GenTree* Compiler::getSIMDStructFromField(GenTree* tree, CorInfoType* simdBaseJitTypeOut, unsigned* indexOut, unsigned* simdSizeOut, bool ignoreUsedInSIMDIntrinsic /*false*/) { GenTree* ret = nullptr; if (tree->OperGet() == GT_FIELD) { GenTree* objRef = tree->AsField()->GetFldObj(); if (objRef != nullptr) { GenTree* obj = nullptr; if (objRef->gtOper == GT_ADDR) { obj = objRef->AsOp()->gtOp1; } else if (ignoreUsedInSIMDIntrinsic) { obj = objRef; } else { return nullptr; } if (isSIMDTypeLocal(obj)) { LclVarDsc* varDsc = lvaGetDesc(obj->AsLclVarCommon()); if (varDsc->lvIsUsedInSIMDIntrinsic() || ignoreUsedInSIMDIntrinsic) { *simdSizeOut = varDsc->lvExactSize; *simdBaseJitTypeOut = getBaseJitTypeOfSIMDLocal(obj); ret = obj; } } else if (obj->OperGet() == GT_SIMD) { ret = obj; GenTreeSIMD* simdNode = obj->AsSIMD(); *simdSizeOut = simdNode->GetSimdSize(); *simdBaseJitTypeOut = simdNode->GetSimdBaseJitType(); } #ifdef FEATURE_HW_INTRINSICS else if (obj->OperIsHWIntrinsic()) { ret = obj; GenTreeHWIntrinsic* simdNode = obj->AsHWIntrinsic(); *simdSizeOut = simdNode->GetSimdSize(); *simdBaseJitTypeOut = simdNode->GetSimdBaseJitType(); } #endif // FEATURE_HW_INTRINSICS } } if (ret != nullptr) { var_types fieldType = tree->TypeGet(); if (fieldType == TYP_LONG) { // Vector2/3/4 expose public float fields while Vector<T> // and Vector64/128/256<T> have internal ulong fields. So // we should only ever encounter accesses for TYP_FLOAT or // TYP_LONG and in the case of the latter we don't want the // generic type since we are executing some algorithm on the // raw underlying bits instead. 
*simdBaseJitTypeOut = CORINFO_TYPE_ULONG; } else { assert(fieldType == TYP_FLOAT); } unsigned baseTypeSize = genTypeSize(JITtype2varType(*simdBaseJitTypeOut)); *indexOut = tree->AsField()->gtFldOffset / baseTypeSize; } return ret; } /***************************************************************************** * If a read operation tries to access simd struct field, then transform the operation * to the SimdGetElementNode, and return the new tree. Otherwise, return the old tree. * Argument: * tree - GenTree*. If this pointer points to simd struct which is used for simd * intrinsic, we will morph it as simd intrinsic NI_Vector128_GetElement. * Return: * A GenTree* which points to the new tree. If the tree is not for simd intrinsic, * return nullptr. */ GenTree* Compiler::fgMorphFieldToSimdGetElement(GenTree* tree) { unsigned index = 0; CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; unsigned simdSize = 0; GenTree* simdStructNode = getSIMDStructFromField(tree, &simdBaseJitType, &index, &simdSize); if (simdStructNode != nullptr) { var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); GenTree* op2 = gtNewIconNode(index, TYP_INT); assert(simdSize <= 16); assert(simdSize >= ((index + 1) * genTypeSize(simdBaseType))); tree = gtNewSimdGetElementNode(simdBaseType, simdStructNode, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } return tree; } /***************************************************************************** * Transform an assignment of a SIMD struct field to SimdWithElementNode, and * return a new tree. If it is not such an assignment, then return the old tree. * Argument: * tree - GenTree*. If this pointer points to simd struct which is used for simd * intrinsic, we will morph it as simd intrinsic set. * Return: * A GenTree* which points to the new tree. If the tree is not for simd intrinsic, * return nullptr. */ GenTree* Compiler::fgMorphFieldAssignToSimdSetElement(GenTree* tree) { assert(tree->OperGet() == GT_ASG); unsigned index = 0; CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; unsigned simdSize = 0; GenTree* simdStructNode = getSIMDStructFromField(tree->gtGetOp1(), &simdBaseJitType, &index, &simdSize); if (simdStructNode != nullptr) { var_types simdType = simdStructNode->gtType; var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(simdSize <= 16); assert(simdSize >= ((index + 1) * genTypeSize(simdBaseType))); GenTree* op2 = gtNewIconNode(index, TYP_INT); GenTree* op3 = tree->gtGetOp2(); NamedIntrinsic intrinsicId = NI_Vector128_WithElement; GenTree* target = gtClone(simdStructNode); assert(target != nullptr); GenTree* simdTree = gtNewSimdWithElementNode(simdType, simdStructNode, op2, op3, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); tree->AsOp()->gtOp1 = target; tree->AsOp()->gtOp2 = simdTree; // fgMorphTree has already called fgMorphImplicitByRefArgs() on this assignment, but the source // and target have not yet been morphed. // Therefore, in case the source and/or target are now implicit byrefs, we need to call it again. if (fgMorphImplicitByRefArgs(tree)) { if (tree->gtGetOp1()->OperIsBlk()) { assert(tree->gtGetOp1()->TypeGet() == simdType); tree->gtGetOp1()->SetOper(GT_IND); tree->gtGetOp1()->gtType = simdType; } } #ifdef DEBUG tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif } return tree; } #endif // FEATURE_SIMD //------------------------------------------------------------------------------ // fgMorphCommutative : Try to simplify "(X op C1) op C2" to "X op C3" // for commutative operators. 
// // Arguments: // tree - node to fold // // return value: // A folded GenTree* instance or nullptr if something prevents folding. // GenTreeOp* Compiler::fgMorphCommutative(GenTreeOp* tree) { assert(varTypeIsIntegralOrI(tree->TypeGet())); assert(tree->OperIs(GT_ADD, GT_MUL, GT_OR, GT_AND, GT_XOR)); // op1 can be GT_COMMA, in this case we're going to fold // "(op (COMMA(... (op X C1))) C2)" to "(COMMA(... (op X C3)))" GenTree* op1 = tree->gtGetOp1()->gtEffectiveVal(true); genTreeOps oper = tree->OperGet(); if (!op1->OperIs(oper) || !tree->gtGetOp2()->IsCnsIntOrI() || !op1->gtGetOp2()->IsCnsIntOrI() || op1->gtGetOp1()->IsCnsIntOrI()) { return nullptr; } if (!fgGlobalMorph && (op1 != tree->gtGetOp1())) { // Since 'tree->gtGetOp1()' can have complex structure (e.g. COMMA(..(COMMA(..,op1))) // don't run the optimization for such trees outside of global morph. // Otherwise, there is a chance of violating VNs invariants and/or modifying a tree // that is an active CSE candidate. return nullptr; } if (gtIsActiveCSE_Candidate(tree) || gtIsActiveCSE_Candidate(op1)) { // The optimization removes 'tree' from IR and changes the value of 'op1'. return nullptr; } if (tree->OperMayOverflow() && (tree->gtOverflow() || op1->gtOverflow())) { return nullptr; } GenTreeIntCon* cns1 = op1->gtGetOp2()->AsIntCon(); GenTreeIntCon* cns2 = tree->gtGetOp2()->AsIntCon(); if (!varTypeIsIntegralOrI(tree->TypeGet()) || cns1->TypeIs(TYP_REF) || !cns1->TypeIs(cns2->TypeGet())) { return nullptr; } if (gtIsActiveCSE_Candidate(cns1) || gtIsActiveCSE_Candidate(cns2)) { // The optimization removes 'cns2' from IR and changes the value of 'cns1'. return nullptr; } GenTree* folded = gtFoldExprConst(gtNewOperNode(oper, cns1->TypeGet(), cns1, cns2)); if (!folded->IsCnsIntOrI()) { // Give up if we can't fold "C1 op C2" return nullptr; } auto foldedCns = folded->AsIntCon(); cns1->SetIconValue(foldedCns->IconValue()); cns1->SetVNsFromNode(foldedCns); cns1->gtFieldSeq = foldedCns->gtFieldSeq; op1 = tree->gtGetOp1(); op1->SetVNsFromNode(tree); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(cns2); DEBUG_DESTROY_NODE(foldedCns); INDEBUG(cns1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return op1->AsOp(); } //------------------------------------------------------------------------------ // fgMorphCastedBitwiseOp : Try to simplify "(T)x op (T)y" to "(T)(x op y)". // // Arguments: // tree - node to fold // // Return Value: // A folded GenTree* instance, or nullptr if it couldn't be folded GenTree* Compiler::fgMorphCastedBitwiseOp(GenTreeOp* tree) { // This transform does not preserve VNs and deletes a node. assert(fgGlobalMorph); assert(varTypeIsIntegralOrI(tree)); assert(tree->OperIs(GT_OR, GT_AND, GT_XOR)); GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); genTreeOps oper = tree->OperGet(); // see whether both ops are casts, with matching to and from types. if (op1->OperIs(GT_CAST) && op2->OperIs(GT_CAST)) { // bail if either operand is a checked cast if (op1->gtOverflow() || op2->gtOverflow()) { return nullptr; } var_types fromType = op1->AsCast()->CastOp()->TypeGet(); var_types toType = op1->AsCast()->CastToType(); bool isUnsigned = op1->IsUnsigned(); if (varTypeIsFloating(fromType) || (op2->CastFromType() != fromType) || (op2->CastToType() != toType) || (op2->IsUnsigned() != isUnsigned)) { return nullptr; } /* // Reuse gentree nodes: // // tree op1 // / \ | // op1 op2 ==> tree // | | / \. 
// x y x y // // (op2 becomes garbage) */ tree->gtOp1 = op1->AsCast()->CastOp(); tree->gtOp2 = op2->AsCast()->CastOp(); tree->gtType = genActualType(fromType); op1->gtType = genActualType(toType); op1->AsCast()->gtOp1 = tree; op1->AsCast()->CastToType() = toType; op1->SetAllEffectsFlags(tree); // no need to update isUnsigned DEBUG_DESTROY_NODE(op2); INDEBUG(op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return op1; } return nullptr; } /***************************************************************************** * * Transform the given GTK_SMPOP tree for code generation. */ #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) { ALLOCA_CHECK(); assert(tree->OperKind() & GTK_SMPOP); /* The steps in this function are : o Perform required preorder processing o Process the first, then second operand, if any o Perform required postorder morphing o Perform optional postorder morphing if optimizing */ bool isQmarkColon = false; AssertionIndex origAssertionCount = DUMMY_INIT(0); AssertionDsc* origAssertionTab = DUMMY_INIT(NULL); AssertionIndex thenAssertionCount = DUMMY_INIT(0); AssertionDsc* thenAssertionTab = DUMMY_INIT(NULL); if (fgGlobalMorph) { tree = fgMorphForRegisterFP(tree); } genTreeOps oper = tree->OperGet(); var_types typ = tree->TypeGet(); GenTree* op1 = tree->AsOp()->gtOp1; GenTree* op2 = tree->gtGetOp2IfPresent(); /*------------------------------------------------------------------------- * First do any PRE-ORDER processing */ switch (oper) { // Some arithmetic operators need to use a helper call to the EE int helper; case GT_ASG: tree = fgDoNormalizeOnStore(tree); /* fgDoNormalizeOnStore can change op2 */ noway_assert(op1 == tree->AsOp()->gtOp1); op2 = tree->AsOp()->gtOp2; #ifdef FEATURE_SIMD if (IsBaselineSimdIsaSupported()) { // We should check whether op2 should be assigned to a SIMD field or not. // If it is, we should tranlate the tree to simd intrinsic. assert(!fgGlobalMorph || ((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0)); GenTree* newTree = fgMorphFieldAssignToSimdSetElement(tree); typ = tree->TypeGet(); op1 = tree->gtGetOp1(); op2 = tree->gtGetOp2(); #ifdef DEBUG assert((tree == newTree) && (tree->OperGet() == oper)); if ((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) != 0) { tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; } #endif // DEBUG } #endif // We can't CSE the LHS of an assignment. Only r-values can be CSEed. // Previously, the "lhs" (addr) of a block op was CSE'd. So, to duplicate the former // behavior, allow CSE'ing if is a struct type (or a TYP_REF transformed from a struct type) // TODO-1stClassStructs: improve this. if (op1->IsLocal() || (op1->TypeGet() != TYP_STRUCT)) { op1->gtFlags |= GTF_DONT_CSE; } break; case GT_ADDR: /* op1 of a GT_ADDR is an l-value. Only r-values can be CSEed */ op1->gtFlags |= GTF_DONT_CSE; break; case GT_QMARK: case GT_JTRUE: noway_assert(op1); if (op1->OperIsCompare()) { /* Mark the comparison node with GTF_RELOP_JMP_USED so it knows that it does not need to materialize the result as a 0 or 1. */ /* We also mark it as DONT_CSE, as we don't handle QMARKs with nonRELOP op1s */ op1->gtFlags |= (GTF_RELOP_JMP_USED | GTF_DONT_CSE); // Request that the codegen for op1 sets the condition flags // when it generates the code for op1. // // Codegen for op1 must set the condition flags if // this method returns true. 
// op1->gtRequestSetFlags(); } else { GenTree* effOp1 = op1->gtEffectiveVal(); noway_assert((effOp1->gtOper == GT_CNS_INT) && (effOp1->IsIntegralConst(0) || effOp1->IsIntegralConst(1))); } break; case GT_COLON: if (optLocalAssertionProp) { isQmarkColon = true; } break; case GT_FIELD: return fgMorphField(tree, mac); case GT_INDEX: return fgMorphArrayIndex(tree); case GT_CAST: { GenTree* morphedCast = fgMorphExpandCast(tree->AsCast()); if (morphedCast != nullptr) { return morphedCast; } op1 = tree->AsCast()->CastOp(); } break; case GT_MUL: noway_assert(op2 != nullptr); if (opts.OptimizationEnabled() && !optValnumCSE_phase && !tree->gtOverflow()) { // MUL(NEG(a), C) => MUL(a, NEG(C)) if (op1->OperIs(GT_NEG) && !op1->gtGetOp1()->IsCnsIntOrI() && op2->IsCnsIntOrI() && !op2->IsIconHandle()) { GenTree* newOp1 = op1->gtGetOp1(); GenTree* newConst = gtNewIconNode(-op2->AsIntCon()->IconValue(), op2->TypeGet()); DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(op2); tree->AsOp()->gtOp1 = newOp1; tree->AsOp()->gtOp2 = newConst; return fgMorphSmpOp(tree, mac); } } #ifndef TARGET_64BIT if (typ == TYP_LONG) { // For (long)int1 * (long)int2, we dont actually do the // casts, and just multiply the 32 bit values, which will // give us the 64 bit result in edx:eax. if (tree->Is64RsltMul()) { // We are seeing this node again. // Morph only the children of casts, // so as to avoid losing them. tree = fgMorphLongMul(tree->AsOp()); goto DONE_MORPHING_CHILDREN; } tree = fgRecognizeAndMorphLongMul(tree->AsOp()); op1 = tree->AsOp()->gtGetOp1(); op2 = tree->AsOp()->gtGetOp2(); if (tree->Is64RsltMul()) { goto DONE_MORPHING_CHILDREN; } else { if (tree->gtOverflow()) helper = tree->IsUnsigned() ? CORINFO_HELP_ULMUL_OVF : CORINFO_HELP_LMUL_OVF; else helper = CORINFO_HELP_LMUL; goto USE_HELPER_FOR_ARITH; } } #endif // !TARGET_64BIT break; case GT_ARR_LENGTH: if (op1->OperIs(GT_CNS_STR)) { // Optimize `ldstr + String::get_Length()` to CNS_INT // e.g. "Hello".Length => 5 GenTreeIntCon* iconNode = gtNewStringLiteralLength(op1->AsStrCon()); if (iconNode != nullptr) { INDEBUG(iconNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return iconNode; } } break; case GT_DIV: // Replace "val / dcon" with "val * (1.0 / dcon)" if dcon is a power of two. 
            // Powers of two within range are always exactly represented,
            // so multiplication by the reciprocal is safe in this scenario
            if (fgGlobalMorph && op2->IsCnsFltOrDbl())
            {
                double divisor = op2->AsDblCon()->gtDconVal;
                if (((typ == TYP_DOUBLE) && FloatingPointUtils::hasPreciseReciprocal(divisor)) ||
                    ((typ == TYP_FLOAT) && FloatingPointUtils::hasPreciseReciprocal(forceCastToFloat(divisor))))
                {
                    oper = GT_MUL;
                    tree->ChangeOper(oper);
                    op2->AsDblCon()->gtDconVal = 1.0 / divisor;
                }
            }

            // Convert DIV to UDIV if both op1 and op2 are known to be never negative
            if (!gtIsActiveCSE_Candidate(tree) && varTypeIsIntegral(tree) && op1->IsNeverNegative(this) &&
                op2->IsNeverNegative(this))
            {
                assert(tree->OperIs(GT_DIV));
                tree->ChangeOper(GT_UDIV, GenTree::PRESERVE_VN);
                return fgMorphSmpOp(tree, mac);
            }

#ifndef TARGET_64BIT
            if (typ == TYP_LONG)
            {
                helper = CORINFO_HELP_LDIV;
                goto USE_HELPER_FOR_ARITH;
            }

#if USE_HELPERS_FOR_INT_DIV
            if (typ == TYP_INT)
            {
                helper = CORINFO_HELP_DIV;
                goto USE_HELPER_FOR_ARITH;
            }
#endif
#endif // !TARGET_64BIT
            break;

        case GT_UDIV:

#ifndef TARGET_64BIT
            if (typ == TYP_LONG)
            {
                helper = CORINFO_HELP_ULDIV;
                goto USE_HELPER_FOR_ARITH;
            }

#if USE_HELPERS_FOR_INT_DIV
            if (typ == TYP_INT)
            {
                helper = CORINFO_HELP_UDIV;
                goto USE_HELPER_FOR_ARITH;
            }
#endif
#endif // TARGET_64BIT
            break;

        case GT_MOD:

            if (varTypeIsFloating(typ))
            {
                helper = CORINFO_HELP_DBLREM;
                noway_assert(op2);
                if (op1->TypeGet() == TYP_FLOAT)
                {
                    if (op2->TypeGet() == TYP_FLOAT)
                    {
                        helper = CORINFO_HELP_FLTREM;
                    }
                    else
                    {
                        tree->AsOp()->gtOp1 = op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE);
                    }
                }
                else if (op2->TypeGet() == TYP_FLOAT)
                {
                    tree->AsOp()->gtOp2 = op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE);
                }
                goto USE_HELPER_FOR_ARITH;
            }

            // Convert MOD to UMOD if both op1 and op2 are known to be never negative
            if (!gtIsActiveCSE_Candidate(tree) && varTypeIsIntegral(tree) && op1->IsNeverNegative(this) &&
                op2->IsNeverNegative(this))
            {
                assert(tree->OperIs(GT_MOD));
                tree->ChangeOper(GT_UMOD, GenTree::PRESERVE_VN);
                return fgMorphSmpOp(tree, mac);
            }

            // Do not use optimizations (unlike UMOD's idiv optimizing during codegen) for signed mod.
            // A similar optimization for signed mod will not work for a negative perfectly divisible
            // HI-word. To make it correct, we would need to divide without the sign and then flip the
            // result sign after mod. This requires 18 opcodes + flow making it not worthy to inline.
            goto ASSIGN_HELPER_FOR_MOD;

        case GT_UMOD:

#ifdef TARGET_ARMARCH
            //
            // Note for TARGET_ARMARCH we don't have a remainder instruction, so we don't do this optimization
            //
#else  // TARGET_XARCH
            // If this is an unsigned long mod with a constant divisor,
            // then don't morph to a helper call - it can be done faster inline using idiv.

            noway_assert(op2);
            if ((typ == TYP_LONG) && opts.OptEnabled(CLFLG_CONSTANTFOLD))
            {
                if (op2->OperIs(GT_CNS_NATIVELONG) && op2->AsIntConCommon()->LngValue() >= 2 &&
                    op2->AsIntConCommon()->LngValue() <= 0x3fffffff)
                {
                    tree->AsOp()->gtOp1 = op1 = fgMorphTree(op1);
                    noway_assert(op1->TypeIs(TYP_LONG));

                    // Update flags for op1 morph.
                    tree->gtFlags &= ~GTF_ALL_EFFECT;

                    // Only update with op1 as op2 is a constant.
                    tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT);

                    // If op1 is a constant, then do constant folding of the division operator.
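                    // For instance, a fully constant "UMOD(CNS(100), CNS(7))" folds to "CNS(2)" here.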
if (op1->OperIs(GT_CNS_NATIVELONG)) { tree = gtFoldExpr(tree); } if (!tree->OperIsConst()) { tree->AsOp()->CheckDivideByConstOptimized(this); } return tree; } } #endif // TARGET_XARCH ASSIGN_HELPER_FOR_MOD: // For "val % 1", return 0 if op1 doesn't have any side effects // and we are not in the CSE phase, we cannot discard 'tree' // because it may contain CSE expressions that we haven't yet examined. // if (((op1->gtFlags & GTF_SIDE_EFFECT) == 0) && !optValnumCSE_phase) { if (op2->IsIntegralConst(1)) { GenTree* zeroNode = gtNewZeroConNode(typ); #ifdef DEBUG zeroNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif DEBUG_DESTROY_NODE(tree); return zeroNode; } } #ifndef TARGET_64BIT if (typ == TYP_LONG) { helper = (oper == GT_UMOD) ? CORINFO_HELP_ULMOD : CORINFO_HELP_LMOD; goto USE_HELPER_FOR_ARITH; } #if USE_HELPERS_FOR_INT_DIV if (typ == TYP_INT) { if (oper == GT_UMOD) { helper = CORINFO_HELP_UMOD; goto USE_HELPER_FOR_ARITH; } else if (oper == GT_MOD) { helper = CORINFO_HELP_MOD; goto USE_HELPER_FOR_ARITH; } } #endif #endif // !TARGET_64BIT #ifdef TARGET_ARM64 // For ARM64 we don't have a remainder instruction, // The architecture manual suggests the following transformation to // generate code for such operator: // // a % b = a - (a / b) * b; // // TODO: there are special cases where it can be done better, for example // when the modulo operation is unsigned and the divisor is a // integer constant power of two. In this case, we can make the transform: // // a % b = a & (b - 1); // // Lower supports it for all cases except when `a` is constant, but // in Morph we can't guarantee that `a` won't be transformed into a constant, // so can't guarantee that lower will be able to do this optimization. { // Do "a % b = a - (a / b) * b" morph always, see TODO before this block. bool doMorphModToSubMulDiv = true; if (doMorphModToSubMulDiv) { assert(!optValnumCSE_phase); tree = fgMorphModToSubMulDiv(tree->AsOp()); op1 = tree->AsOp()->gtOp1; op2 = tree->AsOp()->gtOp2; } } #else // !TARGET_ARM64 // If b is not a power of 2 constant then lowering replaces a % b // with a - (a / b) * b and applies magic division optimization to // a / b. The code may already contain an a / b expression (e.g. // x = a / 10; y = a % 10;) and then we end up with redundant code. // If we convert % to / here we give CSE the opportunity to eliminate // the redundant division. If there's no redundant division then // nothing is lost, lowering would have done this transform anyway. if (!optValnumCSE_phase && ((tree->OperGet() == GT_MOD) && op2->IsIntegralConst())) { ssize_t divisorValue = op2->AsIntCon()->IconValue(); size_t absDivisorValue = (divisorValue == SSIZE_T_MIN) ? static_cast<size_t>(divisorValue) : static_cast<size_t>(abs(divisorValue)); if (!isPow2(absDivisorValue)) { tree = fgMorphModToSubMulDiv(tree->AsOp()); op1 = tree->AsOp()->gtOp1; op2 = tree->AsOp()->gtOp2; } } #endif // !TARGET_ARM64 break; USE_HELPER_FOR_ARITH: { // TODO: this comment is wrong now, do an appropriate fix. /* We have to morph these arithmetic operations into helper calls before morphing the arguments (preorder), else the arguments won't get correct values of fgPtrArgCntCur. However, try to fold the tree first in case we end up with a simple node which won't need a helper call at all */ noway_assert(tree->OperIsBinary()); GenTree* oldTree = tree; tree = gtFoldExpr(tree); // Were we able to fold it ? // Note that gtFoldExpr may return a non-leaf even if successful // e.g. 
for something like "expr / 1" - see also bug #290853 if (tree->OperIsLeaf() || (oldTree != tree)) { return (oldTree != tree) ? fgMorphTree(tree) : fgMorphLeaf(tree); } // Did we fold it into a comma node with throw? if (tree->gtOper == GT_COMMA) { noway_assert(fgIsCommaThrow(tree)); return fgMorphTree(tree); } } return fgMorphIntoHelperCall(tree, helper, gtNewCallArgs(op1, op2)); case GT_RETURN: if (!tree->TypeIs(TYP_VOID)) { if (op1->OperIs(GT_OBJ, GT_BLK, GT_IND)) { op1 = fgMorphRetInd(tree->AsUnOp()); } if (op1->OperIs(GT_LCL_VAR)) { // With a `genReturnBB` this `RETURN(src)` tree will be replaced by a `ASG(genReturnLocal, src)` // and `ASG` will be tranformed into field by field copy without parent local referencing if // possible. GenTreeLclVar* lclVar = op1->AsLclVar(); unsigned lclNum = lclVar->GetLclNum(); if ((genReturnLocal == BAD_VAR_NUM) || (genReturnLocal == lclNum)) { LclVarDsc* varDsc = lvaGetDesc(lclVar); if (varDsc->CanBeReplacedWithItsField(this)) { // We can replace the struct with its only field and allow copy propagation to replace // return value that was written as a field. unsigned fieldLclNum = varDsc->lvFieldLclStart; LclVarDsc* fieldDsc = lvaGetDesc(fieldLclNum); JITDUMP("Replacing an independently promoted local var V%02u with its only field " "V%02u for " "the return [%06u]\n", lclVar->GetLclNum(), fieldLclNum, dspTreeID(tree)); lclVar->SetLclNum(fieldLclNum); lclVar->ChangeType(fieldDsc->lvType); } } } } // normalize small integer return values if (fgGlobalMorph && varTypeIsSmall(info.compRetType) && (op1 != nullptr) && !op1->TypeIs(TYP_VOID) && fgCastNeeded(op1, info.compRetType)) { // Small-typed return values are normalized by the callee op1 = gtNewCastNode(TYP_INT, op1, false, info.compRetType); // Propagate GTF_COLON_COND op1->gtFlags |= (tree->gtFlags & GTF_COLON_COND); tree->AsOp()->gtOp1 = fgMorphTree(op1); // Propagate side effect flags tree->SetAllEffectsFlags(tree->AsOp()->gtGetOp1()); return tree; } break; case GT_EQ: case GT_NE: { GenTree* optimizedTree = gtFoldTypeCompare(tree); if (optimizedTree != tree) { return fgMorphTree(optimizedTree); } // Pattern-matching optimization: // (a % c) ==/!= 0 // for power-of-2 constant `c` // => // a & (c - 1) ==/!= 0 // For integer `a`, even if negative. 
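            // For example, "(a % 8) == 0" is rewritten to "(a & 7) == 0".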
if (opts.OptimizationEnabled() && !optValnumCSE_phase) { assert(tree->OperIs(GT_EQ, GT_NE)); if (op1->OperIs(GT_MOD) && varTypeIsIntegral(op1) && op2->IsIntegralConst(0)) { GenTree* op1op2 = op1->AsOp()->gtOp2; if (op1op2->IsCnsIntOrI()) { const ssize_t modValue = op1op2->AsIntCon()->IconValue(); if (isPow2(modValue)) { JITDUMP("\nTransforming:\n"); DISPTREE(tree); op1->SetOper(GT_AND); // Change % => & op1op2->AsIntConCommon()->SetIconValue(modValue - 1); // Change c => c - 1 fgUpdateConstTreeValueNumber(op1op2); JITDUMP("\ninto:\n"); DISPTREE(tree); } } } } } FALLTHROUGH; case GT_GT: { // Try and optimize nullable boxes feeding compares GenTree* optimizedTree = gtFoldBoxNullable(tree); if (optimizedTree->OperGet() != tree->OperGet()) { return optimizedTree; } else { tree = optimizedTree; } op1 = tree->AsOp()->gtOp1; op2 = tree->gtGetOp2IfPresent(); break; } case GT_RUNTIMELOOKUP: return fgMorphTree(op1); #ifdef TARGET_ARM case GT_INTRINSIC: if (tree->AsIntrinsic()->gtIntrinsicName == NI_System_Math_Round) { switch (tree->TypeGet()) { case TYP_DOUBLE: return fgMorphIntoHelperCall(tree, CORINFO_HELP_DBLROUND, gtNewCallArgs(op1)); case TYP_FLOAT: return fgMorphIntoHelperCall(tree, CORINFO_HELP_FLTROUND, gtNewCallArgs(op1)); default: unreached(); } } break; #endif case GT_PUTARG_TYPE: return fgMorphTree(tree->AsUnOp()->gtGetOp1()); case GT_NULLCHECK: { op1 = tree->AsUnOp()->gtGetOp1(); if (op1->IsCall()) { GenTreeCall* const call = op1->AsCall(); if (call->IsHelperCall() && s_helperCallProperties.NonNullReturn(eeGetHelperNum(call->gtCallMethHnd))) { JITDUMP("\nNULLCHECK on [%06u] will always succeed\n", dspTreeID(call)); // TODO: Can we also remove the call? // return fgMorphTree(call); } } } break; default: break; } if (opts.OptimizationEnabled() && fgGlobalMorph) { GenTree* morphed = fgMorphReduceAddOps(tree); if (morphed != tree) return fgMorphTree(morphed); } /*------------------------------------------------------------------------- * Process the first operand, if any */ if (op1) { // If we are entering the "then" part of a Qmark-Colon we must // save the state of the current copy assignment table // so that we can restore this state when entering the "else" part if (isQmarkColon) { noway_assert(optLocalAssertionProp); if (optAssertionCount) { noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea unsigned tabSize = optAssertionCount * sizeof(AssertionDsc); origAssertionTab = (AssertionDsc*)ALLOCA(tabSize); origAssertionCount = optAssertionCount; memcpy(origAssertionTab, optAssertionTabPrivate, tabSize); } else { origAssertionCount = 0; origAssertionTab = nullptr; } } // We might need a new MorphAddressContext context. (These are used to convey // parent context about how addresses being calculated will be used; see the // specification comment for MorphAddrContext for full details.) // Assume it's an Ind context to start. MorphAddrContext subIndMac1(MACK_Ind); MorphAddrContext* subMac1 = mac; if (subMac1 == nullptr || subMac1->m_kind == MACK_Ind) { switch (tree->gtOper) { case GT_ADDR: // A non-null mac here implies this node is part of an address computation. // If so, we need to pass the existing mac down to the child node. // // Otherwise, use a new mac. if (subMac1 == nullptr) { subMac1 = &subIndMac1; subMac1->m_kind = MACK_Addr; } break; case GT_COMMA: // In a comma, the incoming context only applies to the rightmost arg of the // comma list. The left arg (op1) gets a fresh context. 
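                // For example, in "IND(COMMA(sideEffect, addr))" only "addr" contributes to the
                // enclosing address; "sideEffect" is morphed with its own fresh context.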
subMac1 = nullptr; break; case GT_OBJ: case GT_BLK: case GT_IND: // A non-null mac here implies this node is part of an address computation (the tree parent is // GT_ADDR). // If so, we need to pass the existing mac down to the child node. // // Otherwise, use a new mac. if (subMac1 == nullptr) { subMac1 = &subIndMac1; } break; default: break; } } // For additions, if we're in an IND context keep track of whether // all offsets added to the address are constant, and their sum. if (tree->gtOper == GT_ADD && subMac1 != nullptr) { assert(subMac1->m_kind == MACK_Ind || subMac1->m_kind == MACK_Addr); // Can't be a CopyBlock. GenTree* otherOp = tree->AsOp()->gtOp2; // Is the other operator a constant? if (otherOp->IsCnsIntOrI()) { ClrSafeInt<size_t> totalOffset(subMac1->m_totalOffset); totalOffset += otherOp->AsIntConCommon()->IconValue(); if (totalOffset.IsOverflow()) { // We will consider an offset so large as to overflow as "not a constant" -- // we will do a null check. subMac1->m_allConstantOffsets = false; } else { subMac1->m_totalOffset += otherOp->AsIntConCommon()->IconValue(); } } else { subMac1->m_allConstantOffsets = false; } } // If op1 is a GT_FIELD or indir, we need to pass down the mac if // its parent is GT_ADDR, since the address of op1 // is part of an ongoing address computation. Otherwise // op1 represents the value of the field and so any address // calculations it does are in a new context. if (((op1->gtOper == GT_FIELD) || op1->OperIsIndir()) && (tree->gtOper != GT_ADDR)) { subMac1 = nullptr; // The impact of op1's value to any ongoing // address computation is handled below when looking // at op2. } tree->AsOp()->gtOp1 = op1 = fgMorphTree(op1, subMac1); // If we are exiting the "then" part of a Qmark-Colon we must // save the state of the current copy assignment table // so that we can merge this state with the "else" part exit if (isQmarkColon) { noway_assert(optLocalAssertionProp); if (optAssertionCount) { noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea unsigned tabSize = optAssertionCount * sizeof(AssertionDsc); thenAssertionTab = (AssertionDsc*)ALLOCA(tabSize); thenAssertionCount = optAssertionCount; memcpy(thenAssertionTab, optAssertionTabPrivate, tabSize); } else { thenAssertionCount = 0; thenAssertionTab = nullptr; } } /* Morphing along with folding and inlining may have changed the * side effect flags, so we have to reset them * * NOTE: Don't reset the exception flags on nodes that may throw */ assert(tree->gtOper != GT_CALL); if (!tree->OperRequiresCallFlag(this)) { tree->gtFlags &= ~GTF_CALL; } /* Propagate the new flags */ tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT); // &aliasedVar doesn't need GTF_GLOB_REF, though alisasedVar does // Similarly for clsVar if (oper == GT_ADDR && (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CLS_VAR)) { tree->gtFlags &= ~GTF_GLOB_REF; } } // if (op1) /*------------------------------------------------------------------------- * Process the second operand, if any */ if (op2) { // If we are entering the "else" part of a Qmark-Colon we must // reset the state of the current copy assignment table if (isQmarkColon) { noway_assert(optLocalAssertionProp); optAssertionReset(0); if (origAssertionCount) { size_t tabSize = origAssertionCount * sizeof(AssertionDsc); memcpy(optAssertionTabPrivate, origAssertionTab, tabSize); optAssertionReset(origAssertionCount); } } // We might need a new MorphAddressContext context to use in evaluating op2. 
// (These are used to convey parent context about how addresses being calculated // will be used; see the specification comment for MorphAddrContext for full details.) // Assume it's an Ind context to start. switch (tree->gtOper) { case GT_ADD: if (mac != nullptr && mac->m_kind == MACK_Ind) { GenTree* otherOp = tree->AsOp()->gtOp1; // Is the other operator a constant? if (otherOp->IsCnsIntOrI()) { mac->m_totalOffset += otherOp->AsIntConCommon()->IconValue(); } else { mac->m_allConstantOffsets = false; } } break; default: break; } // If op2 is a GT_FIELD or indir, we must be taking its value, // so it should evaluate its address in a new context. if ((op2->gtOper == GT_FIELD) || op2->OperIsIndir()) { // The impact of op2's value to any ongoing // address computation is handled above when looking // at op1. mac = nullptr; } tree->AsOp()->gtOp2 = op2 = fgMorphTree(op2, mac); /* Propagate the side effect flags from op2 */ tree->gtFlags |= (op2->gtFlags & GTF_ALL_EFFECT); // If we are exiting the "else" part of a Qmark-Colon we must // merge the state of the current copy assignment table with // that of the exit of the "then" part. if (isQmarkColon) { noway_assert(optLocalAssertionProp); // If either exit table has zero entries then // the merged table also has zero entries if (optAssertionCount == 0 || thenAssertionCount == 0) { optAssertionReset(0); } else { size_t tabSize = optAssertionCount * sizeof(AssertionDsc); if ((optAssertionCount != thenAssertionCount) || (memcmp(thenAssertionTab, optAssertionTabPrivate, tabSize) != 0)) { // Yes they are different so we have to find the merged set // Iterate over the copy asgn table removing any entries // that do not have an exact match in the thenAssertionTab AssertionIndex index = 1; while (index <= optAssertionCount) { AssertionDsc* curAssertion = optGetAssertion(index); for (unsigned j = 0; j < thenAssertionCount; j++) { AssertionDsc* thenAssertion = &thenAssertionTab[j]; // Do the left sides match? if ((curAssertion->op1.lcl.lclNum == thenAssertion->op1.lcl.lclNum) && (curAssertion->assertionKind == thenAssertion->assertionKind)) { // Do the right sides match? 
if ((curAssertion->op2.kind == thenAssertion->op2.kind) && (curAssertion->op2.lconVal == thenAssertion->op2.lconVal)) { goto KEEP; } else { goto REMOVE; } } } // // If we fall out of the loop above then we didn't find // any matching entry in the thenAssertionTab so it must // have been killed on that path so we remove it here // REMOVE: // The data at optAssertionTabPrivate[i] is to be removed CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) { printf("The QMARK-COLON "); printTreeID(tree); printf(" removes assertion candidate #%d\n", index); } #endif optAssertionRemove(index); continue; KEEP: // The data at optAssertionTabPrivate[i] is to be kept index++; } } } } } // if (op2) #ifndef TARGET_64BIT DONE_MORPHING_CHILDREN: #endif // !TARGET_64BIT if (tree->OperIsIndirOrArrLength()) { tree->SetIndirExceptionFlags(this); } else { if (tree->OperMayThrow(this)) { // Mark the tree node as potentially throwing an exception tree->gtFlags |= GTF_EXCEPT; } else { if (((op1 == nullptr) || ((op1->gtFlags & GTF_EXCEPT) == 0)) && ((op2 == nullptr) || ((op2->gtFlags & GTF_EXCEPT) == 0))) { tree->gtFlags &= ~GTF_EXCEPT; } } } if (tree->OperRequiresAsgFlag()) { tree->gtFlags |= GTF_ASG; } else { if (((op1 == nullptr) || ((op1->gtFlags & GTF_ASG) == 0)) && ((op2 == nullptr) || ((op2->gtFlags & GTF_ASG) == 0))) { tree->gtFlags &= ~GTF_ASG; } } if (tree->OperRequiresCallFlag(this)) { tree->gtFlags |= GTF_CALL; } else { if (((op1 == nullptr) || ((op1->gtFlags & GTF_CALL) == 0)) && ((op2 == nullptr) || ((op2->gtFlags & GTF_CALL) == 0))) { tree->gtFlags &= ~GTF_CALL; } } /*------------------------------------------------------------------------- * Now do POST-ORDER processing */ if (varTypeIsGC(tree->TypeGet()) && (op1 && !varTypeIsGC(op1->TypeGet())) && (op2 && !varTypeIsGC(op2->TypeGet()))) { // The tree is really not GC but was marked as such. Now that the // children have been unmarked, unmark the tree too. // Remember that GT_COMMA inherits it's type only from op2 if (tree->gtOper == GT_COMMA) { tree->gtType = genActualType(op2->TypeGet()); } else { tree->gtType = genActualType(op1->TypeGet()); } } GenTree* oldTree = tree; GenTree* qmarkOp1 = nullptr; GenTree* qmarkOp2 = nullptr; if ((tree->OperGet() == GT_QMARK) && (tree->AsOp()->gtOp2->OperGet() == GT_COLON)) { qmarkOp1 = oldTree->AsOp()->gtOp2->AsOp()->gtOp1; qmarkOp2 = oldTree->AsOp()->gtOp2->AsOp()->gtOp2; } // Try to fold it, maybe we get lucky, tree = gtFoldExpr(tree); if (oldTree != tree) { /* if gtFoldExpr returned op1 or op2 then we are done */ if ((tree == op1) || (tree == op2) || (tree == qmarkOp1) || (tree == qmarkOp2)) { return tree; } /* If we created a comma-throw tree then we need to morph op1 */ if (fgIsCommaThrow(tree)) { tree->AsOp()->gtOp1 = fgMorphTree(tree->AsOp()->gtOp1); fgMorphTreeDone(tree); return tree; } return tree; } else if (tree->OperIsConst()) { return tree; } /* gtFoldExpr could have used setOper to change the oper */ oper = tree->OperGet(); typ = tree->TypeGet(); /* gtFoldExpr could have changed op1 and op2 */ op1 = tree->AsOp()->gtOp1; op2 = tree->gtGetOp2IfPresent(); // Do we have an integer compare operation? // if (tree->OperIsCompare() && varTypeIsIntegralOrI(tree->TypeGet())) { // Are we comparing against zero? // if (op2->IsIntegralConst(0)) { // Request that the codegen for op1 sets the condition flags // when it generates the code for op1. // // Codegen for op1 must set the condition flags if // this method returns true. 
// op1->gtRequestSetFlags(); } } /*------------------------------------------------------------------------- * Perform the required oper-specific postorder morphing */ GenTree* temp; size_t ival1; GenTree* lclVarTree; GenTree* effectiveOp1; FieldSeqNode* fieldSeq = nullptr; switch (oper) { case GT_ASG: if (op1->OperIs(GT_LCL_VAR) && ((op1->gtFlags & GTF_VAR_FOLDED_IND) != 0)) { op1->gtFlags &= ~GTF_VAR_FOLDED_IND; tree = fgDoNormalizeOnStore(tree); op2 = tree->gtGetOp2(); } lclVarTree = fgIsIndirOfAddrOfLocal(op1); if (lclVarTree != nullptr) { lclVarTree->gtFlags |= GTF_VAR_DEF; } effectiveOp1 = op1->gtEffectiveVal(); // If we are storing a small type, we might be able to omit a cast. if (effectiveOp1->OperIs(GT_IND, GT_CLS_VAR) && varTypeIsSmall(effectiveOp1)) { if (!gtIsActiveCSE_Candidate(op2) && op2->OperIs(GT_CAST) && varTypeIsIntegral(op2->AsCast()->CastOp()) && !op2->gtOverflow()) { var_types castType = op2->CastToType(); // If we are performing a narrowing cast and // castType is larger or the same as op1's type // then we can discard the cast. if (varTypeIsSmall(castType) && (genTypeSize(castType) >= genTypeSize(effectiveOp1))) { tree->AsOp()->gtOp2 = op2 = op2->AsCast()->CastOp(); } } } fgAssignSetVarDef(tree); /* We can't CSE the LHS of an assignment */ /* We also must set in the pre-morphing phase, otherwise assertionProp doesn't see it */ if (op1->IsLocal() || (op1->TypeGet() != TYP_STRUCT)) { op1->gtFlags |= GTF_DONT_CSE; } break; case GT_CAST: tree = fgOptimizeCast(tree->AsCast()); if (!tree->OperIsSimple()) { return tree; } if (tree->OperIs(GT_CAST) && tree->gtOverflow()) { fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW); } typ = tree->TypeGet(); oper = tree->OperGet(); op1 = tree->AsOp()->gtGetOp1(); op2 = tree->gtGetOp2IfPresent(); break; case GT_EQ: case GT_NE: // It is not safe to reorder/delete CSE's if (!optValnumCSE_phase && op2->IsIntegralConst()) { tree = fgOptimizeEqualityComparisonWithConst(tree->AsOp()); assert(tree->OperIsCompare()); oper = tree->OperGet(); op1 = tree->gtGetOp1(); op2 = tree->gtGetOp2(); } goto COMPARE; case GT_LT: case GT_LE: case GT_GE: case GT_GT: if (!optValnumCSE_phase && (op1->OperIs(GT_CAST) || op2->OperIs(GT_CAST))) { tree = fgOptimizeRelationalComparisonWithCasts(tree->AsOp()); oper = tree->OperGet(); op1 = tree->gtGetOp1(); op2 = tree->gtGetOp2(); } // op2's value may be changed, so it cannot be a CSE candidate. if (op2->IsIntegralConst() && !gtIsActiveCSE_Candidate(op2)) { tree = fgOptimizeRelationalComparisonWithConst(tree->AsOp()); oper = tree->OperGet(); assert(op1 == tree->AsOp()->gtGetOp1()); assert(op2 == tree->AsOp()->gtGetOp2()); } COMPARE: noway_assert(tree->OperIsCompare()); break; case GT_MUL: #ifndef TARGET_64BIT if (typ == TYP_LONG) { // This must be GTF_MUL_64RSLT INDEBUG(tree->AsOp()->DebugCheckLongMul()); return tree; } #endif // TARGET_64BIT goto CM_OVF_OP; case GT_SUB: if (tree->gtOverflow()) { goto CM_OVF_OP; } // TODO #4104: there are a lot of other places where // this condition is not checked before transformations. if (fgGlobalMorph) { /* Check for "op1 - cns2" , we change it to "op1 + (-cns2)" */ noway_assert(op2); if (op2->IsCnsIntOrI() && !op2->IsIconHandle()) { // Negate the constant and change the node to be "+", // except when `op2` is a const byref. 
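                    // For example, "SUB(x, 5)" becomes "ADD(x, -5)" and is then handled by the
                    // commutative GT_ADD path below.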
op2->AsIntConCommon()->SetIconValue(-op2->AsIntConCommon()->IconValue()); op2->AsIntConRef().gtFieldSeq = FieldSeqStore::NotAField(); oper = GT_ADD; tree->ChangeOper(oper); goto CM_ADD_OP; } /* Check for "cns1 - op2" , we change it to "(cns1 + (-op2))" */ noway_assert(op1); if (op1->IsCnsIntOrI()) { noway_assert(varTypeIsIntOrI(tree)); // The type of the new GT_NEG node cannot just be op2->TypeGet(). // Otherwise we may sign-extend incorrectly in cases where the GT_NEG // node ends up feeding directly into a cast, for example in // GT_CAST<ubyte>(GT_SUB(0, s_1.ubyte)) tree->AsOp()->gtOp2 = op2 = gtNewOperNode(GT_NEG, genActualType(op2->TypeGet()), op2); fgMorphTreeDone(op2); oper = GT_ADD; tree->ChangeOper(oper); goto CM_ADD_OP; } /* No match - exit */ } // Skip optimization if non-NEG operand is constant. // Both op1 and op2 are not constant because it was already checked above. if (opts.OptimizationEnabled() && fgGlobalMorph) { // a - -b = > a + b // SUB(a, (NEG(b)) => ADD(a, b) if (!op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG)) { // tree: SUB // op1: a // op2: NEG // op2Child: b GenTree* op2Child = op2->AsOp()->gtOp1; // b oper = GT_ADD; tree->SetOper(oper, GenTree::PRESERVE_VN); tree->AsOp()->gtOp2 = op2Child; DEBUG_DESTROY_NODE(op2); op2 = op2Child; } // -a - -b = > b - a // SUB(NEG(a), (NEG(b)) => SUB(b, a) else if (op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG) && gtCanSwapOrder(op1, op2)) { // tree: SUB // op1: NEG // op1Child: a // op2: NEG // op2Child: b GenTree* op1Child = op1->AsOp()->gtOp1; // a GenTree* op2Child = op2->AsOp()->gtOp1; // b tree->AsOp()->gtOp1 = op2Child; tree->AsOp()->gtOp2 = op1Child; DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(op2); op1 = op2Child; op2 = op1Child; } } break; #ifdef TARGET_ARM64 case GT_DIV: if (!varTypeIsFloating(tree->gtType)) { // Codegen for this instruction needs to be able to throw two exceptions: fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW); fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO); } break; case GT_UDIV: // Codegen for this instruction needs to be able to throw one exception: fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO); break; #endif case GT_ADD: CM_OVF_OP: if (tree->gtOverflow()) { tree->gtRequestSetFlags(); // Add the excptn-throwing basic block to jump to on overflow fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW); // We can't do any commutative morphing for overflow instructions break; } CM_ADD_OP: FALLTHROUGH; case GT_OR: case GT_XOR: case GT_AND: tree = fgOptimizeCommutativeArithmetic(tree->AsOp()); if (!tree->OperIsSimple()) { return tree; } typ = tree->TypeGet(); oper = tree->OperGet(); op1 = tree->gtGetOp1(); op2 = tree->gtGetOp2IfPresent(); break; case GT_NOT: case GT_NEG: // Remove double negation/not. // Note: this is not a safe tranformation if "tree" is a CSE candidate. // Consider for example the following expression: NEG(NEG(OP)), where any // NEG is a CSE candidate. Were we to morph this to just OP, CSE would fail to find // the original NEG in the statement. 
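            // When it is safe, both "NEG(NEG(x))" and "NOT(NOT(x))" reduce to just "x".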
if (op1->OperIs(oper) && opts.OptimizationEnabled() && !gtIsActiveCSE_Candidate(tree) && !gtIsActiveCSE_Candidate(op1)) { JITDUMP("Remove double negation/not\n") GenTree* op1op1 = op1->gtGetOp1(); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(op1); return op1op1; } // Distribute negation over simple multiplication/division expressions if (opts.OptimizationEnabled() && !optValnumCSE_phase && tree->OperIs(GT_NEG) && op1->OperIs(GT_MUL, GT_DIV)) { GenTreeOp* mulOrDiv = op1->AsOp(); GenTree* op1op1 = mulOrDiv->gtGetOp1(); GenTree* op1op2 = mulOrDiv->gtGetOp2(); if (!op1op1->IsCnsIntOrI() && op1op2->IsCnsIntOrI() && !op1op2->IsIconHandle()) { // NEG(MUL(a, C)) => MUL(a, -C) // NEG(DIV(a, C)) => DIV(a, -C), except when C = {-1, 1} ssize_t constVal = op1op2->AsIntCon()->IconValue(); if ((mulOrDiv->OperIs(GT_DIV) && (constVal != -1) && (constVal != 1)) || (mulOrDiv->OperIs(GT_MUL) && !mulOrDiv->gtOverflow())) { GenTree* newOp1 = op1op1; // a GenTree* newOp2 = gtNewIconNode(-constVal, op1op2->TypeGet()); // -C mulOrDiv->gtOp1 = newOp1; mulOrDiv->gtOp2 = newOp2; mulOrDiv->SetVNsFromNode(tree); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(op1op2); return mulOrDiv; } } } /* Any constant cases should have been folded earlier */ noway_assert(!op1->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD) || optValnumCSE_phase); break; case GT_CKFINITE: noway_assert(varTypeIsFloating(op1->TypeGet())); fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_ARITH_EXCPN); break; case GT_BOUNDS_CHECK: fgSetRngChkTarget(tree); break; case GT_OBJ: case GT_BLK: case GT_IND: { // If we have IND(ADDR(X)) and X has GTF_GLOB_REF, we must set GTF_GLOB_REF on // the OBJ. Note that the GTF_GLOB_REF will have been cleared on ADDR(X) where X // is a local or CLS_VAR, even if it has been address-exposed. if (op1->OperIs(GT_ADDR)) { tree->gtFlags |= (op1->AsUnOp()->gtGetOp1()->gtFlags & GTF_GLOB_REF); } if (!tree->OperIs(GT_IND)) { break; } // Can not remove a GT_IND if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(tree)) { break; } bool foldAndReturnTemp = false; temp = nullptr; ival1 = 0; // Don't remove a volatile GT_IND, even if the address points to a local variable. if ((tree->gtFlags & GTF_IND_VOLATILE) == 0) { /* Try to Fold *(&X) into X */ if (op1->gtOper == GT_ADDR) { // Can not remove a GT_ADDR if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(op1)) { break; } temp = op1->AsOp()->gtOp1; // X // In the test below, if they're both TYP_STRUCT, this of course does *not* mean that // they are the *same* struct type. In fact, they almost certainly aren't. If the // address has an associated field sequence, that identifies this case; go through // the "lcl_fld" path rather than this one. FieldSeqNode* addrFieldSeq = nullptr; // This is an unused out parameter below. 
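                    // For example, an "IND<int>(ADDR(intLclVar))" with no zero-offset field
                    // annotation folds to the local variable itself.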
                    if (typ == temp->TypeGet() && !GetZeroOffsetFieldMap()->Lookup(op1, &addrFieldSeq))
                    {
                        foldAndReturnTemp = true;
                    }
                    else if (temp->OperIsLocal())
                    {
                        unsigned   lclNum = temp->AsLclVarCommon()->GetLclNum();
                        LclVarDsc* varDsc = lvaGetDesc(lclNum);

                        // We will try to optimize when we have a promoted struct with a zero lvFldOffset
                        if (varDsc->lvPromoted && (varDsc->lvFldOffset == 0))
                        {
                            noway_assert(varTypeIsStruct(varDsc));

                            // We will try to optimize when we have a single field struct that is being struct promoted
                            if (varDsc->lvFieldCnt == 1)
                            {
                                unsigned lclNumFld = varDsc->lvFieldLclStart; // just grab the promoted field
                                LclVarDsc* fieldVarDsc = lvaGetDesc(lclNumFld);

                                // Also make sure that the tree type matches the fieldVarType and that its lvFldOffset
                                // is zero
                                if (fieldVarDsc->TypeGet() == typ && (fieldVarDsc->lvFldOffset == 0))
                                {
                                    // We can just use the existing promoted field LclNum
                                    temp->AsLclVarCommon()->SetLclNum(lclNumFld);
                                    temp->gtType = fieldVarDsc->TypeGet();

                                    foldAndReturnTemp = true;
                                }
                            }
                        }
                        // If the type of the IND (typ) is a "small int", and the type of the local has the
                        // same width, then we can reduce to just the local variable -- it will be
                        // correctly normalized.
                        //
                        // The below transformation cannot be applied if the local var needs to be normalized on load.
                        else if (varTypeIsSmall(typ) && (genTypeSize(varDsc) == genTypeSize(typ)) &&
                                 !lvaTable[lclNum].lvNormalizeOnLoad())
                        {
                            const bool definitelyLoad = (tree->gtFlags & GTF_DONT_CSE) == 0;
                            const bool possiblyStore  = !definitelyLoad;

                            if (possiblyStore || (varTypeIsUnsigned(varDsc) == varTypeIsUnsigned(typ)))
                            {
                                typ               = temp->TypeGet();
                                tree->gtType      = typ;
                                foldAndReturnTemp = true;

                                if (possiblyStore)
                                {
                                    // This node can be on the left-hand-side of an assignment node.
                                    // Mark this node with GTF_VAR_FOLDED_IND to make sure that fgDoNormalizeOnStore()
                                    // is called on its parent in post-order morph.
                                    temp->gtFlags |= GTF_VAR_FOLDED_IND;
                                }
                            }
                        }
                        // For matching types we can fold
                        else if (!varTypeIsStruct(typ) && (lvaTable[lclNum].lvType == typ) &&
                                 !lvaTable[lclNum].lvNormalizeOnLoad())
                        {
                            tree->gtType = typ = temp->TypeGet();
                            foldAndReturnTemp  = true;
                        }
                        else
                        {
                            // Assumes that when Lookup returns "false" it will leave "fieldSeq" unmodified (i.e.
                            // nullptr)
                            assert(fieldSeq == nullptr);
                            bool b = GetZeroOffsetFieldMap()->Lookup(op1, &fieldSeq);
                            assert(b || fieldSeq == nullptr);

                            if ((fieldSeq != nullptr) && (temp->OperGet() == GT_LCL_FLD))
                            {
                                // Append the field sequence, change the type.
                                temp->AsLclFld()->SetFieldSeq(
                                    GetFieldSeqStore()->Append(temp->AsLclFld()->GetFieldSeq(), fieldSeq));
                                temp->gtType = typ;

                                foldAndReturnTemp = true;
                            }
                        }
                        // Otherwise we will fold this into a GT_LCL_FLD below
                        // where we check (temp != nullptr)
                    }
                    else // !temp->OperIsLocal()
                    {
                        // We don't try to fold away the GT_IND/GT_ADDR for this case
                        temp = nullptr;
                    }
                }
                else if (op1->OperGet() == GT_ADD)
                {
#ifdef TARGET_ARM
                    // Check for a misaligned floating point indirection.
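                    // For example, a float load at "lclAddr + 2" is only 2-byte aligned, so the
                    // indirection below gets flagged with GTF_IND_UNALIGNED.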
if (varTypeIsFloating(typ)) { GenTree* addOp2 = op1->AsOp()->gtGetOp2(); if (addOp2->IsCnsIntOrI()) { ssize_t offset = addOp2->AsIntCon()->gtIconVal; if ((offset % emitTypeSize(TYP_FLOAT)) != 0) { tree->gtFlags |= GTF_IND_UNALIGNED; } } } #endif // TARGET_ARM /* Try to change *(&lcl + cns) into lcl[cns] to prevent materialization of &lcl */ if (op1->AsOp()->gtOp1->OperGet() == GT_ADDR && op1->AsOp()->gtOp2->OperGet() == GT_CNS_INT && opts.OptimizationEnabled()) { // No overflow arithmetic with pointers noway_assert(!op1->gtOverflow()); temp = op1->AsOp()->gtOp1->AsOp()->gtOp1; if (!temp->OperIsLocal()) { temp = nullptr; break; } // Can not remove the GT_ADDR if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(op1->AsOp()->gtOp1)) { break; } ival1 = op1->AsOp()->gtOp2->AsIntCon()->gtIconVal; fieldSeq = op1->AsOp()->gtOp2->AsIntCon()->gtFieldSeq; // Does the address have an associated zero-offset field sequence? FieldSeqNode* addrFieldSeq = nullptr; if (GetZeroOffsetFieldMap()->Lookup(op1->AsOp()->gtOp1, &addrFieldSeq)) { fieldSeq = GetFieldSeqStore()->Append(addrFieldSeq, fieldSeq); } if (ival1 == 0 && typ == temp->TypeGet() && temp->TypeGet() != TYP_STRUCT) { noway_assert(!varTypeIsGC(temp->TypeGet())); foldAndReturnTemp = true; } else { // The emitter can't handle large offsets if (ival1 != (unsigned short)ival1) { break; } // The emitter can get confused by invalid offsets if (ival1 >= Compiler::lvaLclSize(temp->AsLclVarCommon()->GetLclNum())) { break; } } // Now we can fold this into a GT_LCL_FLD below // where we check (temp != nullptr) } } } // At this point we may have a lclVar or lclFld that might be foldable with a bit of extra massaging: // - We may have a load of a local where the load has a different type than the local // - We may have a load of a local plus an offset // // In these cases, we will change the lclVar or lclFld into a lclFld of the appropriate type and // offset if doing so is legal. The only cases in which this transformation is illegal are if the load // begins before the local or if the load extends beyond the end of the local (i.e. if the load is // out-of-bounds w.r.t. the local). if ((temp != nullptr) && !foldAndReturnTemp) { assert(temp->OperIsLocal()); const unsigned lclNum = temp->AsLclVarCommon()->GetLclNum(); LclVarDsc* const varDsc = lvaGetDesc(lclNum); const var_types tempTyp = temp->TypeGet(); const bool useExactSize = varTypeIsStruct(tempTyp) || (tempTyp == TYP_BLK) || (tempTyp == TYP_LCLBLK); const unsigned varSize = useExactSize ? varDsc->lvExactSize : genTypeSize(temp); // Make sure we do not enregister this lclVar. lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LocalField)); // If the size of the load is greater than the size of the lclVar, we cannot fold this access into // a lclFld: the access represented by an lclFld node must begin at or after the start of the // lclVar and must not extend beyond the end of the lclVar. if ((ival1 >= 0) && ((ival1 + genTypeSize(typ)) <= varSize)) { GenTreeLclFld* lclFld; // We will turn a GT_LCL_VAR into a GT_LCL_FLD with an gtLclOffs of 'ival' // or if we already have a GT_LCL_FLD we will adjust the gtLclOffs by adding 'ival' // Then we change the type of the GT_LCL_FLD to match the orginal GT_IND type. // if (temp->OperGet() == GT_LCL_FLD) { lclFld = temp->AsLclFld(); lclFld->SetLclOffs(lclFld->GetLclOffs() + static_cast<unsigned>(ival1)); lclFld->SetFieldSeq(GetFieldSeqStore()->Append(lclFld->GetFieldSeq(), fieldSeq)); } else // We have a GT_LCL_VAR. 
{ assert(temp->OperGet() == GT_LCL_VAR); temp->ChangeOper(GT_LCL_FLD); // Note that this makes the gtFieldSeq "NotAField". lclFld = temp->AsLclFld(); lclFld->SetLclOffs(static_cast<unsigned>(ival1)); if (fieldSeq != nullptr) { // If it does represent a field, note that. lclFld->SetFieldSeq(fieldSeq); } } temp->gtType = tree->gtType; foldAndReturnTemp = true; } } if (foldAndReturnTemp) { assert(temp != nullptr); assert(temp->TypeGet() == typ); assert((op1->OperGet() == GT_ADD) || (op1->OperGet() == GT_ADDR)); // Copy the value of GTF_DONT_CSE from the original tree to `temp`: it can be set for // 'temp' because a GT_ADDR always marks it for its operand. temp->gtFlags &= ~GTF_DONT_CSE; temp->gtFlags |= (tree->gtFlags & GTF_DONT_CSE); if (op1->OperGet() == GT_ADD) { DEBUG_DESTROY_NODE(op1->AsOp()->gtOp1); // GT_ADDR DEBUG_DESTROY_NODE(op1->AsOp()->gtOp2); // GT_CNS_INT } DEBUG_DESTROY_NODE(op1); // GT_ADD or GT_ADDR DEBUG_DESTROY_NODE(tree); // GT_IND // If the result of the fold is a local var, we may need to perform further adjustments e.g. for // normalization. if (temp->OperIs(GT_LCL_VAR)) { #ifdef DEBUG // We clear this flag on `temp` because `fgMorphLocalVar` may assert that this bit is clear // and the node in question must have this bit set (as it has already been morphed). temp->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; #endif // DEBUG const bool forceRemorph = true; temp = fgMorphLocalVar(temp, forceRemorph); #ifdef DEBUG // We then set this flag on `temp` because `fgMorhpLocalVar` may not set it itself, and the // caller of `fgMorphSmpOp` may assert that this flag is set on `temp` once this function // returns. temp->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif // DEBUG } return temp; } // Only do this optimization when we are in the global optimizer. Doing this after value numbering // could result in an invalid value number for the newly generated GT_IND node. if ((op1->OperGet() == GT_COMMA) && fgGlobalMorph) { // Perform the transform IND(COMMA(x, ..., z)) == COMMA(x, ..., IND(z)). // TBD: this transformation is currently necessary for correctness -- it might // be good to analyze the failures that result if we don't do this, and fix them // in other ways. Ideally, this should be optional. GenTree* commaNode = op1; GenTreeFlags treeFlags = tree->gtFlags; commaNode->gtType = typ; commaNode->gtFlags = (treeFlags & ~GTF_REVERSE_OPS); // Bashing the GT_COMMA flags here is // dangerous, clear the GTF_REVERSE_OPS at // least. #ifdef DEBUG commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif while (commaNode->AsOp()->gtOp2->gtOper == GT_COMMA) { commaNode = commaNode->AsOp()->gtOp2; commaNode->gtType = typ; commaNode->gtFlags = (treeFlags & ~GTF_REVERSE_OPS & ~GTF_ASG & ~GTF_CALL); // Bashing the GT_COMMA flags here is // dangerous, clear the GTF_REVERSE_OPS, GT_ASG, and GT_CALL at // least. commaNode->gtFlags |= ((commaNode->AsOp()->gtOp1->gtFlags | commaNode->AsOp()->gtOp2->gtFlags) & (GTF_ASG | GTF_CALL)); #ifdef DEBUG commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif } bool wasArrIndex = (tree->gtFlags & GTF_IND_ARR_INDEX) != 0; ArrayInfo arrInfo; if (wasArrIndex) { bool b = GetArrayInfoMap()->Lookup(tree, &arrInfo); assert(b); GetArrayInfoMap()->Remove(tree); } tree = op1; GenTree* addr = commaNode->AsOp()->gtOp2; // TODO-1stClassStructs: we often create a struct IND without a handle, fix it. 
op1 = gtNewIndir(typ, addr); // This is very conservative op1->gtFlags |= treeFlags & ~GTF_ALL_EFFECT & ~GTF_IND_NONFAULTING; op1->gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT); if (wasArrIndex) { GetArrayInfoMap()->Set(op1, arrInfo); } #ifdef DEBUG op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif commaNode->AsOp()->gtOp2 = op1; commaNode->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT); return tree; } break; } case GT_ADDR: // Can not remove op1 if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(op1)) { break; } if (op1->OperGet() == GT_IND) { if ((op1->gtFlags & GTF_IND_ARR_INDEX) == 0) { // Can not remove a GT_ADDR if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(tree)) { break; } // Perform the transform ADDR(IND(...)) == (...). GenTree* addr = op1->AsOp()->gtOp1; // If tree has a zero field sequence annotation, update the annotation // on addr node. FieldSeqNode* zeroFieldSeq = nullptr; if (GetZeroOffsetFieldMap()->Lookup(tree, &zeroFieldSeq)) { fgAddFieldSeqForZeroOffset(addr, zeroFieldSeq); } noway_assert(varTypeIsGC(addr->gtType) || addr->gtType == TYP_I_IMPL); DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(tree); return addr; } } else if (op1->OperGet() == GT_OBJ) { // Can not remove a GT_ADDR if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(tree)) { break; } // Perform the transform ADDR(OBJ(...)) == (...). GenTree* addr = op1->AsObj()->Addr(); noway_assert(varTypeIsGC(addr->gtType) || addr->gtType == TYP_I_IMPL); DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(tree); return addr; } else if ((op1->gtOper == GT_COMMA) && !optValnumCSE_phase) { // Perform the transform ADDR(COMMA(x, ..., z)) == COMMA(x, ..., ADDR(z)). // (Be sure to mark "z" as an l-value...) ArrayStack<GenTree*> commas(getAllocator(CMK_ArrayStack)); for (GenTree* comma = op1; comma != nullptr && comma->gtOper == GT_COMMA; comma = comma->gtGetOp2()) { commas.Push(comma); } GenTree* commaNode = commas.Top(); // The top-level addr might be annotated with a zeroOffset field. FieldSeqNode* zeroFieldSeq = nullptr; bool isZeroOffset = GetZeroOffsetFieldMap()->Lookup(tree, &zeroFieldSeq); tree = op1; commaNode->AsOp()->gtOp2->gtFlags |= GTF_DONT_CSE; // If the node we're about to put under a GT_ADDR is an indirection, it // doesn't need to be materialized, since we only want the addressing mode. Because // of this, this GT_IND is not a faulting indirection and we don't have to extract it // as a side effect. GenTree* commaOp2 = commaNode->AsOp()->gtOp2; if (commaOp2->OperIsBlk()) { commaOp2->SetOper(GT_IND); } if (commaOp2->gtOper == GT_IND) { commaOp2->gtFlags |= GTF_IND_NONFAULTING; commaOp2->gtFlags &= ~GTF_EXCEPT; commaOp2->gtFlags |= (commaOp2->AsOp()->gtOp1->gtFlags & GTF_EXCEPT); } op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, commaOp2); if (isZeroOffset) { // Transfer the annotation to the new GT_ADDR node. fgAddFieldSeqForZeroOffset(op1, zeroFieldSeq); } commaNode->AsOp()->gtOp2 = op1; // Originally, I gave all the comma nodes type "byref". But the ADDR(IND(x)) == x transform // might give op1 a type different from byref (like, say, native int). So now go back and give // all the comma nodes the type of op1. // TODO: the comma flag update below is conservative and can be improved. // For example, if we made the ADDR(IND(x)) == x transformation, we may be able to // get rid of some of the IND flags on the COMMA nodes (e.g., GTF_GLOB_REF). 
while (!commas.Empty()) { GenTree* comma = commas.Pop(); comma->gtType = op1->gtType; comma->gtFlags |= op1->gtFlags; #ifdef DEBUG comma->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif gtUpdateNodeSideEffects(comma); } return tree; } break; case GT_COLON: if (fgGlobalMorph) { /* Mark the nodes that are conditionally executed */ fgWalkTreePre(&tree, gtMarkColonCond); } /* Since we're doing this postorder we clear this if it got set by a child */ fgRemoveRestOfBlock = false; break; case GT_COMMA: /* Special case: trees that don't produce a value */ if (op2->OperIs(GT_ASG) || (op2->OperGet() == GT_COMMA && op2->TypeGet() == TYP_VOID) || fgIsThrow(op2)) { typ = tree->gtType = TYP_VOID; } // If we are in the Valuenum CSE phase then don't morph away anything as these // nodes may have CSE defs/uses in them. // if (!optValnumCSE_phase) { // Extract the side effects from the left side of the comma. Since they don't "go" anywhere, this // is all we need. GenTree* op1SideEffects = nullptr; // The addition of "GTF_MAKE_CSE" below prevents us from throwing away (for example) // hoisted expressions in loops. gtExtractSideEffList(op1, &op1SideEffects, (GTF_SIDE_EFFECT | GTF_MAKE_CSE)); if (op1SideEffects) { // Replace the left hand side with the side effect list. op1 = op1SideEffects; tree->AsOp()->gtOp1 = op1SideEffects; gtUpdateNodeSideEffects(tree); } else { op2->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG)); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(op1); return op2; } // If the right operand is just a void nop node, throw it away. Unless this is a // comma throw, in which case we want the top-level morphing loop to recognize it. if (op2->IsNothingNode() && op1->TypeIs(TYP_VOID) && !fgIsCommaThrow(tree)) { op1->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG)); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(op2); return op1; } } break; case GT_JTRUE: /* Special case if fgRemoveRestOfBlock is set to true */ if (fgRemoveRestOfBlock) { if (fgIsCommaThrow(op1, true)) { GenTree* throwNode = op1->AsOp()->gtOp1; JITDUMP("Removing [%06d] GT_JTRUE as the block now unconditionally throws an exception.\n", dspTreeID(tree)); DEBUG_DESTROY_NODE(tree); return throwNode; } noway_assert(op1->OperIsCompare()); noway_assert(op1->gtFlags & GTF_EXCEPT); // We need to keep op1 for the side-effects. Hang it off // a GT_COMMA node JITDUMP("Keeping side-effects by bashing [%06d] GT_JTRUE into a GT_COMMA.\n", dspTreeID(tree)); tree->ChangeOper(GT_COMMA); tree->AsOp()->gtOp2 = op2 = gtNewNothingNode(); // Additionally since we're eliminating the JTRUE // codegen won't like it if op1 is a RELOP of longs, floats or doubles. // So we change it into a GT_COMMA as well. 
JITDUMP("Also bashing [%06d] (a relop) into a GT_COMMA.\n", dspTreeID(op1)); op1->ChangeOper(GT_COMMA); op1->gtFlags &= ~GTF_UNSIGNED; // Clear the unsigned flag if it was set on the relop op1->gtType = op1->AsOp()->gtOp1->gtType; return tree; } break; case GT_INTRINSIC: if (tree->AsIntrinsic()->gtIntrinsicName == NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant) { // Should be expanded by the time it reaches CSE phase assert(!optValnumCSE_phase); JITDUMP("\nExpanding RuntimeHelpers.IsKnownConstant to "); if (op1->OperIsConst()) { // We're lucky to catch a constant here while importer was not JITDUMP("true\n"); DEBUG_DESTROY_NODE(tree, op1); tree = gtNewIconNode(1); } else { GenTree* op1SideEffects = nullptr; gtExtractSideEffList(op1, &op1SideEffects, GTF_ALL_EFFECT); if (op1SideEffects != nullptr) { DEBUG_DESTROY_NODE(tree); // Keep side-effects of op1 tree = gtNewOperNode(GT_COMMA, TYP_INT, op1SideEffects, gtNewIconNode(0)); JITDUMP("false with side effects:\n") DISPTREE(tree); } else { JITDUMP("false\n"); DEBUG_DESTROY_NODE(tree, op1); tree = gtNewIconNode(0); } } INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return tree; } break; default: break; } assert(oper == tree->gtOper); // Propagate comma throws. // If we are in the Valuenum CSE phase then don't morph away anything as these // nodes may have CSE defs/uses in them. if (fgGlobalMorph && (oper != GT_ASG) && (oper != GT_COLON)) { if ((op1 != nullptr) && fgIsCommaThrow(op1, true)) { GenTree* propagatedThrow = fgPropagateCommaThrow(tree, op1->AsOp(), GTF_EMPTY); if (propagatedThrow != nullptr) { return propagatedThrow; } } if ((op2 != nullptr) && fgIsCommaThrow(op2, true)) { GenTree* propagatedThrow = fgPropagateCommaThrow(tree, op2->AsOp(), op1->gtFlags & GTF_ALL_EFFECT); if (propagatedThrow != nullptr) { return propagatedThrow; } } } /*------------------------------------------------------------------------- * Optional morphing is done if tree transformations is permitted */ if ((opts.compFlags & CLFLG_TREETRANS) == 0) { return tree; } tree = fgMorphSmpOpOptional(tree->AsOp()); return tree; } //------------------------------------------------------------------------ // fgOptimizeCast: Optimizes the supplied GT_CAST tree. // // Tries to get rid of the cast, its operand, the GTF_OVERFLOW flag, calls // calls "optNarrowTree". Called in post-order by "fgMorphSmpOp". // // Arguments: // tree - the cast tree to optimize // // Return Value: // The optimized tree (that can have any shape). // GenTree* Compiler::fgOptimizeCast(GenTreeCast* cast) { GenTree* src = cast->CastOp(); if (gtIsActiveCSE_Candidate(cast) || gtIsActiveCSE_Candidate(src)) { return cast; } // See if we can discard the cast. if (varTypeIsIntegral(cast) && varTypeIsIntegral(src)) { IntegralRange srcRange = IntegralRange::ForNode(src, this); IntegralRange noOvfRange = IntegralRange::ForCastInput(cast); if (noOvfRange.Contains(srcRange)) { // Casting between same-sized types is a no-op, // given we have proven this cast cannot overflow. if (genActualType(cast) == genActualType(src)) { return src; } cast->ClearOverflow(); cast->SetAllEffectsFlags(src); // Try and see if we can make this cast into a cheaper zero-extending version. if (genActualTypeIsInt(src) && cast->TypeIs(TYP_LONG) && srcRange.IsPositive()) { cast->SetUnsigned(); } } // For checked casts, we're done. if (cast->gtOverflow()) { return cast; } var_types castToType = cast->CastToType(); // For indir-like nodes, we may be able to change their type to satisfy (and discard) the cast. 
if (varTypeIsSmall(castToType) && (genTypeSize(castToType) == genTypeSize(src)) && src->OperIs(GT_IND, GT_CLS_VAR, GT_LCL_FLD)) { // We're changing the type here so we need to update the VN; // in other cases we discard the cast without modifying src // so the VN doesn't change. src->ChangeType(castToType); src->SetVNsFromNode(cast); return src; } // Try to narrow the operand of the cast and discard the cast. if (opts.OptEnabled(CLFLG_TREETRANS) && (genTypeSize(src) > genTypeSize(castToType)) && optNarrowTree(src, src->TypeGet(), castToType, cast->gtVNPair, false)) { optNarrowTree(src, src->TypeGet(), castToType, cast->gtVNPair, true); // "optNarrowTree" may leave a dead cast behind. if (src->OperIs(GT_CAST) && (src->AsCast()->CastToType() == genActualType(src->AsCast()->CastOp()))) { src = src->AsCast()->CastOp(); } return src; } // Check for two consecutive casts, we may be able to discard the intermediate one. if (opts.OptimizationEnabled() && src->OperIs(GT_CAST) && !src->gtOverflow()) { var_types dstCastToType = castToType; var_types srcCastToType = src->AsCast()->CastToType(); // CAST(ubyte <- CAST(short <- X)): CAST(ubyte <- X). // CAST(ushort <- CAST(short <- X)): CAST(ushort <- X). if (varTypeIsSmall(srcCastToType) && (genTypeSize(dstCastToType) <= genTypeSize(srcCastToType))) { cast->CastOp() = src->AsCast()->CastOp(); DEBUG_DESTROY_NODE(src); } } } return cast; } //------------------------------------------------------------------------ // fgOptimizeEqualityComparisonWithConst: optimizes various EQ/NE(OP, CONST) patterns. // // Arguments: // cmp - The GT_NE/GT_EQ tree the second operand of which is an integral constant // // Return Value: // The optimized tree, "cmp" in case no optimizations were done. // Currently only returns relop trees. // GenTree* Compiler::fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp) { assert(cmp->OperIs(GT_EQ, GT_NE)); assert(cmp->gtGetOp2()->IsIntegralConst()); assert(!optValnumCSE_phase); GenTree* op1 = cmp->gtGetOp1(); GenTreeIntConCommon* op2 = cmp->gtGetOp2()->AsIntConCommon(); // Check for "(expr +/- icon1) ==/!= (non-zero-icon2)". if (op2->IsCnsIntOrI() && (op2->IconValue() != 0)) { // Since this can occur repeatedly we use a while loop. while (op1->OperIs(GT_ADD, GT_SUB) && op1->AsOp()->gtGetOp2()->IsCnsIntOrI() && op1->TypeIs(TYP_INT) && !op1->gtOverflow()) { // Got it; change "x + icon1 == icon2" to "x == icon2 - icon1". ssize_t op1Value = op1->AsOp()->gtGetOp2()->AsIntCon()->IconValue(); ssize_t op2Value = op2->IconValue(); if (op1->OperIs(GT_ADD)) { op2Value -= op1Value; } else { op2Value += op1Value; } op1 = op1->AsOp()->gtGetOp1(); op2->SetIconValue(static_cast<int32_t>(op2Value)); } cmp->gtOp1 = op1; fgUpdateConstTreeValueNumber(op2); } // Here we look for the following tree // // EQ/NE // / \. // op1 CNS 0/1 // if (op2->IsIntegralConst(0) || op2->IsIntegralConst(1)) { ssize_t op2Value = static_cast<ssize_t>(op2->IntegralValue()); if (op1->OperIsCompare()) { // Here we look for the following tree // // EQ/NE -> RELOP/!RELOP // / \ / \. // RELOP CNS 0/1 // / \. // // Note that we will remove/destroy the EQ/NE node and move // the RELOP up into it's location. // Here we reverse the RELOP if necessary. 
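            // For example, "EQ(GT(x, y), 0)" becomes "LE(x, y)", while "NE(GT(x, y), 0)"
            // simply becomes "GT(x, y)" (for a signed integer compare).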
bool reverse = ((op2Value == 0) == (cmp->OperIs(GT_EQ))); if (reverse) { gtReverseCond(op1); } noway_assert((op1->gtFlags & GTF_RELOP_JMP_USED) == 0); op1->gtFlags |= cmp->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE); op1->SetVNsFromNode(cmp); DEBUG_DESTROY_NODE(cmp); return op1; } // // Now we check for a compare with the result of an '&' operator // // Here we look for the following transformation: // // EQ/NE EQ/NE // / \ / \. // AND CNS 0/1 -> AND CNS 0 // / \ / \. // RSZ/RSH CNS 1 x CNS (1 << y) // / \. // x CNS_INT +y if (fgGlobalMorph && op1->OperIs(GT_AND) && op1->AsOp()->gtGetOp1()->OperIs(GT_RSZ, GT_RSH)) { GenTreeOp* andOp = op1->AsOp(); GenTreeOp* rshiftOp = andOp->gtGetOp1()->AsOp(); if (!rshiftOp->gtGetOp2()->IsCnsIntOrI()) { goto SKIP; } ssize_t shiftAmount = rshiftOp->gtGetOp2()->AsIntCon()->IconValue(); if (shiftAmount < 0) { goto SKIP; } if (!andOp->gtGetOp2()->IsIntegralConst(1)) { goto SKIP; } GenTreeIntConCommon* andMask = andOp->gtGetOp2()->AsIntConCommon(); if (andOp->TypeIs(TYP_INT)) { if (shiftAmount > 31) { goto SKIP; } andMask->SetIconValue(static_cast<int32_t>(1 << shiftAmount)); // Reverse the condition if necessary. if (op2Value == 1) { gtReverseCond(cmp); op2->SetIconValue(0); } } else if (andOp->TypeIs(TYP_LONG)) { if (shiftAmount > 63) { goto SKIP; } andMask->SetLngValue(1ll << shiftAmount); // Reverse the cond if necessary if (op2Value == 1) { gtReverseCond(cmp); op2->SetLngValue(0); } } andOp->gtOp1 = rshiftOp->gtGetOp1(); DEBUG_DESTROY_NODE(rshiftOp->gtGetOp2()); DEBUG_DESTROY_NODE(rshiftOp); } } SKIP: // Now check for compares with small constant longs that can be cast to int. // Note that we filter out negative values here so that the transformations // below are correct. E. g. "EQ(-1L, CAST_UN(int))" is always "false", but were // we to make it into "EQ(-1, int)", "true" becomes possible for negative inputs. if (!op2->TypeIs(TYP_LONG) || ((op2->LngValue() >> 31) != 0)) { return cmp; } if (!op1->OperIs(GT_AND)) { // Another interesting case: cast from int. if (op1->OperIs(GT_CAST) && op1->AsCast()->CastOp()->TypeIs(TYP_INT) && !op1->gtOverflow()) { // Simply make this into an integer comparison. cmp->gtOp1 = op1->AsCast()->CastOp(); op2->BashToConst(static_cast<int32_t>(op2->LngValue())); fgUpdateConstTreeValueNumber(op2); } return cmp; } // Now we perform the following optimization: // EQ/NE(AND(OP long, CNS_LNG), CNS_LNG) => // EQ/NE(AND(CAST(int <- OP), CNS_INT), CNS_INT) // when the constants are sufficiently small. // This transform cannot preserve VNs. if (fgGlobalMorph) { assert(op1->TypeIs(TYP_LONG) && op1->OperIs(GT_AND)); // Is the result of the mask effectively an INT? GenTreeOp* andOp = op1->AsOp(); if (!andOp->gtGetOp2()->OperIs(GT_CNS_NATIVELONG)) { return cmp; } GenTreeIntConCommon* andMask = andOp->gtGetOp2()->AsIntConCommon(); if ((andMask->LngValue() >> 32) != 0) { return cmp; } // Now we narrow the first operand of AND to int. if (optNarrowTree(andOp->gtGetOp1(), TYP_LONG, TYP_INT, ValueNumPair(), false)) { optNarrowTree(andOp->gtGetOp1(), TYP_LONG, TYP_INT, ValueNumPair(), true); } else { andOp->gtOp1 = gtNewCastNode(TYP_INT, andOp->gtGetOp1(), false, TYP_INT); } assert(andMask == andOp->gtGetOp2()); // Now replace the mask node. andMask->BashToConst(static_cast<int32_t>(andMask->LngValue())); // Now change the type of the AND node. andOp->ChangeType(TYP_INT); // Finally we replace the comparand. 
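// E.g. for "EQ(AND(long x, 0xFFL), 10L)", the AND has just been narrowed to INT, and here the
// original "CNS_LNG 10" comparand is bashed to "CNS_INT 10".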
op2->BashToConst(static_cast<int32_t>(op2->LngValue())); } return cmp; } //------------------------------------------------------------------------ // fgOptimizeRelationalComparisonWithConst: optimizes a comparison operation. // // Recognizes comparisons against various constant operands and morphs // them, if possible, into comparisons against zero. // // Arguments: // cmp - the GT_LE/GT_LT/GT_GE/GT_GT tree to morph. // // Return Value: // The "cmp" tree, possibly with a modified oper. // The second operand's constant value may be modified as well. // // Assumptions: // The operands have been swapped so that any constants are on the right. // The second operand is an integral constant. // GenTree* Compiler::fgOptimizeRelationalComparisonWithConst(GenTreeOp* cmp) { assert(cmp->OperIs(GT_LE, GT_LT, GT_GE, GT_GT)); assert(cmp->gtGetOp2()->IsIntegralConst()); assert(!gtIsActiveCSE_Candidate(cmp->gtGetOp2())); GenTree* op1 = cmp->gtGetOp1(); GenTreeIntConCommon* op2 = cmp->gtGetOp2()->AsIntConCommon(); assert(genActualType(op1) == genActualType(op2)); genTreeOps oper = cmp->OperGet(); int64_t op2Value = op2->IntegralValue(); if (op2Value == 1) { // Check for "expr >= 1". if (oper == GT_GE) { // Change to "expr != 0" for unsigned and "expr > 0" for signed. oper = cmp->IsUnsigned() ? GT_NE : GT_GT; } // Check for "expr < 1". else if (oper == GT_LT) { // Change to "expr == 0" for unsigned and "expr <= 0". oper = cmp->IsUnsigned() ? GT_EQ : GT_LE; } } // Check for "expr relop -1". else if (!cmp->IsUnsigned() && (op2Value == -1)) { // Check for "expr <= -1". if (oper == GT_LE) { // Change to "expr < 0". oper = GT_LT; } // Check for "expr > -1". else if (oper == GT_GT) { // Change to "expr >= 0". oper = GT_GE; } } else if (cmp->IsUnsigned()) { if ((oper == GT_LE) || (oper == GT_GT)) { if (op2Value == 0) { // IL doesn't have a cne instruction so compilers use cgt.un instead. The JIT // recognizes certain patterns that involve GT_NE (e.g (x & 4) != 0) and fails // if GT_GT is used instead. Transform (x GT_GT.unsigned 0) into (x GT_NE 0) // and (x GT_LE.unsigned 0) into (x GT_EQ 0). The later case is rare, it sometimes // occurs as a result of branch inversion. oper = (oper == GT_LE) ? GT_EQ : GT_NE; cmp->gtFlags &= ~GTF_UNSIGNED; } // LE_UN/GT_UN(expr, int/long.MaxValue) => GE/LT(expr, 0). else if (((op1->TypeIs(TYP_LONG) && (op2Value == INT64_MAX))) || ((genActualType(op1) == TYP_INT) && (op2Value == INT32_MAX))) { oper = (oper == GT_LE) ? GT_GE : GT_LT; cmp->gtFlags &= ~GTF_UNSIGNED; } } } if (!cmp->OperIs(oper)) { // Keep the old ValueNumber for 'tree' as the new expr // will still compute the same value as before. cmp->SetOper(oper, GenTree::PRESERVE_VN); op2->SetIntegralValue(0); fgUpdateConstTreeValueNumber(op2); } return cmp; } #ifdef FEATURE_HW_INTRINSICS //------------------------------------------------------------------------ // fgOptimizeHWIntrinsic: optimize a HW intrinsic node // // Arguments: // node - HWIntrinsic node to examine // // Returns: // The original node if no optimization happened or if tree bashing occured. // An alternative tree if an optimization happened. // // Notes: // Checks for HWIntrinsic nodes: Vector64.Create/Vector128.Create/Vector256.Create, // and if the call is one of these, attempt to optimize. // This is post-order, meaning that it will not morph the children. 
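//    Currently this folds Vector64/Vector128/Vector256.Create calls whose arguments are all
//    constant zero into the corresponding get_Zero intrinsic.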
// GenTree* Compiler::fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node) { assert(!optValnumCSE_phase); if (opts.OptimizationDisabled()) { return node; } switch (node->GetHWIntrinsicId()) { case NI_Vector128_Create: #if defined(TARGET_XARCH) case NI_Vector256_Create: #elif defined(TARGET_ARM64) case NI_Vector64_Create: #endif { bool hwAllArgsAreConstZero = true; for (GenTree* arg : node->Operands()) { if (!arg->IsIntegralConst(0) && !arg->IsFloatPositiveZero()) { hwAllArgsAreConstZero = false; break; } } if (hwAllArgsAreConstZero) { switch (node->GetHWIntrinsicId()) { case NI_Vector128_Create: { node->ResetHWIntrinsicId(NI_Vector128_get_Zero); break; } #if defined(TARGET_XARCH) case NI_Vector256_Create: { node->ResetHWIntrinsicId(NI_Vector256_get_Zero); break; } #elif defined(TARGET_ARM64) case NI_Vector64_Create: { node->ResetHWIntrinsicId(NI_Vector64_get_Zero); break; } #endif default: unreached(); } } break; } default: break; } return node; } #endif //------------------------------------------------------------------------ // fgOptimizeCommutativeArithmetic: Optimizes commutative operations. // // Arguments: // tree - the unchecked GT_ADD/GT_MUL/GT_OR/GT_XOR/GT_AND tree to optimize. // // Return Value: // The optimized tree that can have any shape. // GenTree* Compiler::fgOptimizeCommutativeArithmetic(GenTreeOp* tree) { assert(tree->OperIs(GT_ADD, GT_MUL, GT_OR, GT_XOR, GT_AND)); assert(!tree->gtOverflowEx()); // Commute constants to the right. if (tree->gtGetOp1()->OperIsConst() && !tree->gtGetOp1()->TypeIs(TYP_REF)) { // TODO-Review: We used to assert here that "(!op2->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD))". // This may indicate a missed "remorph". Task is to re-enable this assertion and investigate. std::swap(tree->gtOp1, tree->gtOp2); } if (fgOperIsBitwiseRotationRoot(tree->OperGet())) { GenTree* rotationTree = fgRecognizeAndMorphBitwiseRotation(tree); if (rotationTree != nullptr) { return rotationTree; } } if (fgGlobalMorph && tree->OperIs(GT_AND, GT_OR, GT_XOR)) { GenTree* castTree = fgMorphCastedBitwiseOp(tree->AsOp()); if (castTree != nullptr) { return castTree; } } if (varTypeIsIntegralOrI(tree)) { genTreeOps oldTreeOper = tree->OperGet(); GenTreeOp* optimizedTree = fgMorphCommutative(tree->AsOp()); if (optimizedTree != nullptr) { if (!optimizedTree->OperIs(oldTreeOper)) { // "optimizedTree" could end up being a COMMA. return optimizedTree; } tree = optimizedTree; } } if (!optValnumCSE_phase) { GenTree* optimizedTree = nullptr; if (tree->OperIs(GT_ADD)) { optimizedTree = fgOptimizeAddition(tree); } else if (tree->OperIs(GT_MUL)) { optimizedTree = fgOptimizeMultiply(tree); } else if (tree->OperIs(GT_AND)) { optimizedTree = fgOptimizeBitwiseAnd(tree); } if (optimizedTree != nullptr) { return optimizedTree; } } return tree; } //------------------------------------------------------------------------ // fgOptimizeAddition: optimizes addition. // // Arguments: // add - the unchecked GT_ADD tree to optimize. // // Return Value: // The optimized tree, that can have any shape, in case any transformations // were performed. Otherwise, "nullptr", guaranteeing no state change. // GenTree* Compiler::fgOptimizeAddition(GenTreeOp* add) { assert(add->OperIs(GT_ADD) && !add->gtOverflow()); assert(!optValnumCSE_phase); GenTree* op1 = add->gtGetOp1(); GenTree* op2 = add->gtGetOp2(); // Fold "((x + icon1) + (y + icon2))" to ((x + y) + (icon1 + icon2))". // Be careful not to create a byref pointer that may point outside of the ref object. 
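// E.g. "((x + 4) + (y + 8))" becomes "((x + y) + 12)".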
// Only do this in global morph as we don't recompute the VN for "(x + y)", the new "op2". if (op1->OperIs(GT_ADD) && op2->OperIs(GT_ADD) && !op1->gtOverflow() && !op2->gtOverflow() && op1->AsOp()->gtGetOp2()->IsCnsIntOrI() && op2->AsOp()->gtGetOp2()->IsCnsIntOrI() && !varTypeIsGC(op1->AsOp()->gtGetOp1()) && !varTypeIsGC(op2->AsOp()->gtGetOp1()) && fgGlobalMorph) { GenTreeOp* addOne = op1->AsOp(); GenTreeOp* addTwo = op2->AsOp(); GenTreeIntCon* constOne = addOne->gtGetOp2()->AsIntCon(); GenTreeIntCon* constTwo = addTwo->gtGetOp2()->AsIntCon(); addOne->gtOp2 = addTwo->gtGetOp1(); addOne->SetAllEffectsFlags(addOne->gtGetOp1(), addOne->gtGetOp2()); DEBUG_DESTROY_NODE(addTwo); constOne->SetValueTruncating(constOne->IconValue() + constTwo->IconValue()); op2 = constOne; add->gtOp2 = constOne; DEBUG_DESTROY_NODE(constTwo); } // Fold (x + 0) - given it won't change the tree type to TYP_REF. // TODO-Bug: this code will lose the GC-ness of a tree like "native int + byref(0)". if (op2->IsIntegralConst(0) && ((add->TypeGet() == op1->TypeGet()) || !op1->TypeIs(TYP_REF))) { if (op2->IsCnsIntOrI() && varTypeIsI(op1)) { fgAddFieldSeqForZeroOffset(op1, op2->AsIntCon()->gtFieldSeq); } DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(add); return op1; } // Note that these transformations are legal for floating-point ADDs as well. if (opts.OptimizationEnabled()) { // - a + b = > b - a // ADD((NEG(a), b) => SUB(b, a) // Do not do this if "op2" is constant for canonicalization purposes. if (op1->OperIs(GT_NEG) && !op2->OperIs(GT_NEG) && !op2->IsIntegralConst() && gtCanSwapOrder(op1, op2)) { add->SetOper(GT_SUB); add->gtOp1 = op2; add->gtOp2 = op1->AsOp()->gtGetOp1(); DEBUG_DESTROY_NODE(op1); return add; } // a + -b = > a - b // ADD(a, (NEG(b)) => SUB(a, b) if (!op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG)) { add->SetOper(GT_SUB); add->gtOp2 = op2->AsOp()->gtGetOp1(); DEBUG_DESTROY_NODE(op2); return add; } } return nullptr; } //------------------------------------------------------------------------ // fgOptimizeMultiply: optimizes multiplication. // // Arguments: // mul - the unchecked TYP_I_IMPL/TYP_INT GT_MUL tree to optimize. // // Return Value: // The optimized tree, that can have any shape, in case any transformations // were performed. Otherwise, "nullptr", guaranteeing no state change. // GenTree* Compiler::fgOptimizeMultiply(GenTreeOp* mul) { assert(mul->OperIs(GT_MUL)); assert(varTypeIsIntOrI(mul) || varTypeIsFloating(mul)); assert(!mul->gtOverflow()); assert(!optValnumCSE_phase); GenTree* op1 = mul->gtGetOp1(); GenTree* op2 = mul->gtGetOp2(); assert(mul->TypeGet() == genActualType(op1)); assert(mul->TypeGet() == genActualType(op2)); if (opts.OptimizationEnabled() && op2->IsCnsFltOrDbl()) { double multiplierValue = op2->AsDblCon()->gtDconVal; if (multiplierValue == 1.0) { // Fold "x * 1.0" to "x". DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(mul); return op1; } // Fold "x * 2.0" to "x + x". // If op1 is not a local we will have to introduce a temporary via GT_COMMA. // Unfortunately, it's not optHoistLoopCode-friendly (yet), so we'll only do // this for locals / after hoisting has run (when rationalization remorphs // math INTRINSICSs into calls...). 
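// E.g. "lclVar * 2.0" becomes "lclVar + lclVar", with the second use produced by fgMakeMultiUse.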
if ((multiplierValue == 2.0) && (op1->IsLocal() || (fgOrder == FGOrderLinear))) { op2 = fgMakeMultiUse(&op1); GenTree* add = gtNewOperNode(GT_ADD, mul->TypeGet(), op1, op2); INDEBUG(add->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return add; } } if (op2->IsIntegralConst()) { ssize_t mult = op2->AsIntConCommon()->IconValue(); bool op2IsConstIndex = op2->OperGet() == GT_CNS_INT && op2->AsIntCon()->gtFieldSeq != nullptr && op2->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq(); assert(!op2IsConstIndex || op2->AsIntCon()->gtFieldSeq->m_next == nullptr); if (mult == 0) { // We may be able to throw away op1 (unless it has side-effects) if ((op1->gtFlags & GTF_SIDE_EFFECT) == 0) { DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(mul); return op2; // Just return the "0" node } // We need to keep op1 for the side-effects. Hang it off a GT_COMMA node. mul->ChangeOper(GT_COMMA, GenTree::PRESERVE_VN); return mul; } #ifdef TARGET_XARCH // Should we try to replace integer multiplication with lea/add/shift sequences? bool mulShiftOpt = compCodeOpt() != SMALL_CODE; #else // !TARGET_XARCH bool mulShiftOpt = false; #endif // !TARGET_XARCH size_t abs_mult = (mult >= 0) ? mult : -mult; size_t lowestBit = genFindLowestBit(abs_mult); bool changeToShift = false; // is it a power of two? (positive or negative) if (abs_mult == lowestBit) { // if negative negate (min-int does not need negation) if (mult < 0 && mult != SSIZE_T_MIN) { op1 = gtNewOperNode(GT_NEG, genActualType(op1), op1); mul->gtOp1 = op1; fgMorphTreeDone(op1); } // If "op2" is a constant array index, the other multiplicand must be a constant. // Transfer the annotation to the other one. if (op2->OperGet() == GT_CNS_INT && op2->AsIntCon()->gtFieldSeq != nullptr && op2->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq()) { assert(op2->AsIntCon()->gtFieldSeq->m_next == nullptr); GenTree* otherOp = op1; if (otherOp->OperGet() == GT_NEG) { otherOp = otherOp->AsOp()->gtOp1; } assert(otherOp->OperGet() == GT_CNS_INT); assert(otherOp->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField()); otherOp->AsIntCon()->gtFieldSeq = op2->AsIntCon()->gtFieldSeq; } if (abs_mult == 1) { DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(mul); return op1; } // Change the multiplication into a shift by log2(val) bits. op2->AsIntConCommon()->SetIconValue(genLog2(abs_mult)); changeToShift = true; } else if (mulShiftOpt && (lowestBit > 1) && jitIsScaleIndexMul(lowestBit)) { int shift = genLog2(lowestBit); ssize_t factor = abs_mult >> shift; if (factor == 3 || factor == 5 || factor == 9) { // if negative negate (min-int does not need negation) if (mult < 0 && mult != SSIZE_T_MIN) { op1 = gtNewOperNode(GT_NEG, genActualType(op1), op1); mul->gtOp1 = op1; fgMorphTreeDone(op1); } GenTree* factorIcon = gtNewIconNode(factor, mul->TypeGet()); if (op2IsConstIndex) { factorIcon->AsIntCon()->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField); } // change the multiplication into a smaller multiplication (by 3, 5 or 9) and a shift op1 = gtNewOperNode(GT_MUL, mul->TypeGet(), op1, factorIcon); mul->gtOp1 = op1; fgMorphTreeDone(op1); op2->AsIntConCommon()->SetIconValue(shift); changeToShift = true; } } if (changeToShift) { fgUpdateConstTreeValueNumber(op2); mul->ChangeOper(GT_LSH, GenTree::PRESERVE_VN); return mul; } } return nullptr; } //------------------------------------------------------------------------ // fgOptimizeBitwiseAnd: optimizes the "and" operation. // // Arguments: // andOp - the GT_AND tree to optimize. 
// // Return Value: // The optimized tree, currently always a relop, in case any transformations // were performed. Otherwise, "nullptr", guaranteeing no state change. // GenTree* Compiler::fgOptimizeBitwiseAnd(GenTreeOp* andOp) { assert(andOp->OperIs(GT_AND)); assert(!optValnumCSE_phase); GenTree* op1 = andOp->gtGetOp1(); GenTree* op2 = andOp->gtGetOp2(); // Fold "cmp & 1" to just "cmp". if (andOp->TypeIs(TYP_INT) && op1->OperIsCompare() && op2->IsIntegralConst(1)) { DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(andOp); return op1; } return nullptr; } //------------------------------------------------------------------------ // fgOptimizeRelationalComparisonWithCasts: Recognizes comparisons against // various cast operands and tries to remove them. E.g.: // // * GE int // +--* CAST long <- ulong <- uint // | \--* X int // \--* CNS_INT long // // to: // // * GE_un int // +--* X int // \--* CNS_INT int // // same for: // // * GE int // +--* CAST long <- ulong <- uint // | \--* X int // \--* CAST long <- [u]long <- int // \--* ARR_LEN int // // These patterns quite often show up along with index checks // // Arguments: // cmp - the GT_LE/GT_LT/GT_GE/GT_GT tree to morph. // // Return Value: // Returns the same tree where operands might have narrower types // // Notes: // TODO-Casts: consider unifying this function with "optNarrowTree" // GenTree* Compiler::fgOptimizeRelationalComparisonWithCasts(GenTreeOp* cmp) { assert(cmp->OperIs(GT_LE, GT_LT, GT_GE, GT_GT)); assert(!optValnumCSE_phase); GenTree* op1 = cmp->gtGetOp1(); GenTree* op2 = cmp->gtGetOp2(); // Caller is expected to call this function only if we have CAST nodes assert(op1->OperIs(GT_CAST) || op2->OperIs(GT_CAST)); if (!op1->TypeIs(TYP_LONG)) { // We can extend this logic to handle small types as well, but currently it's done mostly to // assist range check elimination return cmp; } GenTree* castOp; GenTree* knownPositiveOp; bool knownPositiveIsOp2; if (op2->IsIntegralConst() || ((op2->OperIs(GT_CAST) && op2->AsCast()->CastOp()->OperIs(GT_ARR_LENGTH)))) { // op2 is either a LONG constant or (T)ARR_LENGTH knownPositiveIsOp2 = true; castOp = cmp->gtGetOp1(); knownPositiveOp = cmp->gtGetOp2(); } else { // op1 is either a LONG constant (yes, it's pretty normal for relops) // or (T)ARR_LENGTH castOp = cmp->gtGetOp2(); knownPositiveOp = cmp->gtGetOp1(); knownPositiveIsOp2 = false; } if (castOp->OperIs(GT_CAST) && varTypeIsLong(castOp->CastToType()) && castOp->AsCast()->CastOp()->TypeIs(TYP_INT) && castOp->IsUnsigned() && !castOp->gtOverflow()) { bool knownPositiveFitsIntoU32 = false; if (knownPositiveOp->IsIntegralConst() && FitsIn<UINT32>(knownPositiveOp->AsIntConCommon()->IntegralValue())) { // BTW, we can fold the whole condition if op2 doesn't fit into UINT_MAX. knownPositiveFitsIntoU32 = true; } else if (knownPositiveOp->OperIs(GT_CAST) && varTypeIsLong(knownPositiveOp->CastToType()) && knownPositiveOp->AsCast()->CastOp()->OperIs(GT_ARR_LENGTH)) { knownPositiveFitsIntoU32 = true; // TODO-Casts: recognize Span.Length here as well. 
} if (!knownPositiveFitsIntoU32) { return cmp; } JITDUMP("Removing redundant cast(s) for:\n") DISPTREE(cmp) JITDUMP("\n\nto:\n\n") cmp->SetUnsigned(); // Drop cast from castOp if (knownPositiveIsOp2) { cmp->gtOp1 = castOp->AsCast()->CastOp(); } else { cmp->gtOp2 = castOp->AsCast()->CastOp(); } DEBUG_DESTROY_NODE(castOp); if (knownPositiveOp->OperIs(GT_CAST)) { // Drop cast from knownPositiveOp too if (knownPositiveIsOp2) { cmp->gtOp2 = knownPositiveOp->AsCast()->CastOp(); } else { cmp->gtOp1 = knownPositiveOp->AsCast()->CastOp(); } DEBUG_DESTROY_NODE(knownPositiveOp); } else { // Change type for constant from LONG to INT knownPositiveOp->ChangeType(TYP_INT); #ifndef TARGET_64BIT assert(knownPositiveOp->OperIs(GT_CNS_LNG)); knownPositiveOp->BashToConst(static_cast<int>(knownPositiveOp->AsIntConCommon()->IntegralValue())); #endif fgUpdateConstTreeValueNumber(knownPositiveOp); } DISPTREE(cmp) JITDUMP("\n") } return cmp; } //------------------------------------------------------------------------ // fgPropagateCommaThrow: propagate a "comma throw" up the tree. // // "Comma throws" in the compiler represent the canonical form of an always // throwing expression. They have the shape of COMMA(THROW, ZERO), to satisfy // the semantic that the original expression produced some value and are // generated by "gtFoldExprConst" when it encounters checked arithmetic that // will determinably overflow. // // In the global morphing phase, "comma throws" are "propagated" up the tree, // in post-order, to eliminate nodes that will never execute. This method, // called by "fgMorphSmpOp", encapsulates this optimization. // // Arguments: // parent - the node currently being processed. // commaThrow - the comma throw in question, "parent"'s operand. // precedingSideEffects - side effects of nodes preceding "comma" in execution order. // // Return Value: // If "parent" is to be replaced with a comma throw, i. e. the propagation was successful, // the new "parent", otherwise "nullptr", guaranteeing no state change, with one exception: // the "fgRemoveRestOfBlock" "global" may be set. Note that the new returned tree does not // have to be a "comma throw", it can be "bare" throw call if the "parent" node did not // produce any value. // // Notes: // "Comma throws" are very rare. // GenTree* Compiler::fgPropagateCommaThrow(GenTree* parent, GenTreeOp* commaThrow, GenTreeFlags precedingSideEffects) { // Comma throw propagation does not preserve VNs, and deletes nodes. assert(fgGlobalMorph); assert(fgIsCommaThrow(commaThrow)); if ((commaThrow->gtFlags & GTF_COLON_COND) == 0) { fgRemoveRestOfBlock = true; } if ((precedingSideEffects & GTF_ALL_EFFECT) == 0) { if (parent->TypeIs(TYP_VOID)) { // Return the throw node as the new tree. return commaThrow->gtGetOp1(); } // Fix up the COMMA's type if needed. if (genActualType(parent) != genActualType(commaThrow)) { commaThrow->gtGetOp2()->BashToZeroConst(genActualType(parent)); commaThrow->ChangeType(genActualType(parent)); } return commaThrow; } return nullptr; } //---------------------------------------------------------------------------------------------- // fgMorphRetInd: Try to get rid of extra IND(ADDR()) pairs in a return tree. // // Arguments: // node - The return node that uses an indirection. // // Return Value: // the original op1 of the ret if there was no optimization or an optimized new op1. 
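//
// Notes:
//    E.g. "RETURN(OBJ(ADDR(LCL_VAR)))" may be folded to "RETURN(LCL_VAR)" when the indirection
//    and the local have matching sizes, which lets the local stay enregisterable.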
// GenTree* Compiler::fgMorphRetInd(GenTreeUnOp* ret) { assert(ret->OperIs(GT_RETURN)); assert(ret->gtGetOp1()->OperIs(GT_IND, GT_BLK, GT_OBJ)); GenTreeIndir* ind = ret->gtGetOp1()->AsIndir(); GenTree* addr = ind->Addr(); if (addr->OperIs(GT_ADDR) && addr->gtGetOp1()->OperIs(GT_LCL_VAR)) { // If struct promotion was undone, adjust the annotations if (fgGlobalMorph && fgMorphImplicitByRefArgs(addr)) { return ind; } // If `return` retypes LCL_VAR as a smaller struct it should not set `doNotEnregister` on that // LclVar. // Example: in `Vector128:AsVector2` we have RETURN SIMD8(OBJ SIMD8(ADDR byref(LCL_VAR SIMD16))). GenTreeLclVar* lclVar = addr->gtGetOp1()->AsLclVar(); if (!lvaIsImplicitByRefLocal(lclVar->GetLclNum())) { assert(!gtIsActiveCSE_Candidate(addr) && !gtIsActiveCSE_Candidate(ind)); unsigned indSize; if (ind->OperIs(GT_IND)) { indSize = genTypeSize(ind); } else { indSize = ind->AsBlk()->GetLayout()->GetSize(); } LclVarDsc* varDsc = lvaGetDesc(lclVar); unsigned lclVarSize; if (!lclVar->TypeIs(TYP_STRUCT)) { lclVarSize = genTypeSize(varDsc->TypeGet()); } else { lclVarSize = varDsc->lvExactSize; } // TODO: change conditions in `canFold` to `indSize <= lclVarSize`, but currently do not support `BITCAST // int<-SIMD16` etc. assert((indSize <= lclVarSize) || varDsc->lvDoNotEnregister); #if defined(TARGET_64BIT) bool canFold = (indSize == lclVarSize); #else // !TARGET_64BIT // TODO: improve 32 bit targets handling for LONG returns if necessary, nowadays we do not support `BITCAST // long<->double` there. bool canFold = (indSize == lclVarSize) && (lclVarSize <= REGSIZE_BYTES); #endif // TODO: support `genReturnBB != nullptr`, it requires #11413 to avoid `Incompatible types for // gtNewTempAssign`. if (canFold && (genReturnBB == nullptr)) { // Fold (TYPE1)*(&(TYPE2)x) even if types do not match, lowering will handle it. // Getting rid of this IND(ADDR()) pair allows to keep lclVar as not address taken // and enregister it. DEBUG_DESTROY_NODE(ind); DEBUG_DESTROY_NODE(addr); ret->gtOp1 = lclVar; // We use GTF_DONT_CSE as an "is under GT_ADDR" check. We can // get rid of it now since the GT_RETURN node should never have // its address taken. assert((ret->gtFlags & GTF_DONT_CSE) == 0); lclVar->gtFlags &= ~GTF_DONT_CSE; return lclVar; } else if (!varDsc->lvDoNotEnregister) { lvaSetVarDoNotEnregister(lclVar->GetLclNum() DEBUGARG(DoNotEnregisterReason::BlockOpRet)); } } } return ind; } #ifdef _PREFAST_ #pragma warning(pop) #endif GenTree* Compiler::fgMorphSmpOpOptional(GenTreeOp* tree) { genTreeOps oper = tree->gtOper; GenTree* op1 = tree->gtOp1; GenTree* op2 = tree->gtOp2; var_types typ = tree->TypeGet(); if (fgGlobalMorph && GenTree::OperIsCommutative(oper)) { /* Swap the operands so that the more expensive one is 'op1' */ if (tree->gtFlags & GTF_REVERSE_OPS) { tree->gtOp1 = op2; tree->gtOp2 = op1; op2 = op1; op1 = tree->gtOp1; tree->gtFlags &= ~GTF_REVERSE_OPS; } if (oper == op2->gtOper) { /* Reorder nested operators at the same precedence level to be left-recursive. For example, change "(a+(b+c))" to the equivalent expression "((a+b)+c)". 
*/ /* Things are handled differently for floating-point operators */ if (!varTypeIsFloating(tree->TypeGet())) { fgMoveOpsLeft(tree); op1 = tree->gtOp1; op2 = tree->gtOp2; } } } #if REARRANGE_ADDS /* Change "((x+icon)+y)" to "((x+y)+icon)" Don't reorder floating-point operations */ if (fgGlobalMorph && (oper == GT_ADD) && !tree->gtOverflow() && (op1->gtOper == GT_ADD) && !op1->gtOverflow() && varTypeIsIntegralOrI(typ)) { GenTree* ad1 = op1->AsOp()->gtOp1; GenTree* ad2 = op1->AsOp()->gtOp2; if (!op2->OperIsConst() && ad2->OperIsConst()) { // This takes // + (tree) // / \. // / \. // / \. // + (op1) op2 // / \. // / \. // ad1 ad2 // // and it swaps ad2 and op2. // Don't create a byref pointer that may point outside of the ref object. // If a GC happens, the byref won't get updated. This can happen if one // of the int components is negative. It also requires the address generation // be in a fully-interruptible code region. if (!varTypeIsGC(ad1->TypeGet()) && !varTypeIsGC(op2->TypeGet())) { tree->gtOp2 = ad2; op1->AsOp()->gtOp2 = op2; op1->gtFlags |= op2->gtFlags & GTF_ALL_EFFECT; op2 = tree->gtOp2; } } } #endif /*------------------------------------------------------------------------- * Perform optional oper-specific postorder morphing */ switch (oper) { case GT_ASG: // Make sure we're allowed to do this. if (optValnumCSE_phase) { // It is not safe to reorder/delete CSE's break; } if (varTypeIsStruct(typ) && !tree->IsPhiDefn()) { if (tree->OperIsCopyBlkOp()) { return fgMorphCopyBlock(tree); } else { return fgMorphInitBlock(tree); } } if (typ == TYP_LONG) { break; } if (op2->gtFlags & GTF_ASG) { break; } if ((op2->gtFlags & GTF_CALL) && (op1->gtFlags & GTF_ALL_EFFECT)) { break; } /* Special case: a cast that can be thrown away */ // TODO-Cleanup: fgMorphSmp does a similar optimization. However, it removes only // one cast and sometimes there is another one after it that gets removed by this // code. fgMorphSmp should be improved to remove all redundant casts so this code // can be removed. 
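// E.g. in "ASG(IND(ubyte addr), CAST(short <- int x))" the cast can be dropped, since the
// byte-sized store truncates the value anyway.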
if (op1->gtOper == GT_IND && op2->gtOper == GT_CAST && !op2->gtOverflow()) { var_types srct; var_types cast; var_types dstt; srct = op2->AsCast()->CastOp()->TypeGet(); cast = (var_types)op2->CastToType(); dstt = op1->TypeGet(); /* Make sure these are all ints and precision is not lost */ if (genTypeSize(cast) >= genTypeSize(dstt) && dstt <= TYP_INT && srct <= TYP_INT) { op2 = tree->gtOp2 = op2->AsCast()->CastOp(); } } break; case GT_MUL: /* Check for the case "(val + icon) * icon" */ if (op2->gtOper == GT_CNS_INT && op1->gtOper == GT_ADD) { GenTree* add = op1->AsOp()->gtOp2; if (add->IsCnsIntOrI() && (op2->GetScaleIndexMul() != 0)) { if (tree->gtOverflow() || op1->gtOverflow()) { break; } ssize_t imul = op2->AsIntCon()->gtIconVal; ssize_t iadd = add->AsIntCon()->gtIconVal; /* Change '(val + iadd) * imul' -> '(val * imul) + (iadd * imul)' */ oper = GT_ADD; tree->ChangeOper(oper); op2->AsIntCon()->SetValueTruncating(iadd * imul); op1->ChangeOper(GT_MUL); add->AsIntCon()->SetIconValue(imul); } } break; case GT_DIV: /* For "val / 1", just return "val" */ if (op2->IsIntegralConst(1)) { DEBUG_DESTROY_NODE(tree); return op1; } break; case GT_UDIV: case GT_UMOD: tree->CheckDivideByConstOptimized(this); break; case GT_LSH: /* Check for the case "(val + icon) << icon" */ if (!optValnumCSE_phase && op2->IsCnsIntOrI() && op1->gtOper == GT_ADD && !op1->gtOverflow()) { GenTree* cns = op1->AsOp()->gtOp2; if (cns->IsCnsIntOrI() && (op2->GetScaleIndexShf() != 0)) { ssize_t ishf = op2->AsIntConCommon()->IconValue(); ssize_t iadd = cns->AsIntConCommon()->IconValue(); // printf("Changing '(val+icon1)<<icon2' into '(val<<icon2+icon1<<icon2)'\n"); /* Change "(val + iadd) << ishf" into "(val<<ishf + iadd<<ishf)" */ tree->ChangeOper(GT_ADD); // we are reusing the shift amount node here, but the type we want is that of the shift result op2->gtType = op1->gtType; op2->AsIntConCommon()->SetValueTruncating(iadd << ishf); if (cns->gtOper == GT_CNS_INT && cns->AsIntCon()->gtFieldSeq != nullptr && cns->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq()) { assert(cns->AsIntCon()->gtFieldSeq->m_next == nullptr); op2->AsIntCon()->gtFieldSeq = cns->AsIntCon()->gtFieldSeq; } op1->ChangeOper(GT_LSH); cns->AsIntConCommon()->SetIconValue(ishf); } } break; case GT_XOR: if (!optValnumCSE_phase) { /* "x ^ -1" is "~x" */ if (op2->IsIntegralConst(-1)) { tree->ChangeOper(GT_NOT); tree->gtOp2 = nullptr; DEBUG_DESTROY_NODE(op2); } else if (op2->IsIntegralConst(1) && op1->OperIsCompare()) { /* "binaryVal ^ 1" is "!binaryVal" */ gtReverseCond(op1); DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(tree); return op1; } } break; case GT_INIT_VAL: // Initialization values for initBlk have special semantics - their lower // byte is used to fill the struct. However, we allow 0 as a "bare" value, // which enables them to get a VNForZero, and be propagated. if (op1->IsIntegralConst(0)) { return op1; } break; default: break; } return tree; } #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) //------------------------------------------------------------------------ // fgMorphMultiOp: Morph a GenTreeMultiOp (SIMD/HWINTRINSIC) tree. // // Arguments: // multiOp - The tree to morph // // Return Value: // The fully morphed tree. 
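//
// Notes:
//    Morphs the operands, propagates their side effect flags, marks constant operands of
//    intrinsics with immediate arguments as do-not-CSE, and (on XARCH) folds vector
//    "XOR(x, 0)" back to "x".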
// GenTree* Compiler::fgMorphMultiOp(GenTreeMultiOp* multiOp) { gtUpdateNodeOperSideEffects(multiOp); bool dontCseConstArguments = false; #if defined(FEATURE_HW_INTRINSICS) // Opportunistically, avoid unexpected CSE for hw intrinsics with IMM arguments if (multiOp->OperIs(GT_HWINTRINSIC)) { NamedIntrinsic hwIntrinsic = multiOp->AsHWIntrinsic()->GetHWIntrinsicId(); #if defined(TARGET_XARCH) if (HWIntrinsicInfo::lookupCategory(hwIntrinsic) == HW_Category_IMM) { dontCseConstArguments = true; } #elif defined(TARGET_ARMARCH) if (HWIntrinsicInfo::HasImmediateOperand(hwIntrinsic)) { dontCseConstArguments = true; } #endif } #endif for (GenTree** use : multiOp->UseEdges()) { *use = fgMorphTree(*use); GenTree* operand = *use; multiOp->gtFlags |= (operand->gtFlags & GTF_ALL_EFFECT); if (dontCseConstArguments && operand->OperIsConst()) { operand->SetDoNotCSE(); } // Promoted structs after morph must be in one of two states: // a) Fully eliminated from the IR (independent promotion) OR only be // used by "special" nodes (e. g. LHS of ASGs for multi-reg structs). // b) Marked as do-not-enregister (dependent promotion). // // So here we preserve this invariant and mark any promoted structs as do-not-enreg. // if (operand->OperIs(GT_LCL_VAR) && lvaGetDesc(operand->AsLclVar())->lvPromoted) { lvaSetVarDoNotEnregister(operand->AsLclVar()->GetLclNum() DEBUGARG(DoNotEnregisterReason::SimdUserForcesDep)); } } #if defined(FEATURE_HW_INTRINSICS) if (opts.OptimizationEnabled() && multiOp->OperIs(GT_HWINTRINSIC)) { GenTreeHWIntrinsic* hw = multiOp->AsHWIntrinsic(); switch (hw->GetHWIntrinsicId()) { #if defined(TARGET_XARCH) case NI_SSE_Xor: case NI_SSE2_Xor: case NI_AVX_Xor: case NI_AVX2_Xor: { // Transform XOR(X, 0) to X for vectors GenTree* op1 = hw->Op(1); GenTree* op2 = hw->Op(2); if (!gtIsActiveCSE_Candidate(hw)) { if (op1->IsIntegralConstVector(0) && !gtIsActiveCSE_Candidate(op1)) { DEBUG_DESTROY_NODE(hw); DEBUG_DESTROY_NODE(op1); return op2; } if (op2->IsIntegralConstVector(0) && !gtIsActiveCSE_Candidate(op2)) { DEBUG_DESTROY_NODE(hw); DEBUG_DESTROY_NODE(op2); return op1; } } break; } #endif case NI_Vector128_Create: #if defined(TARGET_XARCH) case NI_Vector256_Create: #elif defined(TARGET_ARMARCH) case NI_Vector64_Create: #endif { bool hwAllArgsAreConst = true; for (GenTree** use : multiOp->UseEdges()) { if (!(*use)->OperIsConst()) { hwAllArgsAreConst = false; break; } } // Avoid unexpected CSE for constant arguments for Vector_.Create // but only if all arguments are constants. if (hwAllArgsAreConst) { for (GenTree** use : multiOp->UseEdges()) { (*use)->SetDoNotCSE(); } } } break; default: break; } } #endif // defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH) #ifdef FEATURE_HW_INTRINSICS if (multiOp->OperIsHWIntrinsic() && !optValnumCSE_phase) { return fgOptimizeHWIntrinsic(multiOp->AsHWIntrinsic()); } #endif return multiOp; } #endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) //------------------------------------------------------------------------ // fgMorphModToSubMulDiv: Transform a % b into the equivalent a - (a / b) * b // (see ECMA III 3.55 and III.3.56). // // Arguments: // tree - The GT_MOD/GT_UMOD tree to morph // // Returns: // The morphed tree // // Notes: // For ARM64 we don't have a remainder instruction so this transform is // always done. For XARCH this transform is done if we know that magic // division will be used, in that case this transform allows CSE to // eliminate the redundant div from code like "x = a / 3; y = a % 3;". 
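//    E.g. "a % 3" becomes "a - (a / 3) * 3", with "a" evaluated only once through a temporary.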
// GenTree* Compiler::fgMorphModToSubMulDiv(GenTreeOp* tree) { JITDUMP("\nMorphing MOD/UMOD [%06u] to Sub/Mul/Div\n", dspTreeID(tree)); if (tree->OperGet() == GT_MOD) { tree->SetOper(GT_DIV); } else if (tree->OperGet() == GT_UMOD) { tree->SetOper(GT_UDIV); } else { noway_assert(!"Illegal gtOper in fgMorphModToSubMulDiv"); } var_types type = tree->gtType; GenTree* const copyOfNumeratorValue = fgMakeMultiUse(&tree->gtOp1); GenTree* const copyOfDenominatorValue = fgMakeMultiUse(&tree->gtOp2); GenTree* const mul = gtNewOperNode(GT_MUL, type, tree, copyOfDenominatorValue); GenTree* const sub = gtNewOperNode(GT_SUB, type, copyOfNumeratorValue, mul); // Ensure "sub" does not evaluate "copyOfNumeratorValue" before it is defined by "mul". // sub->gtFlags |= GTF_REVERSE_OPS; #ifdef DEBUG sub->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif tree->CheckDivideByConstOptimized(this); return sub; } //------------------------------------------------------------------------------ // fgOperIsBitwiseRotationRoot : Check if the operation can be a root of a bitwise rotation tree. // // // Arguments: // oper - Operation to check // // Return Value: // True if the operation can be a root of a bitwise rotation tree; false otherwise. bool Compiler::fgOperIsBitwiseRotationRoot(genTreeOps oper) { return (oper == GT_OR) || (oper == GT_XOR); } //------------------------------------------------------------------------------ // fgRecognizeAndMorphBitwiseRotation : Check if the tree represents a left or right rotation. If so, return // an equivalent GT_ROL or GT_ROR tree; otherwise, return the original tree. // // Arguments: // tree - tree to check for a rotation pattern // // Return Value: // An equivalent GT_ROL or GT_ROR tree if a pattern is found; "nullptr" otherwise. // // Assumption: // The input is a GT_OR or a GT_XOR tree. GenTree* Compiler::fgRecognizeAndMorphBitwiseRotation(GenTree* tree) { // // Check for a rotation pattern, e.g., // // OR ROL // / \ / \. // LSH RSZ -> x y // / \ / \. // x AND x AND // / \ / \. // y 31 ADD 31 // / \. // NEG 32 // | // y // The patterns recognized: // (x << (y & M)) op (x >>> ((-y + N) & M)) // (x >>> ((-y + N) & M)) op (x << (y & M)) // // (x << y) op (x >>> (-y + N)) // (x >> > (-y + N)) op (x << y) // // (x >>> (y & M)) op (x << ((-y + N) & M)) // (x << ((-y + N) & M)) op (x >>> (y & M)) // // (x >>> y) op (x << (-y + N)) // (x << (-y + N)) op (x >>> y) // // (x << c1) op (x >>> c2) // (x >>> c1) op (x << c2) // // where // c1 and c2 are const // c1 + c2 == bitsize(x) // N == bitsize(x) // M is const // M & (N - 1) == N - 1 // op is either | or ^ if (((tree->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) != 0) || ((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0)) { // We can't do anything if the tree has assignments, calls, or volatile // reads. Note that we allow GTF_EXCEPT side effect since any exceptions // thrown by the original tree will be thrown by the transformed tree as well. return nullptr; } genTreeOps oper = tree->OperGet(); assert(fgOperIsBitwiseRotationRoot(oper)); // Check if we have an LSH on one side of the OR and an RSZ on the other side. 
GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); GenTree* leftShiftTree = nullptr; GenTree* rightShiftTree = nullptr; if ((op1->OperGet() == GT_LSH) && (op2->OperGet() == GT_RSZ)) { leftShiftTree = op1; rightShiftTree = op2; } else if ((op1->OperGet() == GT_RSZ) && (op2->OperGet() == GT_LSH)) { leftShiftTree = op2; rightShiftTree = op1; } else { return nullptr; } // Check if the trees representing the value to shift are identical. // We already checked that there are no side effects above. if (GenTree::Compare(leftShiftTree->gtGetOp1(), rightShiftTree->gtGetOp1())) { GenTree* rotatedValue = leftShiftTree->gtGetOp1(); var_types rotatedValueActualType = genActualType(rotatedValue->gtType); ssize_t rotatedValueBitSize = genTypeSize(rotatedValueActualType) * 8; noway_assert((rotatedValueBitSize == 32) || (rotatedValueBitSize == 64)); GenTree* leftShiftIndex = leftShiftTree->gtGetOp2(); GenTree* rightShiftIndex = rightShiftTree->gtGetOp2(); // The shift index may be masked. At least (rotatedValueBitSize - 1) lower bits // shouldn't be masked for the transformation to be valid. If additional // higher bits are not masked, the transformation is still valid since the result // of MSIL shift instructions is unspecified if the shift amount is greater or equal // than the width of the value being shifted. ssize_t minimalMask = rotatedValueBitSize - 1; ssize_t leftShiftMask = -1; ssize_t rightShiftMask = -1; if ((leftShiftIndex->OperGet() == GT_AND)) { if (leftShiftIndex->gtGetOp2()->IsCnsIntOrI()) { leftShiftMask = leftShiftIndex->gtGetOp2()->AsIntCon()->gtIconVal; leftShiftIndex = leftShiftIndex->gtGetOp1(); } else { return nullptr; } } if ((rightShiftIndex->OperGet() == GT_AND)) { if (rightShiftIndex->gtGetOp2()->IsCnsIntOrI()) { rightShiftMask = rightShiftIndex->gtGetOp2()->AsIntCon()->gtIconVal; rightShiftIndex = rightShiftIndex->gtGetOp1(); } else { return nullptr; } } if (((minimalMask & leftShiftMask) != minimalMask) || ((minimalMask & rightShiftMask) != minimalMask)) { // The shift index is overmasked, e.g., we have // something like (x << y & 15) or // (x >> (32 - y) & 15 with 32 bit x. // The transformation is not valid. return nullptr; } GenTree* shiftIndexWithAdd = nullptr; GenTree* shiftIndexWithoutAdd = nullptr; genTreeOps rotateOp = GT_NONE; GenTree* rotateIndex = nullptr; if (leftShiftIndex->OperGet() == GT_ADD) { shiftIndexWithAdd = leftShiftIndex; shiftIndexWithoutAdd = rightShiftIndex; rotateOp = GT_ROR; } else if (rightShiftIndex->OperGet() == GT_ADD) { shiftIndexWithAdd = rightShiftIndex; shiftIndexWithoutAdd = leftShiftIndex; rotateOp = GT_ROL; } if (shiftIndexWithAdd != nullptr) { if (shiftIndexWithAdd->gtGetOp2()->IsCnsIntOrI()) { if (shiftIndexWithAdd->gtGetOp2()->AsIntCon()->gtIconVal == rotatedValueBitSize) { if (shiftIndexWithAdd->gtGetOp1()->OperGet() == GT_NEG) { if (GenTree::Compare(shiftIndexWithAdd->gtGetOp1()->gtGetOp1(), shiftIndexWithoutAdd)) { // We found one of these patterns: // (x << (y & M)) | (x >>> ((-y + N) & M)) // (x << y) | (x >>> (-y + N)) // (x >>> (y & M)) | (x << ((-y + N) & M)) // (x >>> y) | (x << (-y + N)) // where N == bitsize(x), M is const, and // M & (N - 1) == N - 1 CLANG_FORMAT_COMMENT_ANCHOR; #ifndef TARGET_64BIT if (!shiftIndexWithoutAdd->IsCnsIntOrI() && (rotatedValueBitSize == 64)) { // TODO-X86-CQ: we need to handle variable-sized long shifts specially on x86. // GT_LSH, GT_RSH, and GT_RSZ have helpers for this case. We may need // to add helpers for GT_ROL and GT_ROR. 
return nullptr; } #endif rotateIndex = shiftIndexWithoutAdd; } } } } } else if ((leftShiftIndex->IsCnsIntOrI() && rightShiftIndex->IsCnsIntOrI())) { if (leftShiftIndex->AsIntCon()->gtIconVal + rightShiftIndex->AsIntCon()->gtIconVal == rotatedValueBitSize) { // We found this pattern: // (x << c1) | (x >>> c2) // where c1 and c2 are const and c1 + c2 == bitsize(x) rotateOp = GT_ROL; rotateIndex = leftShiftIndex; } } if (rotateIndex != nullptr) { noway_assert(GenTree::OperIsRotate(rotateOp)); GenTreeFlags inputTreeEffects = tree->gtFlags & GTF_ALL_EFFECT; // We can use the same tree only during global morph; reusing the tree in a later morph // may invalidate value numbers. if (fgGlobalMorph) { tree->AsOp()->gtOp1 = rotatedValue; tree->AsOp()->gtOp2 = rotateIndex; tree->ChangeOper(rotateOp); unsigned childFlags = 0; for (GenTree* op : tree->Operands()) { childFlags |= (op->gtFlags & GTF_ALL_EFFECT); } // The parent's flags should be a superset of its operands' flags noway_assert((inputTreeEffects & childFlags) == childFlags); } else { tree = gtNewOperNode(rotateOp, rotatedValueActualType, rotatedValue, rotateIndex); noway_assert(inputTreeEffects == (tree->gtFlags & GTF_ALL_EFFECT)); } return tree; } } return nullptr; } #if !defined(TARGET_64BIT) //------------------------------------------------------------------------------ // fgRecognizeAndMorphLongMul : Check for and morph long multiplication with 32 bit operands. // // Uses "GenTree::IsValidLongMul" to check for the long multiplication pattern. Will swap // operands if the first one is a constant and the second one is not, even for trees which // end up not being eligibile for long multiplication. // // Arguments: // mul - GT_MUL tree to check for a long multiplication opportunity // // Return Value: // The original tree, with operands possibly swapped, if it is not eligible for long multiplication. // Tree with GTF_MUL_64RSLT set, side effect flags propagated, and children morphed if it is. // GenTreeOp* Compiler::fgRecognizeAndMorphLongMul(GenTreeOp* mul) { assert(mul->OperIs(GT_MUL)); assert(mul->TypeIs(TYP_LONG)); GenTree* op1 = mul->gtGetOp1(); GenTree* op2 = mul->gtGetOp2(); // "IsValidLongMul" and decomposition do not handle constant op1. if (op1->IsIntegralConst()) { std::swap(op1, op2); mul->gtOp1 = op1; mul->gtOp2 = op2; } if (!mul->IsValidLongMul()) { return mul; } // MUL_LONG needs to do the work the casts would have done. mul->ClearUnsigned(); if (op1->IsUnsigned()) { mul->SetUnsigned(); } // "IsValidLongMul" returned "true", so this GT_MUL cannot overflow. mul->ClearOverflow(); mul->Set64RsltMul(); return fgMorphLongMul(mul); } //------------------------------------------------------------------------------ // fgMorphLongMul : Morphs GT_MUL nodes marked with GTF_MUL_64RSLT. // // Morphs *only* the operands of casts that compose the long mul to // avoid them being folded aways. // // Arguments: // mul - GT_MUL tree to morph operands of // // Return Value: // The original tree, with operands morphed and flags propagated. // GenTreeOp* Compiler::fgMorphLongMul(GenTreeOp* mul) { INDEBUG(mul->DebugCheckLongMul()); GenTree* op1 = mul->gtGetOp1(); GenTree* op2 = mul->gtGetOp2(); // Morph the operands. We cannot allow the casts to go away, so we morph their operands directly. 
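// Note that op1 is always a cast here, while op2 may be either a cast or an integral constant
// (see GenTree::IsValidLongMul), hence only op2 is checked below.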
op1->AsCast()->CastOp() = fgMorphTree(op1->AsCast()->CastOp()); op1->SetAllEffectsFlags(op1->AsCast()->CastOp()); if (op2->OperIs(GT_CAST)) { op2->AsCast()->CastOp() = fgMorphTree(op2->AsCast()->CastOp()); op2->SetAllEffectsFlags(op2->AsCast()->CastOp()); } mul->SetAllEffectsFlags(op1, op2); op1->SetDoNotCSE(); op2->SetDoNotCSE(); return mul; } #endif // !defined(TARGET_64BIT) /***************************************************************************** * * Transform the given tree for code generation and return an equivalent tree. */ GenTree* Compiler::fgMorphTree(GenTree* tree, MorphAddrContext* mac) { assert(tree); #ifdef DEBUG if (verbose) { if ((unsigned)JitConfig.JitBreakMorphTree() == tree->gtTreeID) { noway_assert(!"JitBreakMorphTree hit"); } } #endif #ifdef DEBUG int thisMorphNum = 0; if (verbose && treesBeforeAfterMorph) { thisMorphNum = morphNum++; printf("\nfgMorphTree (before %d):\n", thisMorphNum); gtDispTree(tree); } #endif if (fgGlobalMorph) { // Apply any rewrites for implicit byref arguments before morphing the // tree. if (fgMorphImplicitByRefArgs(tree)) { #ifdef DEBUG if (verbose && treesBeforeAfterMorph) { printf("\nfgMorphTree (%d), after implicit-byref rewrite:\n", thisMorphNum); gtDispTree(tree); } #endif } } /*------------------------------------------------------------------------- * fgMorphTree() can potentially replace a tree with another, and the * caller has to store the return value correctly. * Turn this on to always make copy of "tree" here to shake out * hidden/unupdated references. */ #ifdef DEBUG if (compStressCompile(STRESS_GENERIC_CHECK, 0)) { GenTree* copy; if (GenTree::s_gtNodeSizes[tree->gtOper] == TREE_NODE_SZ_SMALL) { copy = gtNewLargeOperNode(GT_ADD, TYP_INT); } else { copy = new (this, GT_CALL) GenTreeCall(TYP_INT); } copy->ReplaceWith(tree, this); #if defined(LATE_DISASM) // GT_CNS_INT is considered small, so ReplaceWith() won't copy all fields if ((tree->gtOper == GT_CNS_INT) && tree->IsIconHandle()) { copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle; } #endif DEBUG_DESTROY_NODE(tree); tree = copy; } #endif // DEBUG if (fgGlobalMorph) { /* Ensure that we haven't morphed this node already */ assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0) && "ERROR: Already morphed this node!"); /* Before morphing the tree, we try to propagate any active assertions */ if (optLocalAssertionProp) { /* Do we have any active assertions? */ if (optAssertionCount > 0) { GenTree* newTree = tree; while (newTree != nullptr) { tree = newTree; /* newTree is non-Null if we propagated an assertion */ newTree = optAssertionProp(apFull, tree, nullptr, nullptr); } assert(tree != nullptr); } } PREFAST_ASSUME(tree != nullptr); } /* Save the original un-morphed tree for fgMorphTreeDone */ GenTree* oldTree = tree; /* Figure out what kind of a node we have */ unsigned kind = tree->OperKind(); /* Is this a constant node? */ if (tree->OperIsConst()) { tree = fgMorphConst(tree); goto DONE; } /* Is this a leaf node? */ if (kind & GTK_LEAF) { tree = fgMorphLeaf(tree); goto DONE; } /* Is it a 'simple' unary/binary operator? 
*/ if (kind & GTK_SMPOP) { tree = fgMorphSmpOp(tree, mac); goto DONE; } /* See what kind of a special operator we have here */ switch (tree->OperGet()) { case GT_CALL: if (tree->OperMayThrow(this)) { tree->gtFlags |= GTF_EXCEPT; } else { tree->gtFlags &= ~GTF_EXCEPT; } tree = fgMorphCall(tree->AsCall()); break; #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) #if defined(FEATURE_SIMD) case GT_SIMD: #endif #if defined(FEATURE_HW_INTRINSICS) case GT_HWINTRINSIC: #endif tree = fgMorphMultiOp(tree->AsMultiOp()); break; #endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) case GT_ARR_ELEM: tree->AsArrElem()->gtArrObj = fgMorphTree(tree->AsArrElem()->gtArrObj); unsigned dim; for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++) { tree->AsArrElem()->gtArrInds[dim] = fgMorphTree(tree->AsArrElem()->gtArrInds[dim]); } tree->gtFlags &= ~GTF_CALL; tree->gtFlags |= tree->AsArrElem()->gtArrObj->gtFlags & GTF_ALL_EFFECT; for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++) { tree->gtFlags |= tree->AsArrElem()->gtArrInds[dim]->gtFlags & GTF_ALL_EFFECT; } if (fgGlobalMorph) { fgSetRngChkTarget(tree, false); } break; case GT_ARR_OFFSET: tree->AsArrOffs()->gtOffset = fgMorphTree(tree->AsArrOffs()->gtOffset); tree->AsArrOffs()->gtIndex = fgMorphTree(tree->AsArrOffs()->gtIndex); tree->AsArrOffs()->gtArrObj = fgMorphTree(tree->AsArrOffs()->gtArrObj); tree->gtFlags &= ~GTF_CALL; tree->gtFlags |= tree->AsArrOffs()->gtOffset->gtFlags & GTF_ALL_EFFECT; tree->gtFlags |= tree->AsArrOffs()->gtIndex->gtFlags & GTF_ALL_EFFECT; tree->gtFlags |= tree->AsArrOffs()->gtArrObj->gtFlags & GTF_ALL_EFFECT; if (fgGlobalMorph) { fgSetRngChkTarget(tree, false); } break; case GT_PHI: tree->gtFlags &= ~GTF_ALL_EFFECT; for (GenTreePhi::Use& use : tree->AsPhi()->Uses()) { use.SetNode(fgMorphTree(use.GetNode())); tree->gtFlags |= use.GetNode()->gtFlags & GTF_ALL_EFFECT; } break; case GT_FIELD_LIST: tree->gtFlags &= ~GTF_ALL_EFFECT; for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses()) { use.SetNode(fgMorphTree(use.GetNode())); tree->gtFlags |= (use.GetNode()->gtFlags & GTF_ALL_EFFECT); } break; case GT_CMPXCHG: tree->AsCmpXchg()->gtOpLocation = fgMorphTree(tree->AsCmpXchg()->gtOpLocation); tree->AsCmpXchg()->gtOpValue = fgMorphTree(tree->AsCmpXchg()->gtOpValue); tree->AsCmpXchg()->gtOpComparand = fgMorphTree(tree->AsCmpXchg()->gtOpComparand); tree->gtFlags &= (~GTF_EXCEPT & ~GTF_CALL); tree->gtFlags |= tree->AsCmpXchg()->gtOpLocation->gtFlags & GTF_ALL_EFFECT; tree->gtFlags |= tree->AsCmpXchg()->gtOpValue->gtFlags & GTF_ALL_EFFECT; tree->gtFlags |= tree->AsCmpXchg()->gtOpComparand->gtFlags & GTF_ALL_EFFECT; break; case GT_STORE_DYN_BLK: tree = fgMorphStoreDynBlock(tree->AsStoreDynBlk()); break; default: #ifdef DEBUG gtDispTree(tree); #endif noway_assert(!"unexpected operator"); } DONE: fgMorphTreeDone(tree, oldTree DEBUGARG(thisMorphNum)); return tree; } //------------------------------------------------------------------------ // fgKillDependentAssertionsSingle: Kill all assertions specific to lclNum // // Arguments: // lclNum - The varNum of the lclVar for which we're killing assertions. // tree - (DEBUG only) the tree responsible for killing its assertions. 
// void Compiler::fgKillDependentAssertionsSingle(unsigned lclNum DEBUGARG(GenTree* tree)) { /* All dependent assertions are killed here */ ASSERT_TP killed = BitVecOps::MakeCopy(apTraits, GetAssertionDep(lclNum)); if (killed) { AssertionIndex index = optAssertionCount; while (killed && (index > 0)) { if (BitVecOps::IsMember(apTraits, killed, index - 1)) { #ifdef DEBUG AssertionDsc* curAssertion = optGetAssertion(index); noway_assert((curAssertion->op1.lcl.lclNum == lclNum) || ((curAssertion->op2.kind == O2K_LCLVAR_COPY) && (curAssertion->op2.lcl.lclNum == lclNum))); if (verbose) { printf("\nThe assignment "); printTreeID(tree); printf(" using V%02u removes: ", curAssertion->op1.lcl.lclNum); optPrintAssertion(curAssertion); } #endif // Remove this bit from the killed mask BitVecOps::RemoveElemD(apTraits, killed, index - 1); optAssertionRemove(index); } index--; } // killed mask should now be zero noway_assert(BitVecOps::IsEmpty(apTraits, killed)); } } //------------------------------------------------------------------------ // fgKillDependentAssertions: Kill all dependent assertions with regard to lclNum. // // Arguments: // lclNum - The varNum of the lclVar for which we're killing assertions. // tree - (DEBUG only) the tree responsible for killing its assertions. // // Notes: // For structs and struct fields, it will invalidate the children and parent // respectively. // Calls fgKillDependentAssertionsSingle to kill the assertions for a single lclVar. // void Compiler::fgKillDependentAssertions(unsigned lclNum DEBUGARG(GenTree* tree)) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->lvPromoted) { noway_assert(varTypeIsStruct(varDsc)); // Kill the field locals. for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i) { fgKillDependentAssertionsSingle(i DEBUGARG(tree)); } // Kill the struct local itself. fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree)); } else if (varDsc->lvIsStructField) { // Kill the field local. fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree)); // Kill the parent struct. fgKillDependentAssertionsSingle(varDsc->lvParentLcl DEBUGARG(tree)); } else { fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree)); } } /***************************************************************************** * * This function is called to complete the morphing of a tree node * It should only be called once for each node. * If DEBUG is defined the flag GTF_DEBUG_NODE_MORPHED is checked and updated, * to enforce the invariant that each node is only morphed once. * If local assertion prop is enabled the result tree may be replaced * by an equivalent tree. * */ void Compiler::fgMorphTreeDone(GenTree* tree, GenTree* oldTree /* == NULL */ DEBUGARG(int morphNum)) { #ifdef DEBUG if (verbose && treesBeforeAfterMorph) { printf("\nfgMorphTree (after %d):\n", morphNum); gtDispTree(tree); printf(""); // in our logic this causes a flush } #endif if (!fgGlobalMorph) { return; } if ((oldTree != nullptr) && (oldTree != tree)) { /* Ensure that we have morphed this node */ assert((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) && "ERROR: Did not morph this node!"); #ifdef DEBUG TransferTestDataToNode(oldTree, tree); #endif } else { // Ensure that we haven't morphed this node already assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0) && "ERROR: Already morphed this node!"); } if (tree->OperIsConst()) { goto DONE; } if (!optLocalAssertionProp) { goto DONE; } /* Do we have any active assertions? 
*/ if (optAssertionCount > 0) { /* Is this an assignment to a local variable */ GenTreeLclVarCommon* lclVarTree = nullptr; // The check below will miss LIR-style assignments. // // But we shouldn't be running local assertion prop on these, // as local prop gets disabled when we run global prop. assert(!tree->OperIs(GT_STORE_LCL_VAR, GT_STORE_LCL_FLD)); // DefinesLocal can return true for some BLK op uses, so // check what gets assigned only when we're at an assignment. if (tree->OperIs(GT_ASG) && tree->DefinesLocal(this, &lclVarTree)) { unsigned lclNum = lclVarTree->GetLclNum(); noway_assert(lclNum < lvaCount); fgKillDependentAssertions(lclNum DEBUGARG(tree)); } } /* If this tree makes a new assertion - make it available */ optAssertionGen(tree); DONE:; #ifdef DEBUG /* Mark this node as being morphed */ tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif } //------------------------------------------------------------------------ // fgFoldConditional: try and fold conditionals and optimize BBJ_COND or // BBJ_SWITCH blocks. // // Argumetns: // block - block to examine // // Returns: // FoldResult indicating what changes were made, if any // Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) { FoldResult result = FoldResult::FOLD_DID_NOTHING; // We don't want to make any code unreachable // if (opts.OptimizationDisabled()) { return result; } if (block->bbJumpKind == BBJ_COND) { noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr); Statement* lastStmt = block->lastStmt(); noway_assert(lastStmt->GetNextStmt() == nullptr); if (lastStmt->GetRootNode()->gtOper == GT_CALL) { noway_assert(fgRemoveRestOfBlock); // Unconditional throw - transform the basic block into a BBJ_THROW // fgConvertBBToThrowBB(block); result = FoldResult::FOLD_CHANGED_CONTROL_FLOW; JITDUMP("\nConditional folded at " FMT_BB "\n", block->bbNum); JITDUMP(FMT_BB " becomes a BBJ_THROW\n", block->bbNum); return result; } noway_assert(lastStmt->GetRootNode()->gtOper == GT_JTRUE); /* Did we fold the conditional */ noway_assert(lastStmt->GetRootNode()->AsOp()->gtOp1); GenTree* condTree; condTree = lastStmt->GetRootNode()->AsOp()->gtOp1; GenTree* cond; cond = condTree->gtEffectiveVal(true); if (cond->OperIsConst()) { /* Yupee - we folded the conditional! * Remove the conditional statement */ noway_assert(cond->gtOper == GT_CNS_INT); noway_assert((block->bbNext->countOfInEdges() > 0) && (block->bbJumpDest->countOfInEdges() > 0)); if (condTree != cond) { // Preserve any side effects assert(condTree->OperIs(GT_COMMA)); lastStmt->SetRootNode(condTree); result = FoldResult::FOLD_ALTERED_LAST_STMT; } else { // no side effects, remove the jump entirely fgRemoveStmt(block, lastStmt); result = FoldResult::FOLD_REMOVED_LAST_STMT; } // block is a BBJ_COND that we are folding the conditional for. // bTaken is the path that will always be taken from block. // bNotTaken is the path that will never be taken from block. 
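// E.g. for "JTRUE(1)" bTaken is the jump target (bbJumpDest) and bNotTaken is the fall-through
// block (bbNext); for "JTRUE(0)" the roles are swapped.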
// BasicBlock* bTaken; BasicBlock* bNotTaken; if (cond->AsIntCon()->gtIconVal != 0) { /* JTRUE 1 - transform the basic block into a BBJ_ALWAYS */ block->bbJumpKind = BBJ_ALWAYS; bTaken = block->bbJumpDest; bNotTaken = block->bbNext; } else { /* Unmark the loop if we are removing a backwards branch */ /* dest block must also be marked as a loop head and */ /* We must be able to reach the backedge block */ if ((block->bbJumpDest->isLoopHead()) && (block->bbJumpDest->bbNum <= block->bbNum) && fgReachable(block->bbJumpDest, block)) { optUnmarkLoopBlocks(block->bbJumpDest, block); } /* JTRUE 0 - transform the basic block into a BBJ_NONE */ block->bbJumpKind = BBJ_NONE; bTaken = block->bbNext; bNotTaken = block->bbJumpDest; } if (fgHaveValidEdgeWeights) { // We are removing an edge from block to bNotTaken // and we have already computed the edge weights, so // we will try to adjust some of the weights // flowList* edgeTaken = fgGetPredForBlock(bTaken, block); BasicBlock* bUpdated = nullptr; // non-NULL if we updated the weight of an internal block // We examine the taken edge (block -> bTaken) // if block has valid profile weight and bTaken does not we try to adjust bTaken's weight // else if bTaken has valid profile weight and block does not we try to adjust block's weight // We can only adjust the block weights when (the edge block -> bTaken) is the only edge into bTaken // if (block->hasProfileWeight()) { // The edge weights for (block -> bTaken) are 100% of block's weight edgeTaken->setEdgeWeights(block->bbWeight, block->bbWeight, bTaken); if (!bTaken->hasProfileWeight()) { if ((bTaken->countOfInEdges() == 1) || (bTaken->bbWeight < block->bbWeight)) { // Update the weight of bTaken bTaken->inheritWeight(block); bUpdated = bTaken; } } } else if (bTaken->hasProfileWeight()) { if (bTaken->countOfInEdges() == 1) { // There is only one in edge to bTaken edgeTaken->setEdgeWeights(bTaken->bbWeight, bTaken->bbWeight, bTaken); // Update the weight of block block->inheritWeight(bTaken); bUpdated = block; } } if (bUpdated != nullptr) { weight_t newMinWeight; weight_t newMaxWeight; flowList* edge; // Now fix the weights of the edges out of 'bUpdated' switch (bUpdated->bbJumpKind) { case BBJ_NONE: edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated); newMaxWeight = bUpdated->bbWeight; newMinWeight = min(edge->edgeWeightMin(), newMaxWeight); edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext); break; case BBJ_COND: edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated); newMaxWeight = bUpdated->bbWeight; newMinWeight = min(edge->edgeWeightMin(), newMaxWeight); edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext); FALLTHROUGH; case BBJ_ALWAYS: edge = fgGetPredForBlock(bUpdated->bbJumpDest, bUpdated); newMaxWeight = bUpdated->bbWeight; newMinWeight = min(edge->edgeWeightMin(), newMaxWeight); edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext); break; default: // We don't handle BBJ_SWITCH break; } } } /* modify the flow graph */ /* Remove 'block' from the predecessor list of 'bNotTaken' */ fgRemoveRefPred(bNotTaken, block); #ifdef DEBUG if (verbose) { printf("\nConditional folded at " FMT_BB "\n", block->bbNum); printf(FMT_BB " becomes a %s", block->bbNum, block->bbJumpKind == BBJ_ALWAYS ? 
"BBJ_ALWAYS" : "BBJ_NONE"); if (block->bbJumpKind == BBJ_ALWAYS) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } printf("\n"); } #endif /* if the block was a loop condition we may have to modify * the loop table */ for (unsigned loopNum = 0; loopNum < optLoopCount; loopNum++) { /* Some loops may have been already removed by * loop unrolling or conditional folding */ if (optLoopTable[loopNum].lpFlags & LPFLG_REMOVED) { continue; } /* We are only interested in the loop bottom */ if (optLoopTable[loopNum].lpBottom == block) { if (cond->AsIntCon()->gtIconVal == 0) { /* This was a bogus loop (condition always false) * Remove the loop from the table */ optMarkLoopRemoved(loopNum); optLoopTable[loopNum].lpTop->unmarkLoopAlign(this DEBUG_ARG("Bogus loop")); #ifdef DEBUG if (verbose) { printf("Removing loop " FMT_LP " (from " FMT_BB " to " FMT_BB ")\n\n", loopNum, optLoopTable[loopNum].lpTop->bbNum, optLoopTable[loopNum].lpBottom->bbNum); } #endif } } } } } else if (block->bbJumpKind == BBJ_SWITCH) { noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr); Statement* lastStmt = block->lastStmt(); noway_assert(lastStmt->GetNextStmt() == nullptr); if (lastStmt->GetRootNode()->gtOper == GT_CALL) { noway_assert(fgRemoveRestOfBlock); // Unconditional throw - transform the basic block into a BBJ_THROW // fgConvertBBToThrowBB(block); result = FoldResult::FOLD_CHANGED_CONTROL_FLOW; JITDUMP("\nConditional folded at " FMT_BB "\n", block->bbNum); JITDUMP(FMT_BB " becomes a BBJ_THROW\n", block->bbNum); return result; } noway_assert(lastStmt->GetRootNode()->gtOper == GT_SWITCH); /* Did we fold the conditional */ noway_assert(lastStmt->GetRootNode()->AsOp()->gtOp1); GenTree* condTree; condTree = lastStmt->GetRootNode()->AsOp()->gtOp1; GenTree* cond; cond = condTree->gtEffectiveVal(true); if (cond->OperIsConst()) { /* Yupee - we folded the conditional! * Remove the conditional statement */ noway_assert(cond->gtOper == GT_CNS_INT); if (condTree != cond) { // Preserve any side effects assert(condTree->OperIs(GT_COMMA)); lastStmt->SetRootNode(condTree); result = FoldResult::FOLD_ALTERED_LAST_STMT; } else { // no side effects, remove the switch entirely fgRemoveStmt(block, lastStmt); result = FoldResult::FOLD_REMOVED_LAST_STMT; } /* modify the flow graph */ /* Find the actual jump target */ unsigned switchVal; switchVal = (unsigned)cond->AsIntCon()->gtIconVal; unsigned jumpCnt; jumpCnt = block->bbJumpSwt->bbsCount; BasicBlock** jumpTab; jumpTab = block->bbJumpSwt->bbsDstTab; bool foundVal; foundVal = false; for (unsigned val = 0; val < jumpCnt; val++, jumpTab++) { BasicBlock* curJump = *jumpTab; assert(curJump->countOfInEdges() > 0); // If val matches switchVal or we are at the last entry and // we never found the switch value then set the new jump dest if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1))) { if (curJump != block->bbNext) { /* transform the basic block into a BBJ_ALWAYS */ block->bbJumpKind = BBJ_ALWAYS; block->bbJumpDest = curJump; } else { /* transform the basic block into a BBJ_NONE */ block->bbJumpKind = BBJ_NONE; } foundVal = true; } else { /* Remove 'block' from the predecessor list of 'curJump' */ fgRemoveRefPred(curJump, block); } } assert(foundVal); #ifdef DEBUG if (verbose) { printf("\nConditional folded at " FMT_BB "\n", block->bbNum); printf(FMT_BB " becomes a %s", block->bbNum, block->bbJumpKind == BBJ_ALWAYS ? 
"BBJ_ALWAYS" : "BBJ_NONE"); if (block->bbJumpKind == BBJ_ALWAYS) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } printf("\n"); } #endif } } return result; } //------------------------------------------------------------------------ // fgMorphBlockStmt: morph a single statement in a block. // // Arguments: // block - block containing the statement // stmt - statement to morph // msg - string to identify caller in a dump // // Returns: // true if 'stmt' was removed from the block. // s false if 'stmt' is still in the block (even if other statements were removed). // // Notes: // Can be called anytime, unlike fgMorphStmts() which should only be called once. // bool Compiler::fgMorphBlockStmt(BasicBlock* block, Statement* stmt DEBUGARG(const char* msg)) { assert(block != nullptr); assert(stmt != nullptr); // Reset some ambient state fgRemoveRestOfBlock = false; compCurBB = block; compCurStmt = stmt; GenTree* morph = fgMorphTree(stmt->GetRootNode()); // Bug 1106830 - During the CSE phase we can't just remove // morph->AsOp()->gtOp2 as it could contain CSE expressions. // This leads to a noway_assert in OptCSE.cpp when // searching for the removed CSE ref. (using gtFindLink) // if (!optValnumCSE_phase) { // Check for morph as a GT_COMMA with an unconditional throw if (fgIsCommaThrow(morph, true)) { #ifdef DEBUG if (verbose) { printf("Folding a top-level fgIsCommaThrow stmt\n"); printf("Removing op2 as unreachable:\n"); gtDispTree(morph->AsOp()->gtOp2); printf("\n"); } #endif // Use the call as the new stmt morph = morph->AsOp()->gtOp1; noway_assert(morph->gtOper == GT_CALL); } // we can get a throw as a statement root if (fgIsThrow(morph)) { #ifdef DEBUG if (verbose) { printf("We have a top-level fgIsThrow stmt\n"); printf("Removing the rest of block as unreachable:\n"); } #endif noway_assert((morph->gtFlags & GTF_COLON_COND) == 0); fgRemoveRestOfBlock = true; } } stmt->SetRootNode(morph); // Can the entire tree be removed? bool removedStmt = false; // Defer removing statements during CSE so we don't inadvertently remove any CSE defs. if (!optValnumCSE_phase) { removedStmt = fgCheckRemoveStmt(block, stmt); } // Or this is the last statement of a conditional branch that was just folded? if (!removedStmt && (stmt->GetNextStmt() == nullptr) && !fgRemoveRestOfBlock) { FoldResult const fr = fgFoldConditional(block); removedStmt = (fr == FoldResult::FOLD_REMOVED_LAST_STMT); } if (!removedStmt) { // Have to re-do the evaluation order since for example some later code does not expect constants as op1 gtSetStmtInfo(stmt); // Have to re-link the nodes for this statement fgSetStmtSeq(stmt); } #ifdef DEBUG if (verbose) { printf("%s %s tree:\n", msg, (removedStmt ? "removed" : "morphed")); gtDispTree(morph); printf("\n"); } #endif if (fgRemoveRestOfBlock) { // Remove the rest of the stmts in the block for (Statement* removeStmt : StatementList(stmt->GetNextStmt())) { fgRemoveStmt(block, removeStmt); } // The rest of block has been removed and we will always throw an exception. // // For compDbgCode, we prepend an empty BB as the firstBB, it is BBJ_NONE. // We should not convert it to a ThrowBB. 
if ((block != fgFirstBB) || ((fgFirstBB->bbFlags & BBF_INTERNAL) == 0)) { // Convert block to a throw bb fgConvertBBToThrowBB(block); } #ifdef DEBUG if (verbose) { printf("\n%s Block " FMT_BB " becomes a throw block.\n", msg, block->bbNum); } #endif fgRemoveRestOfBlock = false; } return removedStmt; } /***************************************************************************** * * Morph the statements of the given block. * This function should be called just once for a block. Use fgMorphBlockStmt() * for reentrant calls. */ void Compiler::fgMorphStmts(BasicBlock* block) { fgRemoveRestOfBlock = false; fgCurrentlyInUseArgTemps = hashBv::Create(this); for (Statement* const stmt : block->Statements()) { if (fgRemoveRestOfBlock) { fgRemoveStmt(block, stmt); continue; } #ifdef FEATURE_SIMD if (opts.OptimizationEnabled() && stmt->GetRootNode()->TypeGet() == TYP_FLOAT && stmt->GetRootNode()->OperGet() == GT_ASG) { fgMorphCombineSIMDFieldAssignments(block, stmt); } #endif fgMorphStmt = stmt; compCurStmt = stmt; GenTree* oldTree = stmt->GetRootNode(); #ifdef DEBUG unsigned oldHash = verbose ? gtHashValue(oldTree) : DUMMY_INIT(~0); if (verbose) { printf("\nfgMorphTree " FMT_BB ", " FMT_STMT " (before)\n", block->bbNum, stmt->GetID()); gtDispTree(oldTree); } #endif /* Morph this statement tree */ GenTree* morphedTree = fgMorphTree(oldTree); // mark any outgoing arg temps as free so we can reuse them in the next statement. fgCurrentlyInUseArgTemps->ZeroAll(); // Has fgMorphStmt been sneakily changed ? if ((stmt->GetRootNode() != oldTree) || (block != compCurBB)) { if (stmt->GetRootNode() != oldTree) { /* This must be tailcall. Ignore 'morphedTree' and carry on with the tail-call node */ morphedTree = stmt->GetRootNode(); } else { /* This must be a tailcall that caused a GCPoll to get injected. We haven't actually morphed the call yet but the flag still got set, clear it here... */ CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG morphedTree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; #endif } noway_assert(compTailCallUsed); noway_assert(morphedTree->gtOper == GT_CALL); GenTreeCall* call = morphedTree->AsCall(); // Could be // - a fast call made as jmp in which case block will be ending with // BBJ_RETURN (as we need epilog) and marked as containing a jmp. // - a tailcall dispatched via JIT helper, on x86, in which case // block will be ending with BBJ_THROW. // - a tail call dispatched via runtime help (IL stubs), in which // case there will not be any tailcall and the block will be ending // with BBJ_RETURN (as normal control flow) noway_assert((call->IsFastTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN) && ((compCurBB->bbFlags & BBF_HAS_JMP)) != 0) || (call->IsTailCallViaJitHelper() && (compCurBB->bbJumpKind == BBJ_THROW)) || (!call->IsTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN))); } #ifdef DEBUG if (compStressCompile(STRESS_CLONE_EXPR, 30)) { // Clone all the trees to stress gtCloneExpr() if (verbose) { printf("\nfgMorphTree (stressClone from):\n"); gtDispTree(morphedTree); } morphedTree = gtCloneExpr(morphedTree); noway_assert(morphedTree != nullptr); if (verbose) { printf("\nfgMorphTree (stressClone to):\n"); gtDispTree(morphedTree); } } /* If the hash value changes. 
we modified the tree during morphing */ if (verbose) { unsigned newHash = gtHashValue(morphedTree); if (newHash != oldHash) { printf("\nfgMorphTree " FMT_BB ", " FMT_STMT " (after)\n", block->bbNum, stmt->GetID()); gtDispTree(morphedTree); } } #endif /* Check for morphedTree as a GT_COMMA with an unconditional throw */ if (!gtIsActiveCSE_Candidate(morphedTree) && fgIsCommaThrow(morphedTree, true)) { /* Use the call as the new stmt */ morphedTree = morphedTree->AsOp()->gtOp1; noway_assert(morphedTree->gtOper == GT_CALL); noway_assert((morphedTree->gtFlags & GTF_COLON_COND) == 0); fgRemoveRestOfBlock = true; } stmt->SetRootNode(morphedTree); if (fgRemoveRestOfBlock) { continue; } /* Has the statement been optimized away */ if (fgCheckRemoveStmt(block, stmt)) { continue; } /* Check if this block ends with a conditional branch that can be folded */ if (fgFoldConditional(block) != FoldResult::FOLD_DID_NOTHING) { continue; } if (ehBlockHasExnFlowDsc(block)) { continue; } } if (fgRemoveRestOfBlock) { if ((block->bbJumpKind == BBJ_COND) || (block->bbJumpKind == BBJ_SWITCH)) { Statement* first = block->firstStmt(); noway_assert(first); Statement* lastStmt = block->lastStmt(); noway_assert(lastStmt && lastStmt->GetNextStmt() == nullptr); GenTree* last = lastStmt->GetRootNode(); if (((block->bbJumpKind == BBJ_COND) && (last->gtOper == GT_JTRUE)) || ((block->bbJumpKind == BBJ_SWITCH) && (last->gtOper == GT_SWITCH))) { GenTree* op1 = last->AsOp()->gtOp1; if (op1->OperIsCompare()) { /* Unmark the comparison node with GTF_RELOP_JMP_USED */ op1->gtFlags &= ~GTF_RELOP_JMP_USED; } lastStmt->SetRootNode(fgMorphTree(op1)); } } /* Mark block as a BBJ_THROW block */ fgConvertBBToThrowBB(block); } #if FEATURE_FASTTAILCALL GenTree* recursiveTailCall = nullptr; if (block->endsWithTailCallConvertibleToLoop(this, &recursiveTailCall)) { fgMorphRecursiveFastTailCallIntoLoop(block, recursiveTailCall->AsCall()); } #endif // Reset this back so that it doesn't leak out impacting other blocks fgRemoveRestOfBlock = false; } /***************************************************************************** * * Morph the blocks of the method. * Returns true if the basic block list is modified. * This function should be called just once. */ void Compiler::fgMorphBlocks() { #ifdef DEBUG if (verbose) { printf("\n*************** In fgMorphBlocks()\n"); } #endif /* Since fgMorphTree can be called after various optimizations to re-arrange * the nodes we need a global flag to signal if we are during the one-pass * global morphing */ fgGlobalMorph = true; // // Local assertion prop is enabled if we are optimized // optLocalAssertionProp = opts.OptimizationEnabled(); if (optLocalAssertionProp) { // // Initialize for local assertion prop // optAssertionInit(true); } if (!compEnregLocals()) { // Morph is checking if lvDoNotEnregister is already set for some optimizations. // If we are running without `CLFLG_REGVAR` flag set (`compEnregLocals() == false`) // then we already know that we won't enregister any locals and it is better to set // this flag before we start reading it. // The main reason why this flag is not set is that we are running in minOpts. 
lvSetMinOptsDoNotEnreg(); } /*------------------------------------------------------------------------- * Process all basic blocks in the function */ BasicBlock* block = fgFirstBB; noway_assert(block); do { #ifdef DEBUG if (verbose) { printf("\nMorphing " FMT_BB " of '%s'\n", block->bbNum, info.compFullName); } #endif if (optLocalAssertionProp) { // // Clear out any currently recorded assertion candidates // before processing each basic block, // also we must handle QMARK-COLON specially // optAssertionReset(0); } // Make the current basic block address available globally. compCurBB = block; // Process all statement trees in the basic block. fgMorphStmts(block); // Do we need to merge the result of this block into a single return block? if ((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) { if ((genReturnBB != nullptr) && (genReturnBB != block)) { fgMergeBlockReturn(block); } } block = block->bbNext; } while (block != nullptr); // We are done with the global morphing phase fgGlobalMorph = false; compCurBB = nullptr; // Under OSR, we no longer need to specially protect the original method entry // if (opts.IsOSR() && (fgEntryBB != nullptr) && (fgEntryBB->bbFlags & BBF_IMPORTED)) { JITDUMP("OSR: un-protecting original method entry " FMT_BB "\n", fgEntryBB->bbNum); assert(fgEntryBB->bbRefs > 0); fgEntryBB->bbRefs--; // We don't need to remember this block anymore. fgEntryBB = nullptr; } #ifdef DEBUG if (verboseTrees) { fgDispBasicBlocks(true); } #endif } //------------------------------------------------------------------------ // fgMergeBlockReturn: assign the block return value (if any) into the single return temp // and branch to the single return block. // // Arguments: // block - the block to process. // // Notes: // A block is not guaranteed to have a last stmt if its jump kind is BBJ_RETURN. // For example a method returning void could have an empty block with jump kind BBJ_RETURN. // Such blocks do materialize as part of in-lining. // // A block with jump kind BBJ_RETURN does not necessarily need to end with GT_RETURN. // It could end with a tail call or rejected tail call or monitor.exit or a GT_INTRINSIC. // For now it is safe to explicitly check whether last stmt is GT_RETURN if genReturnLocal // is BAD_VAR_NUM. // void Compiler::fgMergeBlockReturn(BasicBlock* block) { assert((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)); assert((genReturnBB != nullptr) && (genReturnBB != block)); // TODO: Need to characterize the last top level stmt of a block ending with BBJ_RETURN. Statement* lastStmt = block->lastStmt(); GenTree* ret = (lastStmt != nullptr) ? lastStmt->GetRootNode() : nullptr; if ((ret != nullptr) && (ret->OperGet() == GT_RETURN) && ((ret->gtFlags & GTF_RET_MERGED) != 0)) { // This return was generated during epilog merging, so leave it alone } else { // We'll jump to the genReturnBB. CLANG_FORMAT_COMMENT_ANCHOR; #if !defined(TARGET_X86) if (info.compFlags & CORINFO_FLG_SYNCH) { fgConvertSyncReturnToLeave(block); } else #endif // !TARGET_X86 { block->bbJumpKind = BBJ_ALWAYS; block->bbJumpDest = genReturnBB; fgAddRefPred(genReturnBB, block); fgReturnCount--; } if (genReturnLocal != BAD_VAR_NUM) { // replace the GT_RETURN node to be a GT_ASG that stores the return value into genReturnLocal. // Method must be returning a value other than TYP_VOID. 
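            // The "genReturnLocal = <return value>" assignment is built with gtNewTempAssign
            // below; if it comes back as a copy/init block op it is re-morphed, and if
            // gtNewTempAssign appended extra statements (pAfterStatement changed), the new
            // root is inserted after them instead of replacing lastStmt.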
noway_assert(compMethodHasRetVal()); // This block must be ending with a GT_RETURN noway_assert(lastStmt != nullptr); noway_assert(lastStmt->GetNextStmt() == nullptr); noway_assert(ret != nullptr); // GT_RETURN must have non-null operand as the method is returning the value assigned to // genReturnLocal noway_assert(ret->OperGet() == GT_RETURN); noway_assert(ret->gtGetOp1() != nullptr); Statement* pAfterStatement = lastStmt; const DebugInfo& di = lastStmt->GetDebugInfo(); GenTree* tree = gtNewTempAssign(genReturnLocal, ret->gtGetOp1(), &pAfterStatement, di, block); if (tree->OperIsCopyBlkOp()) { tree = fgMorphCopyBlock(tree); } else if (tree->OperIsInitBlkOp()) { tree = fgMorphInitBlock(tree); } if (pAfterStatement == lastStmt) { lastStmt->SetRootNode(tree); } else { // gtNewTempAssign inserted additional statements after last fgRemoveStmt(block, lastStmt); Statement* newStmt = gtNewStmt(tree, di); fgInsertStmtAfter(block, pAfterStatement, newStmt); lastStmt = newStmt; } } else if (ret != nullptr && ret->OperGet() == GT_RETURN) { // This block ends with a GT_RETURN noway_assert(lastStmt != nullptr); noway_assert(lastStmt->GetNextStmt() == nullptr); // Must be a void GT_RETURN with null operand; delete it as this block branches to oneReturn // block noway_assert(ret->TypeGet() == TYP_VOID); noway_assert(ret->gtGetOp1() == nullptr); fgRemoveStmt(block, lastStmt); } JITDUMP("\nUpdate " FMT_BB " to jump to common return block.\n", block->bbNum); DISPBLOCK(block); if (block->hasProfileWeight()) { weight_t const oldWeight = genReturnBB->hasProfileWeight() ? genReturnBB->bbWeight : BB_ZERO_WEIGHT; weight_t const newWeight = oldWeight + block->bbWeight; JITDUMP("merging profile weight " FMT_WT " from " FMT_BB " to common return " FMT_BB "\n", block->bbWeight, block->bbNum, genReturnBB->bbNum); genReturnBB->setBBProfileWeight(newWeight); DISPBLOCK(genReturnBB); } } } /***************************************************************************** * * Make some decisions about the kind of code to generate. */ void Compiler::fgSetOptions() { #ifdef DEBUG /* Should we force fully interruptible code ? */ if (JitConfig.JitFullyInt() || compStressCompile(STRESS_GENERIC_VARN, 30)) { noway_assert(!codeGen->isGCTypeFixed()); SetInterruptible(true); } #endif if (opts.compDbgCode) { assert(!codeGen->isGCTypeFixed()); SetInterruptible(true); // debugging is easier this way ... } /* Assume we won't need an explicit stack frame if this is allowed */ if (compLocallocUsed) { codeGen->setFramePointerRequired(true); } #ifdef TARGET_X86 if (compTailCallUsed) codeGen->setFramePointerRequired(true); #endif // TARGET_X86 if (!opts.genFPopt) { codeGen->setFramePointerRequired(true); } // Assert that the EH table has been initialized by now. Note that // compHndBBtabAllocCount never decreases; it is a high-water mark // of table allocation. In contrast, compHndBBtabCount does shrink // if we delete a dead EH region, and if it shrinks to zero, the // table pointer compHndBBtab is unreliable. assert(compHndBBtabAllocCount >= info.compXcptnsCount); #ifdef TARGET_X86 // Note: this case, and the !X86 case below, should both use the // !X86 path. This would require a few more changes for X86 to use // compHndBBtabCount (the current number of EH clauses) instead of // info.compXcptnsCount (the number of EH clauses in IL), such as // in ehNeedsShadowSPslots(). 
This is because sometimes the IL has // an EH clause that we delete as statically dead code before we // get here, leaving no EH clauses left, and thus no requirement // to use a frame pointer because of EH. But until all the code uses // the same test, leave info.compXcptnsCount here. if (info.compXcptnsCount > 0) { codeGen->setFramePointerRequiredEH(true); } #else // !TARGET_X86 if (compHndBBtabCount > 0) { codeGen->setFramePointerRequiredEH(true); } #endif // TARGET_X86 #ifdef UNIX_X86_ABI if (info.compXcptnsCount > 0) { assert(!codeGen->isGCTypeFixed()); // Enforce fully interruptible codegen for funclet unwinding SetInterruptible(true); } #endif // UNIX_X86_ABI if (compMethodRequiresPInvokeFrame()) { codeGen->setFramePointerRequired(true); // Setup of Pinvoke frame currently requires an EBP style frame } if (info.compPublishStubParam) { codeGen->setFramePointerRequiredGCInfo(true); } if (compIsProfilerHookNeeded()) { codeGen->setFramePointerRequired(true); } if (info.compIsVarArgs) { // Code that initializes lvaVarargsBaseOfStkArgs requires this to be EBP relative. codeGen->setFramePointerRequiredGCInfo(true); } if (lvaReportParamTypeArg()) { codeGen->setFramePointerRequiredGCInfo(true); } // printf("method will %s be fully interruptible\n", GetInterruptible() ? " " : "not"); } /*****************************************************************************/ GenTree* Compiler::fgInitThisClass() { noway_assert(!compIsForInlining()); CORINFO_LOOKUP_KIND kind; info.compCompHnd->getLocationOfThisType(info.compMethodHnd, &kind); if (!kind.needsRuntimeLookup) { return fgGetSharedCCtor(info.compClassHnd); } else { #ifdef FEATURE_READYTORUN // Only CoreRT understands CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE. Don't do this on CoreCLR. if (opts.IsReadyToRun() && IsTargetAbi(CORINFO_CORERT_ABI)) { CORINFO_RESOLVED_TOKEN resolvedToken; memset(&resolvedToken, 0, sizeof(resolvedToken)); // We are in a shared method body, but maybe we don't need a runtime lookup after all. // This covers the case of a generic method on a non-generic type. if (!(info.compClassAttr & CORINFO_FLG_SHAREDINST)) { resolvedToken.hClass = info.compClassHnd; return impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF); } // We need a runtime lookup. GenTree* ctxTree = getRuntimeContextTree(kind.runtimeLookupKind); // CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE with a zeroed out resolvedToken means "get the static // base of the class that owns the method being compiled". If we're in this method, it means we're not // inlining and there's no ambiguity. return impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, TYP_BYREF, gtNewCallArgs(ctxTree), &kind); } #endif // Collectible types requires that for shared generic code, if we use the generic context paramter // that we report it. (This is a conservative approach, we could detect some cases particularly when the // context parameter is this that we don't need the eager reporting logic.) 
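        // Depending on how the generic context is supplied, the initialization call built
        // below uses either the 'this' object's method table plus the method handle
        // (CORINFO_LOOKUP_THISOBJ), the class handle argument (CORINFO_LOOKUP_CLASSPARAM),
        // or the method desc argument (CORINFO_LOOKUP_METHODPARAM).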
lvaGenericsContextInUse = true; switch (kind.runtimeLookupKind) { case CORINFO_LOOKUP_THISOBJ: { // This code takes a this pointer; but we need to pass the static method desc to get the right point in // the hierarchy GenTree* vtTree = gtNewLclvNode(info.compThisArg, TYP_REF); vtTree->gtFlags |= GTF_VAR_CONTEXT; // Vtable pointer of this object vtTree = gtNewMethodTableLookup(vtTree); GenTree* methodHnd = gtNewIconEmbMethHndNode(info.compMethodHnd); return gtNewHelperCallNode(CORINFO_HELP_INITINSTCLASS, TYP_VOID, gtNewCallArgs(vtTree, methodHnd)); } case CORINFO_LOOKUP_CLASSPARAM: { GenTree* vtTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); vtTree->gtFlags |= GTF_VAR_CONTEXT; return gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewCallArgs(vtTree)); } case CORINFO_LOOKUP_METHODPARAM: { GenTree* methHndTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); methHndTree->gtFlags |= GTF_VAR_CONTEXT; return gtNewHelperCallNode(CORINFO_HELP_INITINSTCLASS, TYP_VOID, gtNewCallArgs(gtNewIconNode(0), methHndTree)); } default: noway_assert(!"Unknown LOOKUP_KIND"); UNREACHABLE(); } } } #ifdef DEBUG /***************************************************************************** * * Tree walk callback to make sure no GT_QMARK nodes are present in the tree, * except for the allowed ? 1 : 0; pattern. */ Compiler::fgWalkResult Compiler::fgAssertNoQmark(GenTree** tree, fgWalkData* data) { if ((*tree)->OperGet() == GT_QMARK) { fgCheckQmarkAllowedForm(*tree); } return WALK_CONTINUE; } void Compiler::fgCheckQmarkAllowedForm(GenTree* tree) { assert(tree->OperGet() == GT_QMARK); assert(!"Qmarks beyond morph disallowed."); } /***************************************************************************** * * Verify that the importer has created GT_QMARK nodes in a way we can * process them. The following is allowed: * * 1. A top level qmark. Top level qmark is of the form: * a) (bool) ? (void) : (void) OR * b) V0N = (bool) ? (type) : (type) * * 2. Recursion is allowed at the top level, i.e., a GT_QMARK can be a child * of either op1 of colon or op2 of colon but not a child of any other * operator. */ void Compiler::fgPreExpandQmarkChecks(GenTree* expr) { GenTree* topQmark = fgGetTopLevelQmark(expr); // If the top level Qmark is null, then scan the tree to make sure // there are no qmarks within it. if (topQmark == nullptr) { fgWalkTreePre(&expr, Compiler::fgAssertNoQmark, nullptr); } else { // We could probably expand the cond node also, but don't think the extra effort is necessary, // so let's just assert the cond node of a top level qmark doesn't have further top level qmarks. fgWalkTreePre(&topQmark->AsOp()->gtOp1, Compiler::fgAssertNoQmark, nullptr); fgPreExpandQmarkChecks(topQmark->AsOp()->gtOp2->AsOp()->gtOp1); fgPreExpandQmarkChecks(topQmark->AsOp()->gtOp2->AsOp()->gtOp2); } } #endif // DEBUG /***************************************************************************** * * Get the top level GT_QMARK node in a given "expr", return NULL if such a * node is not present. If the top level GT_QMARK node is assigned to a * GT_LCL_VAR, then return the lcl node in ppDst. 
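 *  Both a bare QMARK and an assignment of a QMARK to a GT_LCL_VAR count as top level;
 *  in the assignment case the destination GT_LCL_VAR is returned via ppDst.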
* */ GenTree* Compiler::fgGetTopLevelQmark(GenTree* expr, GenTree** ppDst /* = NULL */) { if (ppDst != nullptr) { *ppDst = nullptr; } GenTree* topQmark = nullptr; if (expr->gtOper == GT_QMARK) { topQmark = expr; } else if (expr->gtOper == GT_ASG && expr->AsOp()->gtOp2->gtOper == GT_QMARK && expr->AsOp()->gtOp1->gtOper == GT_LCL_VAR) { topQmark = expr->AsOp()->gtOp2; if (ppDst != nullptr) { *ppDst = expr->AsOp()->gtOp1; } } return topQmark; } /********************************************************************************* * * For a castclass helper call, * Importer creates the following tree: * tmp = (op1 == null) ? op1 : ((*op1 == (cse = op2, cse)) ? op1 : helper()); * * This method splits the qmark expression created by the importer into the * following blocks: (block, asg, cond1, cond2, helper, remainder) * Notice that op1 is the result for both the conditions. So we coalesce these * assignments into a single block instead of two blocks resulting a nested diamond. * * +---------->-----------+ * | | | * ^ ^ v * | | | * block-->asg-->cond1--+-->cond2--+-->helper--+-->remainder * * We expect to achieve the following codegen: * mov rsi, rdx tmp = op1 // asgBlock * test rsi, rsi goto skip if tmp == null ? // cond1Block * je SKIP * mov rcx, 0x76543210 cns = op2 // cond2Block * cmp qword ptr [rsi], rcx goto skip if *tmp == op2 * je SKIP * call CORINFO_HELP_CHKCASTCLASS_SPECIAL tmp = helper(cns, tmp) // helperBlock * mov rsi, rax * SKIP: // remainderBlock * tmp has the result. * */ void Compiler::fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt) { #ifdef DEBUG if (verbose) { printf("\nExpanding CastInstOf qmark in " FMT_BB " (before)\n", block->bbNum); fgDispBasicBlocks(block, block, true); } #endif // DEBUG GenTree* expr = stmt->GetRootNode(); GenTree* dst = nullptr; GenTree* qmark = fgGetTopLevelQmark(expr, &dst); noway_assert(dst != nullptr); assert(qmark->gtFlags & GTF_QMARK_CAST_INSTOF); // Get cond, true, false exprs for the qmark. GenTree* condExpr = qmark->gtGetOp1(); GenTree* trueExpr = qmark->gtGetOp2()->AsColon()->ThenNode(); GenTree* falseExpr = qmark->gtGetOp2()->AsColon()->ElseNode(); // Get cond, true, false exprs for the nested qmark. GenTree* nestedQmark = falseExpr; GenTree* cond2Expr; GenTree* true2Expr; GenTree* false2Expr; if (nestedQmark->gtOper == GT_QMARK) { cond2Expr = nestedQmark->gtGetOp1(); true2Expr = nestedQmark->gtGetOp2()->AsColon()->ThenNode(); false2Expr = nestedQmark->gtGetOp2()->AsColon()->ElseNode(); } else { // This is a rare case that arises when we are doing minopts and encounter isinst of null // gtFoldExpr was still is able to optimize away part of the tree (but not all). // That means it does not match our pattern. // Rather than write code to handle this case, just fake up some nodes to make it match the common // case. Synthesize a comparison that is always true, and for the result-on-true, use the // entire subtree we expected to be the nested question op. cond2Expr = gtNewOperNode(GT_EQ, TYP_INT, gtNewIconNode(0, TYP_I_IMPL), gtNewIconNode(0, TYP_I_IMPL)); true2Expr = nestedQmark; false2Expr = gtNewIconNode(0, TYP_I_IMPL); } assert(false2Expr->OperGet() == trueExpr->OperGet()); // Create the chain of blocks. See method header comment. // The order of blocks after this is the following: // block ... asgBlock ... cond1Block ... cond2Block ... helperBlock ... 
remainderBlock // // We need to remember flags that exist on 'block' that we want to propagate to 'remainderBlock', // if they are going to be cleared by fgSplitBlockAfterStatement(). We currently only do this only // for the GC safe point bit, the logic being that if 'block' was marked gcsafe, then surely // remainderBlock will still be GC safe. BasicBlockFlags propagateFlags = block->bbFlags & BBF_GC_SAFE_POINT; BasicBlock* remainderBlock = fgSplitBlockAfterStatement(block, stmt); fgRemoveRefPred(remainderBlock, block); // We're going to put more blocks between block and remainderBlock. BasicBlock* helperBlock = fgNewBBafter(BBJ_NONE, block, true); BasicBlock* cond2Block = fgNewBBafter(BBJ_COND, block, true); BasicBlock* cond1Block = fgNewBBafter(BBJ_COND, block, true); BasicBlock* asgBlock = fgNewBBafter(BBJ_NONE, block, true); remainderBlock->bbFlags |= propagateFlags; // These blocks are only internal if 'block' is (but they've been set as internal by fgNewBBafter). // If they're not internal, mark them as imported to avoid asserts about un-imported blocks. if ((block->bbFlags & BBF_INTERNAL) == 0) { helperBlock->bbFlags &= ~BBF_INTERNAL; cond2Block->bbFlags &= ~BBF_INTERNAL; cond1Block->bbFlags &= ~BBF_INTERNAL; asgBlock->bbFlags &= ~BBF_INTERNAL; helperBlock->bbFlags |= BBF_IMPORTED; cond2Block->bbFlags |= BBF_IMPORTED; cond1Block->bbFlags |= BBF_IMPORTED; asgBlock->bbFlags |= BBF_IMPORTED; } // Chain the flow correctly. fgAddRefPred(asgBlock, block); fgAddRefPred(cond1Block, asgBlock); fgAddRefPred(cond2Block, cond1Block); fgAddRefPred(helperBlock, cond2Block); fgAddRefPred(remainderBlock, helperBlock); fgAddRefPred(remainderBlock, cond1Block); fgAddRefPred(remainderBlock, cond2Block); cond1Block->bbJumpDest = remainderBlock; cond2Block->bbJumpDest = remainderBlock; // Set the weights; some are guesses. asgBlock->inheritWeight(block); cond1Block->inheritWeight(block); cond2Block->inheritWeightPercentage(cond1Block, 50); helperBlock->inheritWeightPercentage(cond2Block, 50); // Append cond1 as JTRUE to cond1Block GenTree* jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, condExpr); Statement* jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo()); fgInsertStmtAtEnd(cond1Block, jmpStmt); // Append cond2 as JTRUE to cond2Block jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, cond2Expr); jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo()); fgInsertStmtAtEnd(cond2Block, jmpStmt); // AsgBlock should get tmp = op1 assignment. trueExpr = gtNewTempAssign(dst->AsLclVarCommon()->GetLclNum(), trueExpr); Statement* trueStmt = fgNewStmtFromTree(trueExpr, stmt->GetDebugInfo()); fgInsertStmtAtEnd(asgBlock, trueStmt); // Since we are adding helper in the JTRUE false path, reverse the cond2 and add the helper. gtReverseCond(cond2Expr); GenTree* helperExpr = gtNewTempAssign(dst->AsLclVarCommon()->GetLclNum(), true2Expr); Statement* helperStmt = fgNewStmtFromTree(helperExpr, stmt->GetDebugInfo()); fgInsertStmtAtEnd(helperBlock, helperStmt); // Finally remove the nested qmark stmt. fgRemoveStmt(block, stmt); if (true2Expr->OperIs(GT_CALL) && (true2Expr->AsCall()->gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN)) { fgConvertBBToThrowBB(helperBlock); } #ifdef DEBUG if (verbose) { printf("\nExpanding CastInstOf qmark in " FMT_BB " (after)\n", block->bbNum); fgDispBasicBlocks(block, remainderBlock, true); } #endif // DEBUG } /***************************************************************************** * * Expand a statement with a top level qmark node. 
There are three cases, based * on whether the qmark has both "true" and "false" arms, or just one of them. * * S0; * C ? T : F; * S1; * * Generates ===> * * bbj_always * +---->------+ * false | | * S0 -->-- ~C -->-- T F -->-- S1 * | | * +--->--------+ * bbj_cond(true) * * ----------------------------------------- * * S0; * C ? T : NOP; * S1; * * Generates ===> * * false * S0 -->-- ~C -->-- T -->-- S1 * | | * +-->-------------+ * bbj_cond(true) * * ----------------------------------------- * * S0; * C ? NOP : F; * S1; * * Generates ===> * * false * S0 -->-- C -->-- F -->-- S1 * | | * +-->------------+ * bbj_cond(true) * * If the qmark assigns to a variable, then create tmps for "then" * and "else" results and assign the temp to the variable as a writeback step. */ void Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt) { GenTree* expr = stmt->GetRootNode(); // Retrieve the Qmark node to be expanded. GenTree* dst = nullptr; GenTree* qmark = fgGetTopLevelQmark(expr, &dst); if (qmark == nullptr) { return; } if (qmark->gtFlags & GTF_QMARK_CAST_INSTOF) { fgExpandQmarkForCastInstOf(block, stmt); return; } #ifdef DEBUG if (verbose) { printf("\nExpanding top-level qmark in " FMT_BB " (before)\n", block->bbNum); fgDispBasicBlocks(block, block, true); } #endif // DEBUG // Retrieve the operands. GenTree* condExpr = qmark->gtGetOp1(); GenTree* trueExpr = qmark->gtGetOp2()->AsColon()->ThenNode(); GenTree* falseExpr = qmark->gtGetOp2()->AsColon()->ElseNode(); assert(!varTypeIsFloating(condExpr->TypeGet())); bool hasTrueExpr = (trueExpr->OperGet() != GT_NOP); bool hasFalseExpr = (falseExpr->OperGet() != GT_NOP); assert(hasTrueExpr || hasFalseExpr); // We expect to have at least one arm of the qmark! // Create remainder, cond and "else" blocks. After this, the blocks are in this order: // block ... condBlock ... elseBlock ... remainderBlock // // We need to remember flags that exist on 'block' that we want to propagate to 'remainderBlock', // if they are going to be cleared by fgSplitBlockAfterStatement(). We currently only do this only // for the GC safe point bit, the logic being that if 'block' was marked gcsafe, then surely // remainderBlock will still be GC safe. BasicBlockFlags propagateFlags = block->bbFlags & BBF_GC_SAFE_POINT; BasicBlock* remainderBlock = fgSplitBlockAfterStatement(block, stmt); fgRemoveRefPred(remainderBlock, block); // We're going to put more blocks between block and remainderBlock. BasicBlock* condBlock = fgNewBBafter(BBJ_COND, block, true); BasicBlock* elseBlock = fgNewBBafter(BBJ_NONE, condBlock, true); // These blocks are only internal if 'block' is (but they've been set as internal by fgNewBBafter). // If they're not internal, mark them as imported to avoid asserts about un-imported blocks. 
if ((block->bbFlags & BBF_INTERNAL) == 0) { condBlock->bbFlags &= ~BBF_INTERNAL; elseBlock->bbFlags &= ~BBF_INTERNAL; condBlock->bbFlags |= BBF_IMPORTED; elseBlock->bbFlags |= BBF_IMPORTED; } remainderBlock->bbFlags |= propagateFlags; condBlock->inheritWeight(block); fgAddRefPred(condBlock, block); fgAddRefPred(elseBlock, condBlock); fgAddRefPred(remainderBlock, elseBlock); BasicBlock* thenBlock = nullptr; if (hasTrueExpr && hasFalseExpr) { // bbj_always // +---->------+ // false | | // S0 -->-- ~C -->-- T F -->-- S1 // | | // +--->--------+ // bbj_cond(true) // gtReverseCond(condExpr); condBlock->bbJumpDest = elseBlock; thenBlock = fgNewBBafter(BBJ_ALWAYS, condBlock, true); thenBlock->bbJumpDest = remainderBlock; if ((block->bbFlags & BBF_INTERNAL) == 0) { thenBlock->bbFlags &= ~BBF_INTERNAL; thenBlock->bbFlags |= BBF_IMPORTED; } fgAddRefPred(thenBlock, condBlock); fgAddRefPred(remainderBlock, thenBlock); thenBlock->inheritWeightPercentage(condBlock, 50); elseBlock->inheritWeightPercentage(condBlock, 50); } else if (hasTrueExpr) { // false // S0 -->-- ~C -->-- T -->-- S1 // | | // +-->-------------+ // bbj_cond(true) // gtReverseCond(condExpr); condBlock->bbJumpDest = remainderBlock; fgAddRefPred(remainderBlock, condBlock); // Since we have no false expr, use the one we'd already created. thenBlock = elseBlock; elseBlock = nullptr; thenBlock->inheritWeightPercentage(condBlock, 50); } else if (hasFalseExpr) { // false // S0 -->-- C -->-- F -->-- S1 // | | // +-->------------+ // bbj_cond(true) // condBlock->bbJumpDest = remainderBlock; fgAddRefPred(remainderBlock, condBlock); elseBlock->inheritWeightPercentage(condBlock, 50); } GenTree* jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, qmark->gtGetOp1()); Statement* jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo()); fgInsertStmtAtEnd(condBlock, jmpStmt); // Remove the original qmark statement. fgRemoveStmt(block, stmt); // Since we have top level qmarks, we either have a dst for it in which case // we need to create tmps for true and falseExprs, else just don't bother // assigning. unsigned lclNum = BAD_VAR_NUM; if (dst != nullptr) { assert(dst->gtOper == GT_LCL_VAR); lclNum = dst->AsLclVar()->GetLclNum(); } else { assert(qmark->TypeGet() == TYP_VOID); } if (hasTrueExpr) { if (dst != nullptr) { trueExpr = gtNewTempAssign(lclNum, trueExpr); } Statement* trueStmt = fgNewStmtFromTree(trueExpr, stmt->GetDebugInfo()); fgInsertStmtAtEnd(thenBlock, trueStmt); } // Assign the falseExpr into the dst or tmp, insert in elseBlock if (hasFalseExpr) { if (dst != nullptr) { falseExpr = gtNewTempAssign(lclNum, falseExpr); } Statement* falseStmt = fgNewStmtFromTree(falseExpr, stmt->GetDebugInfo()); fgInsertStmtAtEnd(elseBlock, falseStmt); } #ifdef DEBUG if (verbose) { printf("\nExpanding top-level qmark in " FMT_BB " (after)\n", block->bbNum); fgDispBasicBlocks(block, remainderBlock, true); } #endif // DEBUG } /***************************************************************************** * * Expand GT_QMARK nodes from the flow graph into basic blocks. * */ void Compiler::fgExpandQmarkNodes() { if (compQmarkUsed) { for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { GenTree* expr = stmt->GetRootNode(); #ifdef DEBUG fgPreExpandQmarkChecks(expr); #endif fgExpandQmarkStmt(block, stmt); } } #ifdef DEBUG fgPostExpandQmarkChecks(); #endif } compQmarkRationalized = true; } #ifdef DEBUG /***************************************************************************** * * Make sure we don't have any more GT_QMARK nodes. 
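 * (DEBUG-only check, invoked at the end of fgExpandQmarkNodes.)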
* */ void Compiler::fgPostExpandQmarkChecks() { for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { GenTree* expr = stmt->GetRootNode(); fgWalkTreePre(&expr, Compiler::fgAssertNoQmark, nullptr); } } } #endif /***************************************************************************** * * Promoting struct locals */ void Compiler::fgPromoteStructs() { #ifdef DEBUG if (verbose) { printf("*************** In fgPromoteStructs()\n"); } #endif // DEBUG if (!opts.OptEnabled(CLFLG_STRUCTPROMOTE)) { JITDUMP(" promotion opt flag not enabled\n"); return; } if (fgNoStructPromotion) { JITDUMP(" promotion disabled by JitNoStructPromotion\n"); return; } #if 0 // The code in this #if has been useful in debugging struct promotion issues, by // enabling selective enablement of the struct promotion optimization according to // method hash. #ifdef DEBUG unsigned methHash = info.compMethodHash(); char* lostr = getenv("structpromohashlo"); unsigned methHashLo = 0; if (lostr != NULL) { sscanf_s(lostr, "%x", &methHashLo); } char* histr = getenv("structpromohashhi"); unsigned methHashHi = UINT32_MAX; if (histr != NULL) { sscanf_s(histr, "%x", &methHashHi); } if (methHash < methHashLo || methHash > methHashHi) { return; } else { printf("Promoting structs for method %s, hash = 0x%x.\n", info.compFullName, info.compMethodHash()); printf(""); // in our logic this causes a flush } #endif // DEBUG #endif // 0 if (info.compIsVarArgs) { JITDUMP(" promotion disabled because of varargs\n"); return; } #ifdef DEBUG if (verbose) { printf("\nlvaTable before fgPromoteStructs\n"); lvaTableDump(); } #endif // DEBUG // The lvaTable might grow as we grab temps. Make a local copy here. unsigned startLvaCount = lvaCount; // // Loop through the original lvaTable. Looking for struct locals to be promoted. // lvaStructPromotionInfo structPromotionInfo; bool tooManyLocalsReported = false; // Clear the structPromotionHelper, since it is used during inlining, at which point it // may be conservative about looking up SIMD info. // We don't want to preserve those conservative decisions for the actual struct promotion. structPromotionHelper->Clear(); for (unsigned lclNum = 0; lclNum < startLvaCount; lclNum++) { // Whether this var got promoted bool promotedVar = false; LclVarDsc* varDsc = lvaGetDesc(lclNum); // If we have marked this as lvUsedInSIMDIntrinsic, then we do not want to promote // its fields. Instead, we will attempt to enregister the entire struct. if (varDsc->lvIsSIMDType() && (varDsc->lvIsUsedInSIMDIntrinsic() || isOpaqueSIMDLclVar(varDsc))) { varDsc->lvRegStruct = true; } // Don't promote if we have reached the tracking limit. else if (lvaHaveManyLocals()) { // Print the message first time when we detected this condition if (!tooManyLocalsReported) { JITDUMP("Stopped promoting struct fields, due to too many locals.\n"); } tooManyLocalsReported = true; } else if (varTypeIsStruct(varDsc)) { assert(structPromotionHelper != nullptr); promotedVar = structPromotionHelper->TryPromoteStructVar(lclNum); } if (!promotedVar && varDsc->lvIsSIMDType() && !varDsc->lvFieldAccessed) { // Even if we have not used this in a SIMD intrinsic, if it is not being promoted, // we will treat it as a reg struct. 
varDsc->lvRegStruct = true; } } #ifdef DEBUG if (verbose) { printf("\nlvaTable after fgPromoteStructs\n"); lvaTableDump(); } #endif // DEBUG } void Compiler::fgMorphStructField(GenTree* tree, GenTree* parent) { noway_assert(tree->OperGet() == GT_FIELD); GenTreeField* field = tree->AsField(); GenTree* objRef = field->GetFldObj(); GenTree* obj = ((objRef != nullptr) && (objRef->gtOper == GT_ADDR)) ? objRef->AsOp()->gtOp1 : nullptr; noway_assert((tree->gtFlags & GTF_GLOB_REF) || ((obj != nullptr) && (obj->gtOper == GT_LCL_VAR))); /* Is this an instance data member? */ if ((obj != nullptr) && (obj->gtOper == GT_LCL_VAR)) { unsigned lclNum = obj->AsLclVarCommon()->GetLclNum(); const LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varTypeIsStruct(obj)) { if (varDsc->lvPromoted) { // Promoted struct unsigned fldOffset = field->gtFldOffset; unsigned fieldLclIndex = lvaGetFieldLocal(varDsc, fldOffset); if (fieldLclIndex == BAD_VAR_NUM) { // Access a promoted struct's field with an offset that doesn't correspond to any field. // It can happen if the struct was cast to another struct with different offsets. return; } const LclVarDsc* fieldDsc = lvaGetDesc(fieldLclIndex); var_types fieldType = fieldDsc->TypeGet(); assert(fieldType != TYP_STRUCT); // promoted LCL_VAR can't have a struct type. if (tree->TypeGet() != fieldType) { if (tree->TypeGet() != TYP_STRUCT) { // This is going to be an incorrect instruction promotion. // For example when we try to read int as long. return; } if (field->gtFldHnd != fieldDsc->lvFieldHnd) { CORINFO_CLASS_HANDLE fieldTreeClass = nullptr, fieldDscClass = nullptr; CorInfoType fieldTreeType = info.compCompHnd->getFieldType(field->gtFldHnd, &fieldTreeClass); CorInfoType fieldDscType = info.compCompHnd->getFieldType(fieldDsc->lvFieldHnd, &fieldDscClass); if (fieldTreeType != fieldDscType || fieldTreeClass != fieldDscClass) { // Access the promoted field with a different class handle, can't check that types match. return; } // Access the promoted field as a field of a non-promoted struct with the same class handle. } else { // As we already checked this above, we must have a tree with a TYP_STRUCT type // assert(tree->TypeGet() == TYP_STRUCT); // The field tree accesses it as a struct, but the promoted LCL_VAR field // says that it has another type. This happens when struct promotion unwraps // a single field struct to get to its ultimate type. // // Note that currently, we cannot have a promoted LCL_VAR field with a struct type. // // This mismatch in types can lead to problems for some parent node type like GT_RETURN. // So we check the parent node and only allow this optimization when we have // a GT_ADDR or a GT_ASG. // // Note that for a GT_ASG we have to do some additional work, // see below after the SetOper(GT_LCL_VAR) // if (!parent->OperIs(GT_ADDR, GT_ASG)) { // Don't transform other operations such as GT_RETURN // return; } #ifdef DEBUG // This is an additional DEBUG-only sanity check // assert(structPromotionHelper != nullptr); structPromotionHelper->CheckRetypedAsScalar(field->gtFldHnd, fieldType); #endif // DEBUG } } tree->SetOper(GT_LCL_VAR); tree->AsLclVarCommon()->SetLclNum(fieldLclIndex); tree->gtType = fieldType; tree->gtFlags &= GTF_NODE_MASK; // Note: that clears all flags except `GTF_COLON_COND`. 
if (parent->gtOper == GT_ASG) { // If we are changing the left side of an assignment, we need to set // these two flags: // if (parent->AsOp()->gtOp1 == tree) { tree->gtFlags |= GTF_VAR_DEF; tree->gtFlags |= GTF_DONT_CSE; } // Promotion of struct containing struct fields where the field // is a struct with a single pointer sized scalar type field: in // this case struct promotion uses the type of the underlying // scalar field as the type of struct field instead of recursively // promoting. This can lead to a case where we have a block-asgn // with its RHS replaced with a scalar type. Mark RHS value as // DONT_CSE so that assertion prop will not do const propagation. // The reason this is required is that if RHS of a block-asg is a // constant, then it is interpreted as init-block incorrectly. // // TODO - This can also be avoided if we implement recursive struct // promotion, tracked by #10019. if (varTypeIsStruct(parent) && parent->AsOp()->gtOp2 == tree && !varTypeIsStruct(tree)) { tree->gtFlags |= GTF_DONT_CSE; } } #ifdef DEBUG if (verbose) { printf("Replacing the field in promoted struct with local var V%02u\n", fieldLclIndex); } #endif // DEBUG } } else { // Normed struct // A "normed struct" is a struct that the VM tells us is a basic type. This can only happen if // the struct contains a single element, and that element is 4 bytes (on x64 it can also be 8 // bytes). Normally, the type of the local var and the type of GT_FIELD are equivalent. However, // there is one extremely rare case where that won't be true. An enum type is a special value type // that contains exactly one element of a primitive integer type (that, for CLS programs is named // "value__"). The VM tells us that a local var of that enum type is the primitive type of the // enum's single field. It turns out that it is legal for IL to access this field using ldflda or // ldfld. For example: // // .class public auto ansi sealed mynamespace.e_t extends [mscorlib]System.Enum // { // .field public specialname rtspecialname int16 value__ // .field public static literal valuetype mynamespace.e_t one = int16(0x0000) // } // .method public hidebysig static void Main() cil managed // { // .locals init (valuetype mynamespace.e_t V_0) // ... // ldloca.s V_0 // ldflda int16 mynamespace.e_t::value__ // ... // } // // Normally, compilers will not generate the ldflda, since it is superfluous. // // In the example, the lclVar is short, but the JIT promotes all trees using this local to the // "actual type", that is, INT. But the GT_FIELD is still SHORT. So, in the case of a type // mismatch like this, don't do this morphing. The local var may end up getting marked as // address taken, and the appropriate SHORT load will be done from memory in that case. 
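            // So the GT_FIELD is rewritten to a GT_LCL_VAR below only when its type matches
            // the type of the (normed) local var node.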
if (tree->TypeGet() == obj->TypeGet()) { tree->ChangeOper(GT_LCL_VAR); tree->AsLclVarCommon()->SetLclNum(lclNum); tree->gtFlags &= GTF_NODE_MASK; if ((parent->gtOper == GT_ASG) && (parent->AsOp()->gtOp1 == tree)) { tree->gtFlags |= GTF_VAR_DEF; tree->gtFlags |= GTF_DONT_CSE; } #ifdef DEBUG if (verbose) { printf("Replacing the field in normed struct with local var V%02u\n", lclNum); } #endif // DEBUG } } } } void Compiler::fgMorphLocalField(GenTree* tree, GenTree* parent) { noway_assert(tree->OperGet() == GT_LCL_FLD); unsigned lclNum = tree->AsLclFld()->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varTypeIsStruct(varDsc)) { if (varDsc->lvPromoted) { // Promoted struct unsigned fldOffset = tree->AsLclFld()->GetLclOffs(); unsigned fieldLclIndex = 0; LclVarDsc* fldVarDsc = nullptr; if (fldOffset != BAD_VAR_NUM) { fieldLclIndex = lvaGetFieldLocal(varDsc, fldOffset); noway_assert(fieldLclIndex != BAD_VAR_NUM); fldVarDsc = lvaGetDesc(fieldLclIndex); } var_types treeType = tree->TypeGet(); var_types fieldType = fldVarDsc->TypeGet(); if (fldOffset != BAD_VAR_NUM && ((genTypeSize(fieldType) == genTypeSize(treeType)) || (varDsc->lvFieldCnt == 1))) { // There is an existing sub-field we can use. tree->AsLclFld()->SetLclNum(fieldLclIndex); // The field must be an enregisterable type; otherwise it would not be a promoted field. // The tree type may not match, e.g. for return types that have been morphed, but both // must be enregisterable types. assert(varTypeIsEnregisterable(treeType) && varTypeIsEnregisterable(fieldType)); tree->ChangeOper(GT_LCL_VAR); assert(tree->AsLclVarCommon()->GetLclNum() == fieldLclIndex); tree->gtType = fldVarDsc->TypeGet(); if ((parent->gtOper == GT_ASG) && (parent->AsOp()->gtOp1 == tree)) { tree->gtFlags |= GTF_VAR_DEF; tree->gtFlags |= GTF_DONT_CSE; } JITDUMP("Replacing the GT_LCL_FLD in promoted struct with local var V%02u\n", fieldLclIndex); } else { // There is no existing field that has all the parts that we need // So we must ensure that the struct lives in memory. lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LocalField)); #ifdef DEBUG // We can't convert this guy to a float because he really does have his // address taken.. varDsc->lvKeepType = 1; #endif // DEBUG } } else if (varTypeIsSIMD(varDsc) && (genTypeSize(tree->TypeGet()) == genTypeSize(varDsc))) { assert(tree->AsLclFld()->GetLclOffs() == 0); tree->gtType = varDsc->TypeGet(); tree->ChangeOper(GT_LCL_VAR); JITDUMP("Replacing GT_LCL_FLD of struct with local var V%02u\n", lclNum); } } } //------------------------------------------------------------------------ // fgResetImplicitByRefRefCount: Clear the ref count field of all implicit byrefs void Compiler::fgResetImplicitByRefRefCount() { #if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) #ifdef DEBUG if (verbose) { printf("\n*************** In fgResetImplicitByRefRefCount()\n"); } #endif // DEBUG for (unsigned lclNum = 0; lclNum < info.compArgsCount; ++lclNum) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->lvIsImplicitByRef) { // Clear the ref count field; fgMarkAddressTakenLocals will increment it per // appearance of implicit-by-ref param so that call arg morphing can do an // optimization for single-use implicit-by-ref params whose single use is as // an outgoing call argument. 
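            // Both the early ref count and the early weighted ref count are reset here;
            // fgRetypeImplicitByRefArgs later reads the weighted count as the number of
            // call-argument appearances when deciding whether to undo promotion.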
varDsc->setLvRefCnt(0, RCS_EARLY); varDsc->setLvRefCntWtd(0, RCS_EARLY); } } #endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } //------------------------------------------------------------------------ // fgRetypeImplicitByRefArgs: Update the types on implicit byref parameters' `LclVarDsc`s (from // struct to pointer). Also choose (based on address-exposed analysis) // which struct promotions of implicit byrefs to keep or discard. // For those which are kept, insert the appropriate initialization code. // For those which are to be discarded, annotate the promoted field locals // so that fgMorphImplicitByRefArgs will know to rewrite their appearances // using indirections off the pointer parameters. void Compiler::fgRetypeImplicitByRefArgs() { #if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) #ifdef DEBUG if (verbose) { printf("\n*************** In fgRetypeImplicitByRefArgs()\n"); } #endif // DEBUG for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (lvaIsImplicitByRefLocal(lclNum)) { unsigned size; if (varDsc->lvSize() > REGSIZE_BYTES) { size = varDsc->lvSize(); } else { CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd(); size = info.compCompHnd->getClassSize(typeHnd); } if (varDsc->lvPromoted) { // This implicit-by-ref was promoted; create a new temp to represent the // promoted struct before rewriting this parameter as a pointer. unsigned newLclNum = lvaGrabTemp(false DEBUGARG("Promoted implicit byref")); lvaSetStruct(newLclNum, lvaGetStruct(lclNum), true); if (info.compIsVarArgs) { lvaSetStructUsedAsVarArg(newLclNum); } // Update varDsc since lvaGrabTemp might have re-allocated the var dsc array. varDsc = lvaGetDesc(lclNum); // Copy the struct promotion annotations to the new temp. LclVarDsc* newVarDsc = lvaGetDesc(newLclNum); newVarDsc->lvPromoted = true; newVarDsc->lvFieldLclStart = varDsc->lvFieldLclStart; newVarDsc->lvFieldCnt = varDsc->lvFieldCnt; newVarDsc->lvContainsHoles = varDsc->lvContainsHoles; newVarDsc->lvCustomLayout = varDsc->lvCustomLayout; #ifdef DEBUG newVarDsc->lvKeepType = true; #endif // DEBUG // Propagate address-taken-ness and do-not-enregister-ness. newVarDsc->SetAddressExposed(varDsc->IsAddressExposed() DEBUGARG(varDsc->GetAddrExposedReason())); newVarDsc->lvDoNotEnregister = varDsc->lvDoNotEnregister; newVarDsc->lvLiveInOutOfHndlr = varDsc->lvLiveInOutOfHndlr; newVarDsc->lvSingleDef = varDsc->lvSingleDef; newVarDsc->lvSingleDefRegCandidate = varDsc->lvSingleDefRegCandidate; newVarDsc->lvSpillAtSingleDef = varDsc->lvSpillAtSingleDef; #ifdef DEBUG newVarDsc->SetDoNotEnregReason(varDsc->GetDoNotEnregReason()); #endif // DEBUG // If the promotion is dependent, the promoted temp would just be committed // to memory anyway, so we'll rewrite its appearances to be indirections // through the pointer parameter, the same as we'd do for this // parameter if it weren't promoted at all (otherwise the initialization // of the new temp would just be a needless memcpy at method entry). // // Otherwise, see how many appearances there are. We keep two early ref counts: total // number of references to the struct or some field, and how many of these are // arguments to calls. We undo promotion unless we see enough non-call uses. 
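                // For example, a promoted two-field struct parameter with 3 total appearances,
                // 2 of which are call arguments, has only 1 non-call appearance; since
                // 1 <= 2 (the field count), its promotion is undone.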
// const unsigned totalAppearances = varDsc->lvRefCnt(RCS_EARLY); const unsigned callAppearances = (unsigned)varDsc->lvRefCntWtd(RCS_EARLY); assert(totalAppearances >= callAppearances); const unsigned nonCallAppearances = totalAppearances - callAppearances; bool undoPromotion = ((lvaGetPromotionType(newVarDsc) == PROMOTION_TYPE_DEPENDENT) || (nonCallAppearances <= varDsc->lvFieldCnt)); #ifdef DEBUG // Above is a profitability heurisic; either value of // undoPromotion should lead to correct code. So, // under stress, make different decisions at times. if (compStressCompile(STRESS_BYREF_PROMOTION, 25)) { undoPromotion = !undoPromotion; JITDUMP("Stress -- changing byref undo promotion for V%02u to %s undo\n", lclNum, undoPromotion ? "" : "NOT"); } #endif // DEBUG JITDUMP("%s promotion of implicit by-ref V%02u: %s total: %u non-call: %u fields: %u\n", undoPromotion ? "Undoing" : "Keeping", lclNum, (lvaGetPromotionType(newVarDsc) == PROMOTION_TYPE_DEPENDENT) ? "dependent;" : "", totalAppearances, nonCallAppearances, varDsc->lvFieldCnt); if (!undoPromotion) { // Insert IR that initializes the temp from the parameter. // LHS is a simple reference to the temp. fgEnsureFirstBBisScratch(); GenTree* lhs = gtNewLclvNode(newLclNum, varDsc->lvType); // RHS is an indirection (using GT_OBJ) off the parameter. GenTree* addr = gtNewLclvNode(lclNum, TYP_BYREF); GenTree* rhs = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, addr, typGetBlkLayout(size)); GenTree* assign = gtNewAssignNode(lhs, rhs); fgNewStmtAtBeg(fgFirstBB, assign); } // Update the locals corresponding to the promoted fields. unsigned fieldLclStart = varDsc->lvFieldLclStart; unsigned fieldCount = varDsc->lvFieldCnt; unsigned fieldLclStop = fieldLclStart + fieldCount; for (unsigned fieldLclNum = fieldLclStart; fieldLclNum < fieldLclStop; ++fieldLclNum) { LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum); if (undoPromotion) { // Leave lvParentLcl pointing to the parameter so that fgMorphImplicitByRefArgs // will know to rewrite appearances of this local. assert(fieldVarDsc->lvParentLcl == lclNum); } else { // Set the new parent. fieldVarDsc->lvParentLcl = newLclNum; } fieldVarDsc->lvIsParam = false; // The fields shouldn't inherit any register preferences from // the parameter which is really a pointer to the struct. fieldVarDsc->lvIsRegArg = false; fieldVarDsc->lvIsMultiRegArg = false; fieldVarDsc->SetArgReg(REG_NA); #if FEATURE_MULTIREG_ARGS fieldVarDsc->SetOtherArgReg(REG_NA); #endif } // Hijack lvFieldLclStart to record the new temp number. // It will get fixed up in fgMarkDemotedImplicitByRefArgs. varDsc->lvFieldLclStart = newLclNum; // Go ahead and clear lvFieldCnt -- either we're promoting // a replacement temp or we're not promoting this arg, and // in either case the parameter is now a pointer that doesn't // have these fields. varDsc->lvFieldCnt = 0; // Hijack lvPromoted to communicate to fgMorphImplicitByRefArgs // whether references to the struct should be rewritten as // indirections off the pointer (not promoted) or references // to the new struct local (promoted). varDsc->lvPromoted = !undoPromotion; } else { // The "undo promotion" path above clears lvPromoted for args that struct // promotion wanted to promote but that aren't considered profitable to // rewrite. It hijacks lvFieldLclStart to communicate to // fgMarkDemotedImplicitByRefArgs that it needs to clean up annotations left // on such args for fgMorphImplicitByRefArgs to consult in the interim. 
// Here we have an arg that was simply never promoted, so make sure it doesn't // have nonzero lvFieldLclStart, since that would confuse fgMorphImplicitByRefArgs // and fgMarkDemotedImplicitByRefArgs. assert(varDsc->lvFieldLclStart == 0); } // Since the parameter in this position is really a pointer, its type is TYP_BYREF. varDsc->lvType = TYP_BYREF; // Since this previously was a TYP_STRUCT and we have changed it to a TYP_BYREF // make sure that the following flag is not set as these will force SSA to // exclude tracking/enregistering these LclVars. (see SsaBuilder::IncludeInSsa) // varDsc->lvOverlappingFields = 0; // This flag could have been set, clear it. // The struct parameter may have had its address taken, but the pointer parameter // cannot -- any uses of the struct parameter's address are uses of the pointer // parameter's value, and there's no way for the MSIL to reference the pointer // parameter's address. So clear the address-taken bit for the parameter. varDsc->CleanAddressExposed(); varDsc->lvDoNotEnregister = 0; #ifdef DEBUG // This should not be converted to a double in stress mode, // because it is really a pointer varDsc->lvKeepType = 1; if (verbose) { printf("Changing the lvType for struct parameter V%02d to TYP_BYREF.\n", lclNum); } #endif // DEBUG } } #endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } //------------------------------------------------------------------------ // fgMarkDemotedImplicitByRefArgs: Clear annotations for any implicit byrefs that struct promotion // asked to promote. Appearances of these have now been rewritten // (by fgMorphImplicitByRefArgs) using indirections from the pointer // parameter or references to the promotion temp, as appropriate. void Compiler::fgMarkDemotedImplicitByRefArgs() { JITDUMP("\n*************** In fgMarkDemotedImplicitByRefArgs()\n"); #if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (lvaIsImplicitByRefLocal(lclNum)) { JITDUMP("Clearing annotation for V%02d\n", lclNum); if (varDsc->lvPromoted) { // The parameter is simply a pointer now, so clear lvPromoted. It was left set // by fgRetypeImplicitByRefArgs to communicate to fgMorphImplicitByRefArgs that // appearances of this arg needed to be rewritten to a new promoted struct local. varDsc->lvPromoted = false; // Clear the lvFieldLclStart value that was set by fgRetypeImplicitByRefArgs // to tell fgMorphImplicitByRefArgs which local is the new promoted struct one. varDsc->lvFieldLclStart = 0; } else if (varDsc->lvFieldLclStart != 0) { // We created new temps to represent a promoted struct corresponding to this // parameter, but decided not to go through with the promotion and have // rewritten all uses as indirections off the pointer parameter. // We stashed the pointer to the new struct temp in lvFieldLclStart; make // note of that and clear the annotation. unsigned structLclNum = varDsc->lvFieldLclStart; varDsc->lvFieldLclStart = 0; // The temp struct is now unused; set flags appropriately so that we // won't allocate space for it on the stack. 
LclVarDsc* structVarDsc = lvaGetDesc(structLclNum); structVarDsc->CleanAddressExposed(); #ifdef DEBUG structVarDsc->lvUnusedStruct = true; structVarDsc->lvUndoneStructPromotion = true; #endif // DEBUG unsigned fieldLclStart = structVarDsc->lvFieldLclStart; unsigned fieldCount = structVarDsc->lvFieldCnt; unsigned fieldLclStop = fieldLclStart + fieldCount; for (unsigned fieldLclNum = fieldLclStart; fieldLclNum < fieldLclStop; ++fieldLclNum) { JITDUMP("Fixing pointer for field V%02d from V%02d to V%02d\n", fieldLclNum, lclNum, structLclNum); // Fix the pointer to the parent local. LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum); assert(fieldVarDsc->lvParentLcl == lclNum); fieldVarDsc->lvParentLcl = structLclNum; // The field local is now unused; set flags appropriately so that // we won't allocate stack space for it. fieldVarDsc->CleanAddressExposed(); } } } } #endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } /***************************************************************************** * * Morph irregular parameters * for x64 and ARM64 this means turning them into byrefs, adding extra indirs. */ bool Compiler::fgMorphImplicitByRefArgs(GenTree* tree) { #if (!defined(TARGET_AMD64) || defined(UNIX_AMD64_ABI)) && !defined(TARGET_ARM64) return false; #else // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 bool changed = false; // Implicit byref morphing needs to know if the reference to the parameter is a // child of GT_ADDR or not, so this method looks one level down and does the // rewrite whenever a child is a reference to an implicit byref parameter. if (tree->gtOper == GT_ADDR) { if (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) { GenTree* morphedTree = fgMorphImplicitByRefArgs(tree, true); changed = (morphedTree != nullptr); assert(!changed || (morphedTree == tree)); } } else { for (GenTree** pTree : tree->UseEdges()) { GenTree** pTreeCopy = pTree; GenTree* childTree = *pTree; if (childTree->gtOper == GT_LCL_VAR) { GenTree* newChildTree = fgMorphImplicitByRefArgs(childTree, false); if (newChildTree != nullptr) { changed = true; *pTreeCopy = newChildTree; } } } } return changed; #endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } GenTree* Compiler::fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr) { assert((tree->gtOper == GT_LCL_VAR) || ((tree->gtOper == GT_ADDR) && (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR))); assert(isAddr == (tree->gtOper == GT_ADDR)); GenTree* lclVarTree = isAddr ? tree->AsOp()->gtOp1 : tree; unsigned lclNum = lclVarTree->AsLclVarCommon()->GetLclNum(); LclVarDsc* lclVarDsc = lvaGetDesc(lclNum); CORINFO_FIELD_HANDLE fieldHnd; unsigned fieldOffset = 0; var_types fieldRefType = TYP_UNKNOWN; if (lvaIsImplicitByRefLocal(lclNum)) { // The SIMD transformation to coalesce contiguous references to SIMD vector fields will // re-invoke the traversal to mark address-taken locals. // So, we may encounter a tree that has already been transformed to TYP_BYREF. // If we do, leave it as-is. if (!varTypeIsStruct(lclVarTree)) { assert(lclVarTree->TypeGet() == TYP_BYREF); return nullptr; } else if (lclVarDsc->lvPromoted) { // fgRetypeImplicitByRefArgs created a new promoted struct local to represent this // arg. Rewrite this to refer to the new local. 
assert(lclVarDsc->lvFieldLclStart != 0); lclVarTree->AsLclVarCommon()->SetLclNum(lclVarDsc->lvFieldLclStart); return tree; } fieldHnd = nullptr; } else if (lclVarDsc->lvIsStructField && lvaIsImplicitByRefLocal(lclVarDsc->lvParentLcl)) { // This was a field reference to an implicit-by-reference struct parameter that was // dependently promoted; update it to a field reference off the pointer. // Grab the field handle from the struct field lclVar. fieldHnd = lclVarDsc->lvFieldHnd; fieldOffset = lclVarDsc->lvFldOffset; assert(fieldHnd != nullptr); // Update lclNum/lclVarDsc to refer to the parameter lclNum = lclVarDsc->lvParentLcl; lclVarDsc = lvaGetDesc(lclNum); fieldRefType = lclVarTree->TypeGet(); } else { // We only need to tranform the 'marked' implicit by ref parameters return nullptr; } // This is no longer a def of the lclVar, even if it WAS a def of the struct. lclVarTree->gtFlags &= ~(GTF_LIVENESS_MASK); if (isAddr) { if (fieldHnd == nullptr) { // change &X into just plain X tree->ReplaceWith(lclVarTree, this); tree->gtType = TYP_BYREF; } else { // change &(X.f) [i.e. GT_ADDR of local for promoted arg field] // into &(X, f) [i.e. GT_ADDR of GT_FIELD off ptr param] lclVarTree->AsLclVarCommon()->SetLclNum(lclNum); lclVarTree->gtType = TYP_BYREF; tree->AsOp()->gtOp1 = gtNewFieldRef(fieldRefType, fieldHnd, lclVarTree, fieldOffset); } #ifdef DEBUG if (verbose) { printf("Replacing address of implicit by ref struct parameter with byref:\n"); } #endif // DEBUG } else { // Change X into OBJ(X) or FIELD(X, f) var_types structType = tree->gtType; tree->gtType = TYP_BYREF; if (fieldHnd) { tree->AsLclVarCommon()->SetLclNum(lclNum); tree = gtNewFieldRef(fieldRefType, fieldHnd, tree, fieldOffset); } else { tree = gtNewObjNode(lclVarDsc->GetStructHnd(), tree); if (structType == TYP_STRUCT) { gtSetObjGcInfo(tree->AsObj()); } } // TODO-CQ: If the VM ever stops violating the ABI and passing heap references // we could remove TGTANYWHERE tree->gtFlags = ((tree->gtFlags & GTF_COMMON_MASK) | GTF_IND_TGTANYWHERE); #ifdef DEBUG if (verbose) { printf("Replacing value of implicit by ref struct parameter with indir of parameter:\n"); } #endif // DEBUG } #ifdef DEBUG if (verbose) { gtDispTree(tree); } #endif // DEBUG return tree; } //------------------------------------------------------------------------ // fgAddFieldSeqForZeroOffset: // Associate a fieldSeq (with a zero offset) with the GenTree node 'addr' // // Arguments: // addr - A GenTree node // fieldSeqZero - a fieldSeq (with a zero offset) // // Notes: // Some GenTree nodes have internal fields that record the field sequence. // If we have one of these nodes: GT_CNS_INT, GT_LCL_FLD // we can append the field sequence using the gtFieldSeq // If we have a GT_ADD of a GT_CNS_INT we can use the // fieldSeq from child node. // Otherwise we record 'fieldSeqZero' in the GenTree node using // a Map: GetFieldSeqStore() // When doing so we take care to preserve any existing zero field sequence // void Compiler::fgAddFieldSeqForZeroOffset(GenTree* addr, FieldSeqNode* fieldSeqZero) { // We expect 'addr' to be an address at this point. assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF); // Tunnel through any commas. const bool commaOnly = true; addr = addr->gtEffectiveVal(commaOnly); // We still expect 'addr' to be an address at this point. 
assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF); FieldSeqNode* fieldSeqUpdate = fieldSeqZero; GenTree* fieldSeqNode = addr; bool fieldSeqRecorded = false; #ifdef DEBUG if (verbose) { printf("\nfgAddFieldSeqForZeroOffset for"); gtDispAnyFieldSeq(fieldSeqZero); printf("\naddr (Before)\n"); gtDispNode(addr, nullptr, nullptr, false); gtDispCommonEndLine(addr); } #endif // DEBUG switch (addr->OperGet()) { case GT_CNS_INT: fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsIntCon()->gtFieldSeq, fieldSeqZero); addr->AsIntCon()->gtFieldSeq = fieldSeqUpdate; fieldSeqRecorded = true; break; case GT_ADDR: if (addr->AsOp()->gtOp1->OperGet() == GT_LCL_FLD) { fieldSeqNode = addr->AsOp()->gtOp1; GenTreeLclFld* lclFld = addr->AsOp()->gtOp1->AsLclFld(); fieldSeqUpdate = GetFieldSeqStore()->Append(lclFld->GetFieldSeq(), fieldSeqZero); lclFld->SetFieldSeq(fieldSeqUpdate); fieldSeqRecorded = true; } break; case GT_ADD: if (addr->AsOp()->gtOp1->OperGet() == GT_CNS_INT) { fieldSeqNode = addr->AsOp()->gtOp1; fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsOp()->gtOp1->AsIntCon()->gtFieldSeq, fieldSeqZero); addr->AsOp()->gtOp1->AsIntCon()->gtFieldSeq = fieldSeqUpdate; fieldSeqRecorded = true; } else if (addr->AsOp()->gtOp2->OperGet() == GT_CNS_INT) { fieldSeqNode = addr->AsOp()->gtOp2; fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsOp()->gtOp2->AsIntCon()->gtFieldSeq, fieldSeqZero); addr->AsOp()->gtOp2->AsIntCon()->gtFieldSeq = fieldSeqUpdate; fieldSeqRecorded = true; } break; default: break; } if (fieldSeqRecorded == false) { // Record in the general zero-offset map. // The "addr" node might already be annotated with a zero-offset field sequence. FieldSeqNode* existingFieldSeq = nullptr; if (GetZeroOffsetFieldMap()->Lookup(addr, &existingFieldSeq)) { // Append the zero field sequences fieldSeqUpdate = GetFieldSeqStore()->Append(existingFieldSeq, fieldSeqZero); } // Overwrite the field sequence annotation for op1 GetZeroOffsetFieldMap()->Set(addr, fieldSeqUpdate, NodeToFieldSeqMap::Overwrite); fieldSeqRecorded = true; } #ifdef DEBUG if (verbose) { printf(" (After)\n"); gtDispNode(fieldSeqNode, nullptr, nullptr, false); gtDispCommonEndLine(fieldSeqNode); } #endif // DEBUG } #ifdef FEATURE_SIMD //----------------------------------------------------------------------------------- // fgMorphCombineSIMDFieldAssignments: // If the RHS of the input stmt is a read for simd vector X Field, then this function // will keep reading next few stmts based on the vector size(2, 3, 4). // If the next stmts LHS are located contiguous and RHS are also located // contiguous, then we replace those statements with a copyblk. // // Argument: // block - BasicBlock*. block which stmt belongs to // stmt - Statement*. the stmt node we want to check // // return value: // if this funciton successfully optimized the stmts, then return true. 
Otherwise // return false; bool Compiler::fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt) { GenTree* tree = stmt->GetRootNode(); assert(tree->OperGet() == GT_ASG); GenTree* originalLHS = tree->AsOp()->gtOp1; GenTree* prevLHS = tree->AsOp()->gtOp1; GenTree* prevRHS = tree->AsOp()->gtOp2; unsigned index = 0; CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; unsigned simdSize = 0; GenTree* simdStructNode = getSIMDStructFromField(prevRHS, &simdBaseJitType, &index, &simdSize, true); if (simdStructNode == nullptr || index != 0 || simdBaseJitType != CORINFO_TYPE_FLOAT) { // if the RHS is not from a SIMD vector field X, then there is no need to check further. return false; } var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); var_types simdType = getSIMDTypeForSize(simdSize); int assignmentsCount = simdSize / genTypeSize(simdBaseType) - 1; int remainingAssignments = assignmentsCount; Statement* curStmt = stmt->GetNextStmt(); Statement* lastStmt = stmt; while (curStmt != nullptr && remainingAssignments > 0) { GenTree* exp = curStmt->GetRootNode(); if (exp->OperGet() != GT_ASG) { break; } GenTree* curLHS = exp->gtGetOp1(); GenTree* curRHS = exp->gtGetOp2(); if (!areArgumentsContiguous(prevLHS, curLHS) || !areArgumentsContiguous(prevRHS, curRHS)) { break; } remainingAssignments--; prevLHS = curLHS; prevRHS = curRHS; lastStmt = curStmt; curStmt = curStmt->GetNextStmt(); } if (remainingAssignments > 0) { // if the left assignments number is bigger than zero, then this means // that the assignments are not assgining to the contiguously memory // locations from same vector. return false; } #ifdef DEBUG if (verbose) { printf("\nFound contiguous assignments from a SIMD vector to memory.\n"); printf("From " FMT_BB ", stmt ", block->bbNum); printStmtID(stmt); printf(" to stmt"); printStmtID(lastStmt); printf("\n"); } #endif for (int i = 0; i < assignmentsCount; i++) { fgRemoveStmt(block, stmt->GetNextStmt()); } GenTree* dstNode; if (originalLHS->OperIs(GT_LCL_FLD)) { dstNode = originalLHS; dstNode->gtType = simdType; dstNode->AsLclFld()->SetFieldSeq(FieldSeqStore::NotAField()); // This may have changed a partial local field into full local field if (dstNode->IsPartialLclFld(this)) { dstNode->gtFlags |= GTF_VAR_USEASG; } else { dstNode->gtFlags &= ~GTF_VAR_USEASG; } } else { GenTree* copyBlkDst = createAddressNodeForSIMDInit(originalLHS, simdSize); if (simdStructNode->OperIsLocal()) { setLclRelatedToSIMDIntrinsic(simdStructNode); } GenTree* copyBlkAddr = copyBlkDst; if (copyBlkAddr->gtOper == GT_LEA) { copyBlkAddr = copyBlkAddr->AsAddrMode()->Base(); } GenTreeLclVarCommon* localDst = copyBlkAddr->IsLocalAddrExpr(); if (localDst != nullptr) { setLclRelatedToSIMDIntrinsic(localDst); } if (simdStructNode->TypeGet() == TYP_BYREF) { assert(simdStructNode->OperIsLocal()); assert(lvaIsImplicitByRefLocal(simdStructNode->AsLclVarCommon()->GetLclNum())); simdStructNode = gtNewIndir(simdType, simdStructNode); } else { assert(varTypeIsSIMD(simdStructNode)); } dstNode = gtNewOperNode(GT_IND, simdType, copyBlkDst); } #ifdef DEBUG if (verbose) { printf("\n" FMT_BB " stmt ", block->bbNum); printStmtID(stmt); printf("(before)\n"); gtDispStmt(stmt); } #endif assert(!simdStructNode->CanCSE()); simdStructNode->ClearDoNotCSE(); tree = gtNewAssignNode(dstNode, simdStructNode); stmt->SetRootNode(tree); // Since we generated a new address node which didn't exist before, // we should expose this address manually here. 
// TODO-ADDR: Remove this when LocalAddressVisitor transforms all // local field access into LCL_FLDs, at that point we would be // combining 2 existing LCL_FLDs or 2 FIELDs that do not reference // a local and thus cannot result in a new address exposed local. fgMarkAddressExposedLocals(stmt); #ifdef DEBUG if (verbose) { printf("\nReplaced " FMT_BB " stmt", block->bbNum); printStmtID(stmt); printf("(after)\n"); gtDispStmt(stmt); } #endif return true; } #endif // FEATURE_SIMD //------------------------------------------------------------------------ // fgCheckStmtAfterTailCall: check that statements after the tail call stmt // candidate are in one of expected forms, that are desctibed below. // // Return Value: // 'true' if stmts are in the expected form, else 'false'. // bool Compiler::fgCheckStmtAfterTailCall() { // For void calls, we would have created a GT_CALL in the stmt list. // For non-void calls, we would have created a GT_RETURN(GT_CAST(GT_CALL)). // For calls returning structs, we would have a void call, followed by a void return. // For debuggable code, it would be an assignment of the call to a temp // We want to get rid of any of this extra trees, and just leave // the call. Statement* callStmt = fgMorphStmt; Statement* nextMorphStmt = callStmt->GetNextStmt(); // Check that the rest stmts in the block are in one of the following pattern: // 1) ret(void) // 2) ret(cast*(callResultLclVar)) // 3) lclVar = callResultLclVar, the actual ret(lclVar) in another block // 4) nop if (nextMorphStmt != nullptr) { GenTree* callExpr = callStmt->GetRootNode(); if (callExpr->gtOper != GT_ASG) { // The next stmt can be GT_RETURN(TYP_VOID) or GT_RETURN(lclVar), // where lclVar was return buffer in the call for structs or simd. Statement* retStmt = nextMorphStmt; GenTree* retExpr = retStmt->GetRootNode(); noway_assert(retExpr->gtOper == GT_RETURN); nextMorphStmt = retStmt->GetNextStmt(); } else { noway_assert(callExpr->gtGetOp1()->OperIsLocal()); unsigned callResultLclNumber = callExpr->gtGetOp1()->AsLclVarCommon()->GetLclNum(); #if FEATURE_TAILCALL_OPT_SHARED_RETURN // We can have a chain of assignments from the call result to // various inline return spill temps. These are ok as long // as the last one ultimately provides the return value or is ignored. // // And if we're returning a small type we may see a cast // on the source side. while ((nextMorphStmt != nullptr) && (nextMorphStmt->GetRootNode()->OperIs(GT_ASG, GT_NOP))) { if (nextMorphStmt->GetRootNode()->OperIs(GT_NOP)) { nextMorphStmt = nextMorphStmt->GetNextStmt(); continue; } Statement* moveStmt = nextMorphStmt; GenTree* moveExpr = nextMorphStmt->GetRootNode(); GenTree* moveDest = moveExpr->gtGetOp1(); noway_assert(moveDest->OperIsLocal()); // Tunnel through any casts on the source side. GenTree* moveSource = moveExpr->gtGetOp2(); while (moveSource->OperIs(GT_CAST)) { noway_assert(!moveSource->gtOverflow()); moveSource = moveSource->gtGetOp1(); } noway_assert(moveSource->OperIsLocal()); // Verify we're just passing the value from one local to another // along the chain. 
const unsigned srcLclNum = moveSource->AsLclVarCommon()->GetLclNum(); noway_assert(srcLclNum == callResultLclNumber); const unsigned dstLclNum = moveDest->AsLclVarCommon()->GetLclNum(); callResultLclNumber = dstLclNum; nextMorphStmt = moveStmt->GetNextStmt(); } if (nextMorphStmt != nullptr) #endif { Statement* retStmt = nextMorphStmt; GenTree* retExpr = nextMorphStmt->GetRootNode(); noway_assert(retExpr->gtOper == GT_RETURN); GenTree* treeWithLcl = retExpr->gtGetOp1(); while (treeWithLcl->gtOper == GT_CAST) { noway_assert(!treeWithLcl->gtOverflow()); treeWithLcl = treeWithLcl->gtGetOp1(); } noway_assert(callResultLclNumber == treeWithLcl->AsLclVarCommon()->GetLclNum()); nextMorphStmt = retStmt->GetNextStmt(); } } } return nextMorphStmt == nullptr; } //------------------------------------------------------------------------ // fgCanTailCallViaJitHelper: check whether we can use the faster tailcall // JIT helper on x86. // // Return Value: // 'true' if we can; or 'false' if we should use the generic tailcall mechanism. // bool Compiler::fgCanTailCallViaJitHelper() { #if !defined(TARGET_X86) || defined(UNIX_X86_ABI) || defined(FEATURE_READYTORUN) // On anything except windows X86 we have no faster mechanism available. return false; #else // The JIT helper does not properly handle the case where localloc was used. if (compLocallocUsed) return false; return true; #endif } //------------------------------------------------------------------------ // fgMorphReduceAddOps: reduce successive variable adds into a single multiply, // e.g., i + i + i + i => i * 4. // // Arguments: // tree - tree for reduction // // Return Value: // reduced tree if pattern matches, original tree otherwise // GenTree* Compiler::fgMorphReduceAddOps(GenTree* tree) { // ADD(_, V0) starts the pattern match. if (!tree->OperIs(GT_ADD) || tree->gtOverflow()) { return tree; } #ifndef TARGET_64BIT // Transforming 64-bit ADD to 64-bit MUL on 32-bit system results in replacing // ADD ops with a helper function call. Don't apply optimization in that case. if (tree->TypeGet() == TYP_LONG) { return tree; } #endif GenTree* lclVarTree = tree->AsOp()->gtOp2; GenTree* consTree = tree->AsOp()->gtOp1; GenTree* op1 = consTree; GenTree* op2 = lclVarTree; if (!op2->OperIs(GT_LCL_VAR) || !varTypeIsIntegral(op2)) { return tree; } int foldCount = 0; unsigned lclNum = op2->AsLclVarCommon()->GetLclNum(); // Search for pattern of shape ADD(ADD(ADD(lclNum, lclNum), lclNum), lclNum). while (true) { // ADD(lclNum, lclNum), end of tree if (op1->OperIs(GT_LCL_VAR) && op1->AsLclVarCommon()->GetLclNum() == lclNum && op2->OperIs(GT_LCL_VAR) && op2->AsLclVarCommon()->GetLclNum() == lclNum) { foldCount += 2; break; } // ADD(ADD(X, Y), lclNum), keep descending else if (op1->OperIs(GT_ADD) && !op1->gtOverflow() && op2->OperIs(GT_LCL_VAR) && op2->AsLclVarCommon()->GetLclNum() == lclNum) { foldCount++; op2 = op1->AsOp()->gtOp2; op1 = op1->AsOp()->gtOp1; } // Any other case is a pattern we won't attempt to fold for now. else { return tree; } } // V0 + V0 ... + V0 becomes V0 * foldCount, where postorder transform will optimize // accordingly consTree->BashToConst(foldCount, tree->TypeGet()); GenTree* morphed = gtNewOperNode(GT_MUL, tree->TypeGet(), lclVarTree, consTree); DEBUG_DESTROY_NODE(tree); return morphed; }
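The fgMorphReduceAddOps routine above folds successive adds of the same local into a single multiply. As a rough source-level illustration (a hedged sketch; the function name below is hypothetical, and the real transform rewrites GenTree IR rather than C++ source), the shape it recognizes is:

```cpp
// Illustrative sketch only: the pattern fgMorphReduceAddOps targets is a
// left-leaning chain ADD(ADD(ADD(V0, V0), V0), V0), which it rewrites into
// the equivalent of MUL(V0, 4) by bashing the folded count into a constant.
int SumOfSameLocal(int i)
{
    // Successive non-overflow-checked adds of one local variable...
    int sum = i + i + i + i;
    // ...end up morphed as if written:
    //   int sum = i * 4;
    return sum;
}
```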
1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, SIMD types are always supported, but the code base sometimes relied on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`; that does not work on Arm64, where SIMD features are always needed for ABI handling. Modified all such usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
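A minimal sketch of the change this description implies (hedged; the helper body and names below are assumptions for illustration, not the Compiler's actual implementation): call sites stop gating on the configuration-driven `featureSIMD` flag and gate on `supportSIMDTypes()`, which Arm64 can force to true.

```cpp
// Hedged sketch: why supportSIMDTypes() differs from featureSIMD on Arm64.
// The function and parameter names here are illustrative assumptions.
bool SupportSIMDTypesSketch(bool featureSIMD /* set from COMPlus_FeatureSIMD */)
{
#if defined(TARGET_ARM64)
    // Arm64 always needs SIMD types for ABI handling (vector argument and
    // return classification), regardless of COMPlus_FeatureSIMD.
    return true;
#else
    // Other targets keep honoring the configuration switch.
    return featureSIMD;
#endif
}

// Call sites then change from:
//     if (featureSIMD)        { /* handle TYP_SIMD* ... */ }
// to:
//     if (supportSIMDTypes()) { /* handle TYP_SIMD* ... */ }
```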
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features. On Arm64, SIMD types are always supported, but the code base sometimes relied on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`; that does not work on Arm64, where SIMD features are always needed for ABI handling. Modified all such usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/jit/simd.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // SIMD Support // // IMPORTANT NOTES AND CAVEATS: // // This implementation is preliminary, and may change dramatically. // // New JIT types, TYP_SIMDxx, are introduced, and the SIMD intrinsics are created as GT_SIMD nodes. // Nodes of SIMD types will be typed as TYP_SIMD* (e.g. TYP_SIMD8, TYP_SIMD16, etc.). // // Note that currently the "reference implementation" is the same as the runtime dll. As such, it is currently // providing implementations for those methods not currently supported by the JIT as intrinsics. // // These are currently recognized using string compares, in order to provide an implementation in the JIT // without taking a dependency on the VM. // Furthermore, in the CTP, in order to limit the impact of doing these string compares // against assembly names, we only look for the SIMDVector assembly if we are compiling a class constructor. This // makes it somewhat more "pay for play" but is a significant usability compromise. // This has been addressed for RTM by doing the assembly recognition in the VM. // -------------------------------------------------------------------------------------- #include "jitpch.h" #include "simd.h" #ifdef _MSC_VER #pragma hdrstop #endif #ifdef FEATURE_SIMD // Intrinsic Id to intrinsic info map const SIMDIntrinsicInfo simdIntrinsicInfoArray[] = { #define SIMD_INTRINSIC(mname, inst, id, name, retType, argCount, arg1, arg2, arg3, t1, t2, t3, t4, t5, t6, t7, t8, t9, \ t10) \ {SIMDIntrinsic##id, mname, inst, retType, argCount, arg1, arg2, arg3, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10}, #include "simdintrinsiclist.h" }; //------------------------------------------------------------------------ // getSIMDVectorLength: Get the length (number of elements of base type) of // SIMD Vector given its size and base (element) type. // // Arguments: // simdSize - size of the SIMD vector // baseType - type of the elements of the SIMD vector // // static int Compiler::getSIMDVectorLength(unsigned simdSize, var_types baseType) { return simdSize / genTypeSize(baseType); } //------------------------------------------------------------------------ // Get the length (number of elements of base type) of SIMD Vector given by typeHnd. // // Arguments: // typeHnd - type handle of the SIMD vector // int Compiler::getSIMDVectorLength(CORINFO_CLASS_HANDLE typeHnd) { unsigned sizeBytes = 0; CorInfoType baseJitType = getBaseJitTypeAndSizeOfSIMDType(typeHnd, &sizeBytes); var_types baseType = JitType2PreciseVarType(baseJitType); return getSIMDVectorLength(sizeBytes, baseType); } //------------------------------------------------------------------------ // Get the preferred alignment of SIMD vector type for better performance. // // Arguments: // typeHnd - type handle of the SIMD vector // int Compiler::getSIMDTypeAlignment(var_types simdType) { unsigned size = genTypeSize(simdType); #ifdef TARGET_XARCH // Fixed length vectors have the following alignment preference // Vector2 = 8 byte alignment // Vector3/4 = 16-byte alignment // preferred alignment for SSE2 128-bit vectors is 16-bytes if (size == 8) { return 8; } else if (size <= 16) { assert((size == 12) || (size == 16)); return 16; } else { assert(size == 32); return 32; } #elif defined(TARGET_ARM64) // preferred alignment for 64-bit vectors is 8-bytes. // For everything else, 16-bytes. return (size == 8) ? 
8 : 16; #else assert(!"getSIMDTypeAlignment() unimplemented on target arch"); unreached(); #endif } //------------------------------------------------------------------------ // Get, and allocate if necessary, the SIMD temp used for various operations. // The temp is allocated as the maximum sized type of all operations required. // // Arguments: // simdType - Required SIMD type // // Returns: // The temp number // unsigned Compiler::getSIMDInitTempVarNum(var_types simdType) { if (lvaSIMDInitTempVarNum == BAD_VAR_NUM) { JITDUMP("Allocating SIMDInitTempVar as %s\n", varTypeName(simdType)); lvaSIMDInitTempVarNum = lvaGrabTempWithImplicitUse(false DEBUGARG("SIMDInitTempVar")); lvaTable[lvaSIMDInitTempVarNum].lvType = simdType; } else if (genTypeSize(lvaTable[lvaSIMDInitTempVarNum].lvType) < genTypeSize(simdType)) { // We want the largest required type size for the temp. JITDUMP("Increasing SIMDInitTempVar type size from %s to %s\n", varTypeName(lvaTable[lvaSIMDInitTempVarNum].lvType), varTypeName(simdType)); lvaTable[lvaSIMDInitTempVarNum].lvType = simdType; } return lvaSIMDInitTempVarNum; } //---------------------------------------------------------------------------------- // Return the base type and size of SIMD vector type given its type handle. // // Arguments: // typeHnd - The handle of the type we're interested in. // sizeBytes - out param // // Return Value: // base type of SIMD vector. // sizeBytes if non-null is set to size in bytes. // // Notes: // If the size of the struct is already known call structSizeMightRepresentSIMDType // to determine if this api needs to be called. // // TODO-Throughput: current implementation parses class name to find base type. Change // this when we implement SIMD intrinsic identification for the final // product. CorInfoType Compiler::getBaseJitTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, unsigned* sizeBytes /*= nullptr */) { assert(supportSIMDTypes()); if (m_simdHandleCache == nullptr) { if (impInlineInfo == nullptr) { m_simdHandleCache = new (this, CMK_Generic) SIMDHandlesCache(); } else { // Steal the inliner compiler's cache (create it if not available). 
if (impInlineInfo->InlineRoot->m_simdHandleCache == nullptr) { impInlineInfo->InlineRoot->m_simdHandleCache = new (this, CMK_Generic) SIMDHandlesCache(); } m_simdHandleCache = impInlineInfo->InlineRoot->m_simdHandleCache; } } if (typeHnd == nullptr) { return CORINFO_TYPE_UNDEF; } // fast path search using cached type handles of important types CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; unsigned size = 0; // TODO - Optimize SIMD type recognition by IntrinsicAttribute if (isSIMDClass(typeHnd)) { // The most likely to be used type handles are looked up first followed by // less likely to be used type handles if (typeHnd == m_simdHandleCache->SIMDFloatHandle) { simdBaseJitType = CORINFO_TYPE_FLOAT; size = getSIMDVectorRegisterByteLength(); JITDUMP(" Known type SIMD Vector<Float>\n"); } else if (typeHnd == m_simdHandleCache->SIMDIntHandle) { simdBaseJitType = CORINFO_TYPE_INT; size = getSIMDVectorRegisterByteLength(); JITDUMP(" Known type SIMD Vector<Int>\n"); } else if (typeHnd == m_simdHandleCache->SIMDVector2Handle) { simdBaseJitType = CORINFO_TYPE_FLOAT; size = 2 * genTypeSize(TYP_FLOAT); assert(size == roundUp(info.compCompHnd->getClassSize(typeHnd), TARGET_POINTER_SIZE)); JITDUMP(" Known type Vector2\n"); } else if (typeHnd == m_simdHandleCache->SIMDVector3Handle) { simdBaseJitType = CORINFO_TYPE_FLOAT; size = 3 * genTypeSize(TYP_FLOAT); assert(size == info.compCompHnd->getClassSize(typeHnd)); JITDUMP(" Known type Vector3\n"); } else if (typeHnd == m_simdHandleCache->SIMDVector4Handle) { simdBaseJitType = CORINFO_TYPE_FLOAT; size = 4 * genTypeSize(TYP_FLOAT); assert(size == roundUp(info.compCompHnd->getClassSize(typeHnd), TARGET_POINTER_SIZE)); JITDUMP(" Known type Vector4\n"); } else if (typeHnd == m_simdHandleCache->SIMDVectorHandle) { size = getSIMDVectorRegisterByteLength(); JITDUMP(" Known type Vector\n"); } else if (typeHnd == m_simdHandleCache->SIMDUShortHandle) { simdBaseJitType = CORINFO_TYPE_USHORT; size = getSIMDVectorRegisterByteLength(); JITDUMP(" Known type SIMD Vector<ushort>\n"); } else if (typeHnd == m_simdHandleCache->SIMDUByteHandle) { simdBaseJitType = CORINFO_TYPE_UBYTE; size = getSIMDVectorRegisterByteLength(); JITDUMP(" Known type SIMD Vector<ubyte>\n"); } else if (typeHnd == m_simdHandleCache->SIMDDoubleHandle) { simdBaseJitType = CORINFO_TYPE_DOUBLE; size = getSIMDVectorRegisterByteLength(); JITDUMP(" Known type SIMD Vector<Double>\n"); } else if (typeHnd == m_simdHandleCache->SIMDLongHandle) { simdBaseJitType = CORINFO_TYPE_LONG; size = getSIMDVectorRegisterByteLength(); JITDUMP(" Known type SIMD Vector<Long>\n"); } else if (typeHnd == m_simdHandleCache->SIMDShortHandle) { simdBaseJitType = CORINFO_TYPE_SHORT; size = getSIMDVectorRegisterByteLength(); JITDUMP(" Known type SIMD Vector<short>\n"); } else if (typeHnd == m_simdHandleCache->SIMDByteHandle) { simdBaseJitType = CORINFO_TYPE_BYTE; size = getSIMDVectorRegisterByteLength(); JITDUMP(" Known type SIMD Vector<byte>\n"); } else if (typeHnd == m_simdHandleCache->SIMDUIntHandle) { simdBaseJitType = CORINFO_TYPE_UINT; size = getSIMDVectorRegisterByteLength(); JITDUMP(" Known type SIMD Vector<uint>\n"); } else if (typeHnd == m_simdHandleCache->SIMDULongHandle) { simdBaseJitType = CORINFO_TYPE_ULONG; size = getSIMDVectorRegisterByteLength(); JITDUMP(" Known type SIMD Vector<ulong>\n"); } else if (typeHnd == m_simdHandleCache->SIMDNIntHandle) { simdBaseJitType = CORINFO_TYPE_NATIVEINT; size = getSIMDVectorRegisterByteLength(); JITDUMP(" Known type SIMD Vector<nint>\n"); } else if (typeHnd == 
m_simdHandleCache->SIMDNUIntHandle) { simdBaseJitType = CORINFO_TYPE_NATIVEUINT; size = getSIMDVectorRegisterByteLength(); JITDUMP(" Known type SIMD Vector<nuint>\n"); } // slow path search if (simdBaseJitType == CORINFO_TYPE_UNDEF) { // Doesn't match with any of the cached type handles. // Obtain base type by parsing fully qualified class name. // // TODO-Throughput: implement product shipping solution to query base type. WCHAR className[256] = {0}; WCHAR* pbuf = &className[0]; int len = ArrLen(className); info.compCompHnd->appendClassName((char16_t**)&pbuf, &len, typeHnd, true, false, false); noway_assert(pbuf < &className[256]); JITDUMP("SIMD Candidate Type %S\n", className); if (wcsncmp(className, W("System.Numerics."), 16) == 0) { if (wcsncmp(&(className[16]), W("Vector`1["), 9) == 0) { size = getSIMDVectorRegisterByteLength(); if (wcsncmp(&(className[25]), W("System.Single"), 13) == 0) { m_simdHandleCache->SIMDFloatHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_FLOAT; JITDUMP(" Found type SIMD Vector<Float>\n"); } else if (wcsncmp(&(className[25]), W("System.Int32"), 12) == 0) { m_simdHandleCache->SIMDIntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_INT; JITDUMP(" Found type SIMD Vector<Int>\n"); } else if (wcsncmp(&(className[25]), W("System.UInt16"), 13) == 0) { m_simdHandleCache->SIMDUShortHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_USHORT; JITDUMP(" Found type SIMD Vector<ushort>\n"); } else if (wcsncmp(&(className[25]), W("System.Byte"), 11) == 0) { m_simdHandleCache->SIMDUByteHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_UBYTE; JITDUMP(" Found type SIMD Vector<ubyte>\n"); } else if (wcsncmp(&(className[25]), W("System.Double"), 13) == 0) { m_simdHandleCache->SIMDDoubleHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_DOUBLE; JITDUMP(" Found type SIMD Vector<Double>\n"); } else if (wcsncmp(&(className[25]), W("System.Int64"), 12) == 0) { m_simdHandleCache->SIMDLongHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_LONG; JITDUMP(" Found type SIMD Vector<Long>\n"); } else if (wcsncmp(&(className[25]), W("System.Int16"), 12) == 0) { m_simdHandleCache->SIMDShortHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_SHORT; JITDUMP(" Found type SIMD Vector<short>\n"); } else if (wcsncmp(&(className[25]), W("System.SByte"), 12) == 0) { m_simdHandleCache->SIMDByteHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_BYTE; JITDUMP(" Found type SIMD Vector<byte>\n"); } else if (wcsncmp(&(className[25]), W("System.UInt32"), 13) == 0) { m_simdHandleCache->SIMDUIntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_UINT; JITDUMP(" Found type SIMD Vector<uint>\n"); } else if (wcsncmp(&(className[25]), W("System.UInt64"), 13) == 0) { m_simdHandleCache->SIMDULongHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_ULONG; JITDUMP(" Found type SIMD Vector<ulong>\n"); } else if (wcsncmp(&(className[25]), W("System.IntPtr"), 13) == 0) { m_simdHandleCache->SIMDNIntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_NATIVEINT; JITDUMP(" Found type SIMD Vector<nint>\n"); } else if (wcsncmp(&(className[25]), W("System.UIntPtr"), 14) == 0) { m_simdHandleCache->SIMDNUIntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_NATIVEUINT; JITDUMP(" Found type SIMD Vector<nuint>\n"); } else { JITDUMP(" Unknown SIMD Vector<T>\n"); } } else if (wcsncmp(&(className[16]), W("Vector2"), 8) == 0) { m_simdHandleCache->SIMDVector2Handle = typeHnd; simdBaseJitType = CORINFO_TYPE_FLOAT; size = 2 * genTypeSize(TYP_FLOAT); assert(size == roundUp(info.compCompHnd->getClassSize(typeHnd), TARGET_POINTER_SIZE)); JITDUMP(" Found Vector2\n"); 
} else if (wcsncmp(&(className[16]), W("Vector3"), 8) == 0) { m_simdHandleCache->SIMDVector3Handle = typeHnd; simdBaseJitType = CORINFO_TYPE_FLOAT; size = 3 * genTypeSize(TYP_FLOAT); assert(size == info.compCompHnd->getClassSize(typeHnd)); JITDUMP(" Found Vector3\n"); } else if (wcsncmp(&(className[16]), W("Vector4"), 8) == 0) { m_simdHandleCache->SIMDVector4Handle = typeHnd; simdBaseJitType = CORINFO_TYPE_FLOAT; size = 4 * genTypeSize(TYP_FLOAT); assert(size == roundUp(info.compCompHnd->getClassSize(typeHnd), TARGET_POINTER_SIZE)); JITDUMP(" Found Vector4\n"); } else if (wcsncmp(&(className[16]), W("Vector"), 6) == 0) { m_simdHandleCache->SIMDVectorHandle = typeHnd; size = getSIMDVectorRegisterByteLength(); JITDUMP(" Found type Vector\n"); } else { JITDUMP(" Unknown SIMD Type\n"); } } } } #ifdef FEATURE_HW_INTRINSICS else if (isIntrinsicType(typeHnd)) { const size_t Vector64SizeBytes = 64 / 8; const size_t Vector128SizeBytes = 128 / 8; const size_t Vector256SizeBytes = 256 / 8; #if defined(TARGET_XARCH) static_assert_no_msg(YMM_REGSIZE_BYTES == Vector256SizeBytes); static_assert_no_msg(XMM_REGSIZE_BYTES == Vector128SizeBytes); if (typeHnd == m_simdHandleCache->Vector256FloatHandle) { simdBaseJitType = CORINFO_TYPE_FLOAT; size = Vector256SizeBytes; JITDUMP(" Known type Vector256<float>\n"); } else if (typeHnd == m_simdHandleCache->Vector256DoubleHandle) { simdBaseJitType = CORINFO_TYPE_DOUBLE; size = Vector256SizeBytes; JITDUMP(" Known type Vector256<double>\n"); } else if (typeHnd == m_simdHandleCache->Vector256IntHandle) { simdBaseJitType = CORINFO_TYPE_INT; size = Vector256SizeBytes; JITDUMP(" Known type Vector256<int>\n"); } else if (typeHnd == m_simdHandleCache->Vector256UIntHandle) { simdBaseJitType = CORINFO_TYPE_UINT; size = Vector256SizeBytes; JITDUMP(" Known type Vector256<uint>\n"); } else if (typeHnd == m_simdHandleCache->Vector256ShortHandle) { simdBaseJitType = CORINFO_TYPE_SHORT; size = Vector256SizeBytes; JITDUMP(" Known type Vector256<short>\n"); } else if (typeHnd == m_simdHandleCache->Vector256UShortHandle) { simdBaseJitType = CORINFO_TYPE_USHORT; size = Vector256SizeBytes; JITDUMP(" Known type Vector256<ushort>\n"); } else if (typeHnd == m_simdHandleCache->Vector256ByteHandle) { simdBaseJitType = CORINFO_TYPE_BYTE; size = Vector256SizeBytes; JITDUMP(" Known type Vector256<sbyte>\n"); } else if (typeHnd == m_simdHandleCache->Vector256UByteHandle) { simdBaseJitType = CORINFO_TYPE_UBYTE; size = Vector256SizeBytes; JITDUMP(" Known type Vector256<byte>\n"); } else if (typeHnd == m_simdHandleCache->Vector256LongHandle) { simdBaseJitType = CORINFO_TYPE_LONG; size = Vector256SizeBytes; JITDUMP(" Known type Vector256<long>\n"); } else if (typeHnd == m_simdHandleCache->Vector256ULongHandle) { simdBaseJitType = CORINFO_TYPE_ULONG; size = Vector256SizeBytes; JITDUMP(" Known type Vector256<ulong>\n"); } else if (typeHnd == m_simdHandleCache->Vector256NIntHandle) { simdBaseJitType = CORINFO_TYPE_NATIVEINT; size = Vector256SizeBytes; JITDUMP(" Known type Vector256<nint>\n"); } else if (typeHnd == m_simdHandleCache->Vector256NUIntHandle) { simdBaseJitType = CORINFO_TYPE_NATIVEUINT; size = Vector256SizeBytes; JITDUMP(" Known type Vector256<nuint>\n"); } else #endif // defined(TARGET_XARCH) if (typeHnd == m_simdHandleCache->Vector128FloatHandle) { simdBaseJitType = CORINFO_TYPE_FLOAT; size = Vector128SizeBytes; JITDUMP(" Known type Vector128<float>\n"); } else if (typeHnd == m_simdHandleCache->Vector128DoubleHandle) { simdBaseJitType = CORINFO_TYPE_DOUBLE; size = Vector128SizeBytes; 
JITDUMP(" Known type Vector128<double>\n"); } else if (typeHnd == m_simdHandleCache->Vector128IntHandle) { simdBaseJitType = CORINFO_TYPE_INT; size = Vector128SizeBytes; JITDUMP(" Known type Vector128<int>\n"); } else if (typeHnd == m_simdHandleCache->Vector128UIntHandle) { simdBaseJitType = CORINFO_TYPE_UINT; size = Vector128SizeBytes; JITDUMP(" Known type Vector128<uint>\n"); } else if (typeHnd == m_simdHandleCache->Vector128ShortHandle) { simdBaseJitType = CORINFO_TYPE_SHORT; size = Vector128SizeBytes; JITDUMP(" Known type Vector128<short>\n"); } else if (typeHnd == m_simdHandleCache->Vector128UShortHandle) { simdBaseJitType = CORINFO_TYPE_USHORT; size = Vector128SizeBytes; JITDUMP(" Known type Vector128<ushort>\n"); } else if (typeHnd == m_simdHandleCache->Vector128ByteHandle) { simdBaseJitType = CORINFO_TYPE_BYTE; size = Vector128SizeBytes; JITDUMP(" Known type Vector128<sbyte>\n"); } else if (typeHnd == m_simdHandleCache->Vector128UByteHandle) { simdBaseJitType = CORINFO_TYPE_UBYTE; size = Vector128SizeBytes; JITDUMP(" Known type Vector128<byte>\n"); } else if (typeHnd == m_simdHandleCache->Vector128LongHandle) { simdBaseJitType = CORINFO_TYPE_LONG; size = Vector128SizeBytes; JITDUMP(" Known type Vector128<long>\n"); } else if (typeHnd == m_simdHandleCache->Vector128ULongHandle) { simdBaseJitType = CORINFO_TYPE_ULONG; size = Vector128SizeBytes; JITDUMP(" Known type Vector128<ulong>\n"); } else if (typeHnd == m_simdHandleCache->Vector128NIntHandle) { simdBaseJitType = CORINFO_TYPE_NATIVEINT; size = Vector128SizeBytes; JITDUMP(" Known type Vector128<nint>\n"); } else if (typeHnd == m_simdHandleCache->Vector128NUIntHandle) { simdBaseJitType = CORINFO_TYPE_NATIVEUINT; size = Vector128SizeBytes; JITDUMP(" Known type Vector128<nuint>\n"); } else #if defined(TARGET_ARM64) if (typeHnd == m_simdHandleCache->Vector64FloatHandle) { simdBaseJitType = CORINFO_TYPE_FLOAT; size = Vector64SizeBytes; JITDUMP(" Known type Vector64<float>\n"); } else if (typeHnd == m_simdHandleCache->Vector64DoubleHandle) { simdBaseJitType = CORINFO_TYPE_DOUBLE; size = Vector64SizeBytes; JITDUMP(" Known type Vector64<double>\n"); } else if (typeHnd == m_simdHandleCache->Vector64IntHandle) { simdBaseJitType = CORINFO_TYPE_INT; size = Vector64SizeBytes; JITDUMP(" Known type Vector64<int>\n"); } else if (typeHnd == m_simdHandleCache->Vector64UIntHandle) { simdBaseJitType = CORINFO_TYPE_UINT; size = Vector64SizeBytes; JITDUMP(" Known type Vector64<uint>\n"); } else if (typeHnd == m_simdHandleCache->Vector64ShortHandle) { simdBaseJitType = CORINFO_TYPE_SHORT; size = Vector64SizeBytes; JITDUMP(" Known type Vector64<short>\n"); } else if (typeHnd == m_simdHandleCache->Vector64UShortHandle) { simdBaseJitType = CORINFO_TYPE_USHORT; size = Vector64SizeBytes; JITDUMP(" Known type Vector64<ushort>\n"); } else if (typeHnd == m_simdHandleCache->Vector64ByteHandle) { simdBaseJitType = CORINFO_TYPE_BYTE; size = Vector64SizeBytes; JITDUMP(" Known type Vector64<sbyte>\n"); } else if (typeHnd == m_simdHandleCache->Vector64UByteHandle) { simdBaseJitType = CORINFO_TYPE_UBYTE; size = Vector64SizeBytes; JITDUMP(" Known type Vector64<byte>\n"); } else if (typeHnd == m_simdHandleCache->Vector64LongHandle) { simdBaseJitType = CORINFO_TYPE_LONG; size = Vector64SizeBytes; JITDUMP(" Known type Vector64<long>\n"); } else if (typeHnd == m_simdHandleCache->Vector64ULongHandle) { simdBaseJitType = CORINFO_TYPE_ULONG; size = Vector64SizeBytes; JITDUMP(" Known type Vector64<ulong>\n"); } else if (typeHnd == m_simdHandleCache->Vector64NIntHandle) { 
simdBaseJitType = CORINFO_TYPE_NATIVEINT; size = Vector64SizeBytes; JITDUMP(" Known type Vector64<nint>\n"); } else if (typeHnd == m_simdHandleCache->Vector64NUIntHandle) { simdBaseJitType = CORINFO_TYPE_NATIVEUINT; size = Vector64SizeBytes; JITDUMP(" Known type Vector64<nuint>\n"); } #endif // defined(TARGET_ARM64) // slow path search if (simdBaseJitType == CORINFO_TYPE_UNDEF) { // Doesn't match with any of the cached type handles. const char* className = getClassNameFromMetadata(typeHnd, nullptr); CORINFO_CLASS_HANDLE baseTypeHnd = getTypeInstantiationArgument(typeHnd, 0); if (baseTypeHnd != nullptr) { CorInfoType type = info.compCompHnd->getTypeForPrimitiveNumericClass(baseTypeHnd); JITDUMP("HW Intrinsic SIMD Candidate Type %s with Base Type %s\n", className, getClassNameFromMetadata(baseTypeHnd, nullptr)); #if defined(TARGET_XARCH) if (strcmp(className, "Vector256`1") == 0) { size = Vector256SizeBytes; switch (type) { case CORINFO_TYPE_FLOAT: m_simdHandleCache->Vector256FloatHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_FLOAT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<float>\n"); break; case CORINFO_TYPE_DOUBLE: m_simdHandleCache->Vector256DoubleHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_DOUBLE; JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<double>\n"); break; case CORINFO_TYPE_INT: m_simdHandleCache->Vector256IntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_INT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<int>\n"); break; case CORINFO_TYPE_UINT: m_simdHandleCache->Vector256UIntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_UINT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<uint>\n"); break; case CORINFO_TYPE_SHORT: m_simdHandleCache->Vector256ShortHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_SHORT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<short>\n"); break; case CORINFO_TYPE_USHORT: m_simdHandleCache->Vector256UShortHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_USHORT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<ushort>\n"); break; case CORINFO_TYPE_LONG: m_simdHandleCache->Vector256LongHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_LONG; JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<long>\n"); break; case CORINFO_TYPE_ULONG: m_simdHandleCache->Vector256ULongHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_ULONG; JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<ulong>\n"); break; case CORINFO_TYPE_UBYTE: m_simdHandleCache->Vector256UByteHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_UBYTE; JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<byte>\n"); break; case CORINFO_TYPE_BYTE: m_simdHandleCache->Vector256ByteHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_BYTE; JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<sbyte>\n"); break; case CORINFO_TYPE_NATIVEINT: m_simdHandleCache->Vector256NIntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_NATIVEINT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<nint>\n"); break; case CORINFO_TYPE_NATIVEUINT: m_simdHandleCache->Vector256NUIntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_NATIVEUINT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<nuint>\n"); break; default: JITDUMP(" Unknown Hardware Intrinsic SIMD Type Vector256<T>\n"); } } else #endif // defined(TARGET_XARCH) if (strcmp(className, "Vector128`1") == 0) { size = Vector128SizeBytes; switch (type) { case CORINFO_TYPE_FLOAT: m_simdHandleCache->Vector128FloatHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_FLOAT; JITDUMP(" Found type Hardware Intrinsic 
SIMD Vector128<float>\n"); break; case CORINFO_TYPE_DOUBLE: m_simdHandleCache->Vector128DoubleHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_DOUBLE; JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<double>\n"); break; case CORINFO_TYPE_INT: m_simdHandleCache->Vector128IntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_INT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<int>\n"); break; case CORINFO_TYPE_UINT: m_simdHandleCache->Vector128UIntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_UINT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<uint>\n"); break; case CORINFO_TYPE_SHORT: m_simdHandleCache->Vector128ShortHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_SHORT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<short>\n"); break; case CORINFO_TYPE_USHORT: m_simdHandleCache->Vector128UShortHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_USHORT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<ushort>\n"); break; case CORINFO_TYPE_LONG: m_simdHandleCache->Vector128LongHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_LONG; JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<long>\n"); break; case CORINFO_TYPE_ULONG: m_simdHandleCache->Vector128ULongHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_ULONG; JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<ulong>\n"); break; case CORINFO_TYPE_UBYTE: m_simdHandleCache->Vector128UByteHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_UBYTE; JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<byte>\n"); break; case CORINFO_TYPE_BYTE: m_simdHandleCache->Vector128ByteHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_BYTE; JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<sbyte>\n"); break; case CORINFO_TYPE_NATIVEINT: m_simdHandleCache->Vector128NIntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_NATIVEINT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<nint>\n"); break; case CORINFO_TYPE_NATIVEUINT: m_simdHandleCache->Vector128NUIntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_NATIVEUINT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<nuint>\n"); break; default: JITDUMP(" Unknown Hardware Intrinsic SIMD Type Vector128<T>\n"); } } #if defined(TARGET_ARM64) else if (strcmp(className, "Vector64`1") == 0) { size = Vector64SizeBytes; switch (type) { case CORINFO_TYPE_FLOAT: m_simdHandleCache->Vector64FloatHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_FLOAT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<float>\n"); break; case CORINFO_TYPE_DOUBLE: m_simdHandleCache->Vector64DoubleHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_DOUBLE; JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<double>\n"); break; case CORINFO_TYPE_INT: m_simdHandleCache->Vector64IntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_INT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<int>\n"); break; case CORINFO_TYPE_UINT: m_simdHandleCache->Vector64UIntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_UINT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<uint>\n"); break; case CORINFO_TYPE_SHORT: m_simdHandleCache->Vector64ShortHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_SHORT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<short>\n"); break; case CORINFO_TYPE_USHORT: m_simdHandleCache->Vector64UShortHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_USHORT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<ushort>\n"); break; case CORINFO_TYPE_LONG: m_simdHandleCache->Vector64LongHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_LONG; JITDUMP(" Found type Hardware 
Intrinsic SIMD Vector64<long>\n"); break; case CORINFO_TYPE_ULONG: m_simdHandleCache->Vector64ULongHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_ULONG; JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<ulong>\n"); break; case CORINFO_TYPE_UBYTE: m_simdHandleCache->Vector64UByteHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_UBYTE; JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<byte>\n"); break; case CORINFO_TYPE_BYTE: m_simdHandleCache->Vector64ByteHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_BYTE; JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<sbyte>\n"); break; case CORINFO_TYPE_NATIVEINT: m_simdHandleCache->Vector64NIntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_NATIVEINT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<nint>\n"); break; case CORINFO_TYPE_NATIVEUINT: m_simdHandleCache->Vector64NUIntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_NATIVEUINT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<nuint>\n"); break; default: JITDUMP(" Unknown Hardware Intrinsic SIMD Type Vector64<T>\n"); } } #endif // defined(TARGET_ARM64) } } #if defined(TARGET_XARCH) // Even though Vector256 is TYP_SIMD32, if AVX isn't supported, then it must // be treated as a regular struct if (size == YMM_REGSIZE_BYTES && (simdBaseJitType != CORINFO_TYPE_UNDEF) && !compExactlyDependsOn(InstructionSet_AVX)) { simdBaseJitType = CORINFO_TYPE_UNDEF; } #endif // TARGET_XARCH } #endif // FEATURE_HW_INTRINSICS if (sizeBytes != nullptr) { *sizeBytes = size; } if (simdBaseJitType != CORINFO_TYPE_UNDEF) { setUsesSIMDTypes(true); } return simdBaseJitType; } //-------------------------------------------------------------------------------------- // getSIMDIntrinsicInfo: get SIMD intrinsic info given the method handle. // // Arguments: // inOutTypeHnd - The handle of the type on which the method is invoked. This is an in-out param. // methodHnd - The handle of the method we're interested in. // sig - method signature info // isNewObj - whether this call represents a newboj constructor call // argCount - argument count - out pram // simdBaseJitType - base JIT type of the intrinsic - out param // sizeBytes - size of SIMD vector type on which the method is invoked - out param // // Return Value: // SIMDIntrinsicInfo struct initialized corresponding to methodHnd. // Sets SIMDIntrinsicInfo.id to SIMDIntrinsicInvalid if methodHnd doesn't correspond // to any SIMD intrinsic. Also, sets the out params inOutTypeHnd, argCount, baseType and // sizeBytes. // // Note that VectorMath class doesn't have a base type and first argument of the method // determines the SIMD vector type on which intrinsic is invoked. In such a case inOutTypeHnd // is modified by this routine. // // TODO-Throughput: The current implementation is based on method name string parsing. // Although we now have type identification from the VM, the parsing of intrinsic names // could be made more efficient. 
// const SIMDIntrinsicInfo* Compiler::getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* inOutTypeHnd, CORINFO_METHOD_HANDLE methodHnd, CORINFO_SIG_INFO* sig, bool isNewObj, unsigned* argCount, CorInfoType* simdBaseJitType, unsigned* sizeBytes) { assert(featureSIMD); assert(simdBaseJitType != nullptr); assert(sizeBytes != nullptr); // get simdBaseJitType and size of the type CORINFO_CLASS_HANDLE typeHnd = *inOutTypeHnd; *simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(typeHnd, sizeBytes); if (typeHnd == m_simdHandleCache->SIMDVectorHandle) { // All of the supported intrinsics on this static class take a first argument that's a vector, // which determines the simdBaseJitType. // The exception is the IsHardwareAccelerated property, which is handled as a special case. assert(*simdBaseJitType == CORINFO_TYPE_UNDEF); if (sig->numArgs == 0) { const SIMDIntrinsicInfo* hwAccelIntrinsicInfo = &(simdIntrinsicInfoArray[SIMDIntrinsicHWAccel]); if ((strcmp(eeGetMethodName(methodHnd, nullptr), hwAccelIntrinsicInfo->methodName) == 0) && JITtype2varType(sig->retType) == hwAccelIntrinsicInfo->retType) { // Sanity check assert(hwAccelIntrinsicInfo->argCount == 0 && hwAccelIntrinsicInfo->isInstMethod == false); return hwAccelIntrinsicInfo; } return nullptr; } else { typeHnd = info.compCompHnd->getArgClass(sig, sig->args); *inOutTypeHnd = typeHnd; *simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(typeHnd, sizeBytes); } } if (*simdBaseJitType == CORINFO_TYPE_UNDEF) { JITDUMP("NOT a SIMD Intrinsic: unsupported baseType\n"); return nullptr; } var_types simdBaseType = JitType2PreciseVarType(*simdBaseJitType); // account for implicit "this" arg *argCount = sig->numArgs; if (sig->hasThis()) { *argCount += 1; } // Get the Intrinsic Id by parsing method name. // // TODO-Throughput: replace sequential search by binary search by arranging entries // sorted by method name. SIMDIntrinsicID intrinsicId = SIMDIntrinsicInvalid; const char* methodName = eeGetMethodName(methodHnd, nullptr); for (int i = SIMDIntrinsicNone + 1; i < SIMDIntrinsicInvalid; ++i) { if (strcmp(methodName, simdIntrinsicInfoArray[i].methodName) == 0) { // Found an entry for the method; further check whether it is one of // the supported base types. bool found = false; for (int j = 0; j < SIMD_INTRINSIC_MAX_BASETYPE_COUNT; ++j) { // Convention: if there are fewer base types supported than MAX_BASETYPE_COUNT, // the end of the list is marked by TYP_UNDEF. if (simdIntrinsicInfoArray[i].supportedBaseTypes[j] == TYP_UNDEF) { break; } if (simdIntrinsicInfoArray[i].supportedBaseTypes[j] == simdBaseType) { found = true; break; } } if (!found) { continue; } // Now, check the arguments. unsigned int fixedArgCnt = simdIntrinsicInfoArray[i].argCount; unsigned int expectedArgCnt = fixedArgCnt; // First handle SIMDIntrinsicInitN, where the arg count depends on the type. // The listed arg types include the vector and the first two init values, which is the expected number // for Vector2. For other cases, we'll check their types here. 
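// For example, the Vector3 constructor taking three floats imports with *argCount == 4 (the implicit
// 'this' byref plus three float values), so expectedArgCnt is raised from 3 to 4 below; the Vector4
// equivalent arrives with *argCount == 5.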
if (*argCount > expectedArgCnt) { if (i == SIMDIntrinsicInitN) { if (*argCount == 3 && typeHnd == m_simdHandleCache->SIMDVector2Handle) { expectedArgCnt = 3; } else if (*argCount == 4 && typeHnd == m_simdHandleCache->SIMDVector3Handle) { expectedArgCnt = 4; } else if (*argCount == 5 && typeHnd == m_simdHandleCache->SIMDVector4Handle) { expectedArgCnt = 5; } } else if (i == SIMDIntrinsicInitFixed) { if (*argCount == 4 && typeHnd == m_simdHandleCache->SIMDVector4Handle) { expectedArgCnt = 4; } } } if (*argCount != expectedArgCnt) { continue; } // Validate the types of individual args passed are what is expected of. // If any of the types don't match with what is expected, don't consider // as an intrinsic. This will make an older JIT with SIMD capabilities // resilient to breaking changes to SIMD managed API. // // Note that from IL type stack, args get popped in right to left order // whereas args get listed in method signatures in left to right order. int stackIndex = (expectedArgCnt - 1); // Track the arguments from the signature - we currently only use this to distinguish // integral and pointer types, both of which will by TYP_I_IMPL on the importer stack. CORINFO_ARG_LIST_HANDLE argLst = sig->args; CORINFO_CLASS_HANDLE argClass; for (unsigned int argIndex = 0; found == true && argIndex < expectedArgCnt; argIndex++) { bool isThisPtr = ((argIndex == 0) && sig->hasThis()); // In case of "newobj SIMDVector<T>(T val)", thisPtr won't be present on type stack. // We don't check anything in that case. if (!isThisPtr || !isNewObj) { GenTree* arg = impStackTop(stackIndex).val; var_types argType = arg->TypeGet(); var_types expectedArgType; if (argIndex < fixedArgCnt) { // Convention: // - intrinsicInfo.argType[i] == TYP_UNDEF - intrinsic doesn't have a valid arg at position i // - intrinsicInfo.argType[i] == TYP_UNKNOWN - arg type should be same as simdBaseType // Note that we pop the args off in reverse order. expectedArgType = simdIntrinsicInfoArray[i].argType[argIndex]; assert(expectedArgType != TYP_UNDEF); if (expectedArgType == TYP_UNKNOWN) { // The type of the argument will be genActualType(*simdBaseType). expectedArgType = genActualType(simdBaseType); argType = genActualType(argType); } } else { expectedArgType = simdBaseType; } if (!isThisPtr && argType == TYP_I_IMPL) { // The reference implementation has a constructor that takes a pointer. // We don't want to recognize that one. This requires us to look at the CorInfoType // in order to distinguish a signature with a pointer argument from one with an // integer argument of pointer size, both of which will be TYP_I_IMPL on the stack. // TODO-Review: This seems quite fragile. We should consider beefing up the checking // here. CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass)); if (corType == CORINFO_TYPE_PTR) { found = false; } } if (varTypeIsSIMD(argType)) { argType = TYP_STRUCT; } if (argType != expectedArgType) { found = false; } } if (argIndex != 0 || !sig->hasThis()) { argLst = info.compCompHnd->getArgNext(argLst); } stackIndex--; } // Cross check return type and static vs. instance is what we are expecting. // If not, don't consider it as an intrinsic. // Note that ret type of TYP_UNKNOWN means that it is not known apriori and must be same as simdBaseType if (found) { var_types expectedRetType = simdIntrinsicInfoArray[i].retType; if (expectedRetType == TYP_UNKNOWN) { // JIT maps uint/ulong type vars to TYP_INT/TYP_LONG. expectedRetType = (simdBaseType == TYP_UINT || simdBaseType == TYP_ULONG) ? 
genActualType(simdBaseType) : simdBaseType; } if (JITtype2varType(sig->retType) != expectedRetType || sig->hasThis() != simdIntrinsicInfoArray[i].isInstMethod) { found = false; } } if (found) { intrinsicId = (SIMDIntrinsicID)i; break; } } } if (intrinsicId != SIMDIntrinsicInvalid) { JITDUMP("Method %s maps to SIMD intrinsic %s\n", methodName, simdIntrinsicNames[intrinsicId]); return &simdIntrinsicInfoArray[intrinsicId]; } else { JITDUMP("Method %s is NOT a SIMD intrinsic\n", methodName); } return nullptr; } /* static */ bool Compiler::vnEncodesResultTypeForSIMDIntrinsic(SIMDIntrinsicID intrinsicId) { switch (intrinsicId) { case SIMDIntrinsicInit: case SIMDIntrinsicSub: case SIMDIntrinsicEqual: case SIMDIntrinsicBitwiseAnd: case SIMDIntrinsicBitwiseOr: case SIMDIntrinsicCast: return true; default: break; } return false; } // Pops and returns GenTree node from importer's type stack. // Normalizes TYP_STRUCT value in case of GT_CALL, GT_RET_EXPR and arg nodes. // // Arguments: // type - the type of value that the caller expects to be popped off the stack. // expectAddr - if true indicates we are expecting type stack entry to be a TYP_BYREF. // structHandle - the class handle to use when normalizing if it is not the same as the stack entry class handle; // this can happen for certain scenarios, such as folding away a static cast, where we want the // value popped to have the type that would have been returned. // // Notes: // If the popped value is a struct, and the expected type is a simd type, it will be set // to that type, otherwise it will assert if the type being popped is not the expected type. GenTree* Compiler::impSIMDPopStack(var_types type, bool expectAddr, CORINFO_CLASS_HANDLE structHandle) { StackEntry se = impPopStack(); typeInfo ti = se.seTypeInfo; GenTree* tree = se.val; // If expectAddr is true implies what we have on stack is address and we need // SIMD type struct that it points to. if (expectAddr) { assert(tree->TypeIs(TYP_BYREF, TYP_I_IMPL)); if (tree->OperGet() == GT_ADDR) { tree = tree->gtGetOp1(); } else { tree = gtNewOperNode(GT_IND, type, tree); } } bool isParam = false; // If we are popping a struct type it must have a matching handle if one is specified. // - If we have an existing 'OBJ' and 'structHandle' is specified, we will change its // handle if it doesn't match. // This can happen when we have a retyping of a vector that doesn't translate to any // actual IR. // - (If it's not an OBJ and it's used in a parameter context where it is required, // impNormStructVal will add one). // if (tree->OperGet() == GT_OBJ) { if ((structHandle != NO_CLASS_HANDLE) && (tree->AsObj()->GetLayout()->GetClassHandle() != structHandle)) { // In this case we need to retain the GT_OBJ to retype the value. tree->AsObj()->SetLayout(typGetObjLayout(structHandle)); } else { GenTree* addr = tree->AsOp()->gtOp1; if ((addr->OperGet() == GT_ADDR) && isSIMDTypeLocal(addr->AsOp()->gtOp1)) { tree = addr->AsOp()->gtOp1; } } } if (tree->OperGet() == GT_LCL_VAR) { isParam = lvaGetDesc(tree->AsLclVarCommon())->lvIsParam; } // normalize TYP_STRUCT value if (varTypeIsStruct(tree) && ((tree->OperGet() == GT_RET_EXPR) || (tree->OperGet() == GT_CALL) || isParam)) { assert(ti.IsType(TI_STRUCT)); if (structHandle == nullptr) { structHandle = ti.GetClassHandleForValueClass(); } tree = impNormStructVal(tree, structHandle, (unsigned)CHECK_SPILL_ALL); } // Now set the type of the tree to the specialized SIMD struct type, if applicable. 
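// For example, a tree that was typed as TYP_STRUCT for a Vector4 value is retyped to TYP_SIMD16 here.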
if (genActualType(tree->gtType) != genActualType(type)) { assert(tree->gtType == TYP_STRUCT); tree->gtType = type; } else if (tree->gtType == TYP_BYREF) { assert(tree->IsLocal() || (tree->OperGet() == GT_RET_EXPR) || (tree->OperGet() == GT_CALL) || ((tree->gtOper == GT_ADDR) && varTypeIsSIMD(tree->gtGetOp1()))); } return tree; } #ifdef TARGET_XARCH // impSIMDLongRelOpEqual: transforms operands and returns the SIMD intrinsic to be applied on // transformed operands to obtain == comparison result. // // Arguments: // typeHnd - type handle of SIMD vector // size - SIMD vector size // op1 - in-out parameter; first operand // op2 - in-out parameter; second operand // // Return Value: // Modifies in-out params op1, op2 and returns intrinsic ID to be applied to modified operands // SIMDIntrinsicID Compiler::impSIMDLongRelOpEqual(CORINFO_CLASS_HANDLE typeHnd, unsigned size, GenTree** pOp1, GenTree** pOp2) { var_types simdType = (*pOp1)->TypeGet(); assert(varTypeIsSIMD(simdType) && ((*pOp2)->TypeGet() == simdType)); // There is no direct SSE2 support for comparing TYP_LONG vectors. // These have to be implemented in terms of TYP_INT vector comparison operations. // // Equality(v1, v2): // tmp = (v1 == v2) i.e. compare for equality as if v1 and v2 are vector<int> // result = BitwiseAnd(t, shuffle(t, (2, 3, 0, 1))) // Shuffle is meant to swap the comparison results of low-32-bits and high 32-bits of respective long elements. // Compare vector<long> as if they were vector<int> and assign the result to a temp GenTree* compResult = gtNewSIMDNode(simdType, *pOp1, *pOp2, SIMDIntrinsicEqual, CORINFO_TYPE_INT, size); unsigned lclNum = lvaGrabTemp(true DEBUGARG("SIMD Long ==")); lvaSetStruct(lclNum, typeHnd, false); GenTree* tmp = gtNewLclvNode(lclNum, simdType); GenTree* asg = gtNewTempAssign(lclNum, compResult); // op1 = GT_COMMA(tmp=compResult, tmp) // op2 = Shuffle(tmp, 0xB1) // IntrinsicId = BitwiseAnd *pOp1 = gtNewOperNode(GT_COMMA, simdType, asg, tmp); *pOp2 = gtNewSIMDNode(simdType, gtNewLclvNode(lclNum, simdType), gtNewIconNode(SHUFFLE_ZWXY, TYP_INT), SIMDIntrinsicShuffleSSE2, CORINFO_TYPE_INT, size); return SIMDIntrinsicBitwiseAnd; } #endif // TARGET_XARCH // Transforms operands and returns the SIMD intrinsic to be applied on // transformed operands to obtain given relop result. // // Arguments: // relOpIntrinsicId - Relational operator SIMD intrinsic // typeHnd - type handle of SIMD vector // size - SIMD vector size // inOutBaseJitType - base JIT type of SIMD vector // pOp1 - in-out parameter; first operand // pOp2 - in-out parameter; second operand // // Return Value: // Modifies in-out params pOp1, pOp2, inOutBaseType and returns intrinsic ID to be applied to modified operands // SIMDIntrinsicID Compiler::impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId, CORINFO_CLASS_HANDLE typeHnd, unsigned size, CorInfoType* inOutBaseJitType, GenTree** pOp1, GenTree** pOp2) { var_types simdType = (*pOp1)->TypeGet(); assert(varTypeIsSIMD(simdType) && ((*pOp2)->TypeGet() == simdType)); assert(isRelOpSIMDIntrinsic(relOpIntrinsicId)); SIMDIntrinsicID intrinsicID = relOpIntrinsicId; #ifdef TARGET_XARCH CorInfoType simdBaseJitType = *inOutBaseJitType; var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); if (varTypeIsFloating(simdBaseType)) { } else if (varTypeIsIntegral(simdBaseType)) { if ((getSIMDSupportLevel() == SIMD_SSE2_Supported) && simdBaseType == TYP_LONG) { // There is no direct SSE2 support for comparing TYP_LONG vectors. 
// These have to be implemented interms of TYP_INT vector comparison operations. if (intrinsicID == SIMDIntrinsicEqual) { intrinsicID = impSIMDLongRelOpEqual(typeHnd, size, pOp1, pOp2); } else { unreached(); } } // SSE2 and AVX direct support for signed comparison of int32, int16 and int8 types else if (varTypeIsUnsigned(simdBaseType)) { // Vector<byte>, Vector<ushort>, Vector<uint> and Vector<ulong>: // SSE2 supports > for signed comparison. Therefore, to use it for // comparing unsigned numbers, we subtract a constant from both the // operands such that the result fits within the corresponding signed // type. The resulting signed numbers are compared using SSE2 signed // comparison. // // Vector<byte>: constant to be subtracted is 2^7 // Vector<ushort> constant to be subtracted is 2^15 // Vector<uint> constant to be subtracted is 2^31 // Vector<ulong> constant to be subtracted is 2^63 // // We need to treat op1 and op2 as signed for comparison purpose after // the transformation. __int64 constVal = 0; switch (simdBaseType) { case TYP_UBYTE: constVal = 0x80808080; *inOutBaseJitType = CORINFO_TYPE_BYTE; break; case TYP_USHORT: constVal = 0x80008000; *inOutBaseJitType = CORINFO_TYPE_SHORT; break; case TYP_UINT: constVal = 0x80000000; *inOutBaseJitType = CORINFO_TYPE_INT; break; case TYP_ULONG: constVal = 0x8000000000000000LL; *inOutBaseJitType = CORINFO_TYPE_LONG; break; default: unreached(); break; } assert(constVal != 0); // This transformation is not required for equality. if (intrinsicID != SIMDIntrinsicEqual) { // For constructing const vector use either long or int base type. CorInfoType tempBaseJitType; GenTree* initVal; if (simdBaseType == TYP_ULONG) { tempBaseJitType = CORINFO_TYPE_LONG; initVal = gtNewLconNode(constVal); } else { tempBaseJitType = CORINFO_TYPE_INT; initVal = gtNewIconNode((ssize_t)constVal); } initVal->gtType = JITtype2varType(tempBaseJitType); GenTree* constVector = gtNewSIMDNode(simdType, initVal, SIMDIntrinsicInit, tempBaseJitType, size); // Assign constVector to a temp, since we intend to use it more than once // TODO-CQ: We have quite a few such constant vectors constructed during // the importation of SIMD intrinsics. Make sure that we have a single // temp per distinct constant per method. GenTree* tmp = fgInsertCommaFormTemp(&constVector, typeHnd); // op1 = op1 - constVector // op2 = op2 - constVector *pOp1 = gtNewSIMDNode(simdType, *pOp1, constVector, SIMDIntrinsicSub, simdBaseJitType, size); *pOp2 = gtNewSIMDNode(simdType, *pOp2, tmp, SIMDIntrinsicSub, simdBaseJitType, size); } return impSIMDRelOp(intrinsicID, typeHnd, size, inOutBaseJitType, pOp1, pOp2); } } #elif !defined(TARGET_ARM64) assert(!"impSIMDRelOp() unimplemented on target arch"); unreached(); #endif // !TARGET_XARCH return intrinsicID; } //------------------------------------------------------------------------ // getOp1ForConstructor: Get the op1 for a constructor call. // // Arguments: // opcode - the opcode being handled (needed to identify the CEE_NEWOBJ case) // newobjThis - For CEE_NEWOBJ, this is the temp grabbed for the allocated uninitalized object. // clsHnd - The handle of the class of the method. // // Return Value: // The tree node representing the object to be initialized with the constructor. // // Notes: // This method handles the differences between the CEE_NEWOBJ and constructor cases. 
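//    For CEE_NEWOBJ, 'newobjThis' is the address of the temp allocated for the new object; that temp is
//    also re-pushed on the importer's stack so the newobj result stays available to the IL that consumes
//    it. Otherwise, the 'this' byref is simply popped from the stack.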
// GenTree* Compiler::getOp1ForConstructor(OPCODE opcode, GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd) { GenTree* op1; if (opcode == CEE_NEWOBJ) { op1 = newobjThis; assert(newobjThis->gtOper == GT_ADDR && newobjThis->AsOp()->gtOp1->gtOper == GT_LCL_VAR); // push newobj result on type stack unsigned tmp = op1->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack()); } else { op1 = impSIMDPopStack(TYP_BYREF); } assert(op1->TypeGet() == TYP_BYREF); return op1; } //------------------------------------------------------------------- // Set the flag that indicates that the lclVar referenced by this tree // is used in a SIMD intrinsic. // Arguments: // tree - GenTree* void Compiler::setLclRelatedToSIMDIntrinsic(GenTree* tree) { assert(tree->OperIsLocal()); LclVarDsc* lclVarDsc = lvaGetDesc(tree->AsLclVarCommon()); lclVarDsc->lvUsedInSIMDIntrinsic = true; } //------------------------------------------------------------- // Check if two field nodes reference at the same memory location. // Notice that this check is just based on pattern matching. // Arguments: // op1 - GenTree*. // op2 - GenTree*. // Return Value: // If op1's parents node and op2's parents node are at the same location, return true. Otherwise, return false bool areFieldsParentsLocatedSame(GenTree* op1, GenTree* op2) { assert(op1->OperGet() == GT_FIELD); assert(op2->OperGet() == GT_FIELD); GenTree* op1ObjRef = op1->AsField()->GetFldObj(); GenTree* op2ObjRef = op2->AsField()->GetFldObj(); while (op1ObjRef != nullptr && op2ObjRef != nullptr) { if (op1ObjRef->OperGet() != op2ObjRef->OperGet()) { break; } else if (op1ObjRef->OperGet() == GT_ADDR) { op1ObjRef = op1ObjRef->AsOp()->gtOp1; op2ObjRef = op2ObjRef->AsOp()->gtOp1; } if (op1ObjRef->OperIsLocal() && op2ObjRef->OperIsLocal() && op1ObjRef->AsLclVarCommon()->GetLclNum() == op2ObjRef->AsLclVarCommon()->GetLclNum()) { return true; } else if (op1ObjRef->OperGet() == GT_FIELD && op2ObjRef->OperGet() == GT_FIELD && op1ObjRef->AsField()->gtFldHnd == op2ObjRef->AsField()->gtFldHnd) { op1ObjRef = op1ObjRef->AsField()->GetFldObj(); op2ObjRef = op2ObjRef->AsField()->GetFldObj(); continue; } else { break; } } return false; } //---------------------------------------------------------------------- // Check whether two field are contiguous // Arguments: // first - GenTree*. The Type of the node should be TYP_FLOAT // second - GenTree*. The Type of the node should be TYP_FLOAT // Return Value: // if the first field is located before second field, and they are located contiguously, // then return true. Otherwise, return false. 
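//    For example, given 'struct S { float x; float y; }', s.x and s.y are contiguous because s.y's
//    offset equals s.x's offset plus sizeof(float) and both fields hang off the same parent
//    (see areFieldsParentsLocatedSame).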
bool Compiler::areFieldsContiguous(GenTree* first, GenTree* second)
{
    assert(first->OperGet() == GT_FIELD);
    assert(second->OperGet() == GT_FIELD);
    assert(first->gtType == TYP_FLOAT);
    assert(second->gtType == TYP_FLOAT);
    var_types firstFieldType  = first->gtType;
    var_types secondFieldType = second->gtType;

    unsigned firstFieldEndOffset = first->AsField()->gtFldOffset + genTypeSize(firstFieldType);
    unsigned secondFieldOffset   = second->AsField()->gtFldOffset;
    if (firstFieldEndOffset == secondFieldOffset && firstFieldType == secondFieldType &&
        areFieldsParentsLocatedSame(first, second))
    {
        return true;
    }

    return false;
}

//----------------------------------------------------------------------
// areLocalFieldsContiguous: Check whether two local fields are contiguous
//
// Arguments:
//    first - the first local field
//    second - the second local field
//
// Return Value:
//    If the first field is located before the second field, and they are located contiguously,
//    then return true. Otherwise, return false.
//
bool Compiler::areLocalFieldsContiguous(GenTreeLclFld* first, GenTreeLclFld* second)
{
    assert(first->TypeIs(TYP_FLOAT));
    assert(second->TypeIs(TYP_FLOAT));

    return (first->TypeGet() == second->TypeGet()) &&
           (first->GetLclOffs() + genTypeSize(first->TypeGet()) == second->GetLclOffs());
}

//-------------------------------------------------------------------------------
// Check whether two array element nodes are located contiguously or not.
// Arguments:
//    op1 - GenTree*.
//    op2 - GenTree*.
// Return Value:
//    If the array element op1 is located before array element op2, and they are contiguous,
//    then return true. Otherwise, return false.
// TODO-CQ:
//    Right now this can only check array elements indexed by an integer constant. In the future,
//    we should consider allowing this function to check indices given by expressions.
bool Compiler::areArrayElementsContiguous(GenTree* op1, GenTree* op2)
{
    noway_assert(op1->gtOper == GT_INDEX);
    noway_assert(op2->gtOper == GT_INDEX);
    GenTreeIndex* op1Index = op1->AsIndex();
    GenTreeIndex* op2Index = op2->AsIndex();

    GenTree* op1ArrayRef = op1Index->Arr();
    GenTree* op2ArrayRef = op2Index->Arr();
    assert(op1ArrayRef->TypeGet() == TYP_REF);
    assert(op2ArrayRef->TypeGet() == TYP_REF);

    GenTree* op1IndexNode = op1Index->Index();
    GenTree* op2IndexNode = op2Index->Index();
    if ((op1IndexNode->OperGet() == GT_CNS_INT && op2IndexNode->OperGet() == GT_CNS_INT) &&
        op1IndexNode->AsIntCon()->gtIconVal + 1 == op2IndexNode->AsIntCon()->gtIconVal)
    {
        if (op1ArrayRef->OperGet() == GT_FIELD && op2ArrayRef->OperGet() == GT_FIELD &&
            areFieldsParentsLocatedSame(op1ArrayRef, op2ArrayRef))
        {
            return true;
        }
        else if (op1ArrayRef->OperIsLocal() && op2ArrayRef->OperIsLocal() &&
                 op1ArrayRef->AsLclVarCommon()->GetLclNum() == op2ArrayRef->AsLclVarCommon()->GetLclNum())
        {
            return true;
        }
    }
    return false;
}

//-------------------------------------------------------------------------------
// Check whether two argument nodes are contiguous or not.
// Arguments:
//    op1 - GenTree*.
//    op2 - GenTree*.
// Return Value:
//    If the argument node op1 is located before argument node op2, and they are located contiguously,
//    then return true. Otherwise, return false.
// TODO-CQ:
//    Right now this can only check fields and array elements. In the future we should add more cases.
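//    For example, (s.x, s.y) for float fields of the same struct, (arr[i], arr[i+1]) for a constant i,
//    and adjacent float local fields are all recognized as contiguous argument pairs.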
// bool Compiler::areArgumentsContiguous(GenTree* op1, GenTree* op2) { if (op1->OperGet() == GT_INDEX && op2->OperGet() == GT_INDEX) { return areArrayElementsContiguous(op1, op2); } else if (op1->OperGet() == GT_FIELD && op2->OperGet() == GT_FIELD) { return areFieldsContiguous(op1, op2); } else if (op1->OperIs(GT_LCL_FLD) && op2->OperIs(GT_LCL_FLD)) { return areLocalFieldsContiguous(op1->AsLclFld(), op2->AsLclFld()); } return false; } //-------------------------------------------------------------------------------------------------------- // createAddressNodeForSIMDInit: Generate the address node(GT_LEA) if we want to intialize vector2, vector3 or vector4 // from first argument's address. // // Arguments: // tree - GenTree*. This the tree node which is used to get the address for indir. // simdsize - unsigned. This the simd vector size. // arrayElementsCount - unsigned. This is used for generating the boundary check for array. // // Return value: // return the address node. // // TODO-CQ: // 1. Currently just support for GT_FIELD and GT_INDEX, because we can only verify the GT_INDEX node or GT_Field // are located contiguously or not. In future we should support more cases. // 2. Though it happens to just work fine front-end phases are not aware of GT_LEA node. Therefore, convert these // to use GT_ADDR. GenTree* Compiler::createAddressNodeForSIMDInit(GenTree* tree, unsigned simdSize) { assert(tree->OperGet() == GT_FIELD || tree->OperGet() == GT_INDEX); GenTree* byrefNode = nullptr; GenTree* startIndex = nullptr; unsigned offset = 0; var_types baseType = tree->gtType; if (tree->OperGet() == GT_FIELD) { GenTree* objRef = tree->AsField()->GetFldObj(); if (objRef != nullptr && objRef->gtOper == GT_ADDR) { GenTree* obj = objRef->AsOp()->gtOp1; // If the field is directly from a struct, then in this case, // we should set this struct's lvUsedInSIMDIntrinsic as true, // so that this sturct won't be promoted. // e.g. s.x x is a field, and s is a struct, then we should set the s's lvUsedInSIMDIntrinsic as true. // so that s won't be promoted. // Notice that if we have a case like s1.s2.x. s1 s2 are struct, and x is a field, then it is possible that // s1 can be promoted, so that s2 can be promoted. The reason for that is if we don't allow s1 to be // promoted, then this will affect the other optimizations which are depend on s1's struct promotion. // TODO-CQ: // In future, we should optimize this case so that if there is a nested field like s1.s2.x and s1.s2.x's // address is used for initializing the vector, then s1 can be promoted but s2 can't. if (varTypeIsSIMD(obj) && obj->OperIsLocal()) { setLclRelatedToSIMDIntrinsic(obj); } } byrefNode = gtCloneExpr(tree->AsField()->GetFldObj()); assert(byrefNode != nullptr); offset = tree->AsField()->gtFldOffset; } else if (tree->OperGet() == GT_INDEX) { GenTree* index = tree->AsIndex()->Index(); assert(index->OperGet() == GT_CNS_INT); GenTree* checkIndexExpr = nullptr; unsigned indexVal = (unsigned)(index->AsIntCon()->gtIconVal); offset = indexVal * genTypeSize(tree->TypeGet()); GenTree* arrayRef = tree->AsIndex()->Arr(); // Generate the boundary check exception. 
// The length for boundary check should be the maximum index number which should be // (first argument's index number) + (how many array arguments we have) - 1 // = indexVal + arrayElementsCount - 1 unsigned arrayElementsCount = simdSize / genTypeSize(baseType); checkIndexExpr = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, indexVal + arrayElementsCount - 1); GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, arrayRef, (int)OFFSETOF__CORINFO_Array__length, compCurBB); GenTreeBoundsChk* arrBndsChk = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(checkIndexExpr, arrLen, SCK_ARG_RNG_EXCPN); offset += OFFSETOF__CORINFO_Array__data; byrefNode = gtNewOperNode(GT_COMMA, arrayRef->TypeGet(), arrBndsChk, gtCloneExpr(arrayRef)); } else { unreached(); } GenTree* address = new (this, GT_LEA) GenTreeAddrMode(TYP_BYREF, byrefNode, startIndex, genTypeSize(tree->TypeGet()), offset); return address; } //------------------------------------------------------------------------------- // impMarkContiguousSIMDFieldAssignments: Try to identify if there are contiguous // assignments from SIMD field to memory. If there are, then mark the related // lclvar so that it won't be promoted. // // Arguments: // stmt - GenTree*. Input statement node. void Compiler::impMarkContiguousSIMDFieldAssignments(Statement* stmt) { if (!featureSIMD || opts.OptimizationDisabled()) { return; } GenTree* expr = stmt->GetRootNode(); if (expr->OperGet() == GT_ASG && expr->TypeGet() == TYP_FLOAT) { GenTree* curDst = expr->AsOp()->gtOp1; GenTree* curSrc = expr->AsOp()->gtOp2; unsigned index = 0; CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; unsigned simdSize = 0; GenTree* srcSimdStructNode = getSIMDStructFromField(curSrc, &simdBaseJitType, &index, &simdSize, true); if (srcSimdStructNode == nullptr || simdBaseJitType != CORINFO_TYPE_FLOAT) { fgPreviousCandidateSIMDFieldAsgStmt = nullptr; } else if (index == 0 && isSIMDTypeLocal(srcSimdStructNode)) { fgPreviousCandidateSIMDFieldAsgStmt = stmt; } else if (fgPreviousCandidateSIMDFieldAsgStmt != nullptr) { assert(index > 0); var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); GenTree* prevAsgExpr = fgPreviousCandidateSIMDFieldAsgStmt->GetRootNode(); GenTree* prevDst = prevAsgExpr->AsOp()->gtOp1; GenTree* prevSrc = prevAsgExpr->AsOp()->gtOp2; if (!areArgumentsContiguous(prevDst, curDst) || !areArgumentsContiguous(prevSrc, curSrc)) { fgPreviousCandidateSIMDFieldAsgStmt = nullptr; } else { if (index == (simdSize / genTypeSize(simdBaseType) - 1)) { // Successfully found the pattern, mark the lclvar as UsedInSIMDIntrinsic if (srcSimdStructNode->OperIsLocal()) { setLclRelatedToSIMDIntrinsic(srcSimdStructNode); } if (curDst->OperGet() == GT_FIELD) { GenTree* objRef = curDst->AsField()->GetFldObj(); if (objRef != nullptr && objRef->gtOper == GT_ADDR) { GenTree* obj = objRef->AsOp()->gtOp1; if (varTypeIsStruct(obj) && obj->OperIsLocal()) { setLclRelatedToSIMDIntrinsic(obj); } } } } else { fgPreviousCandidateSIMDFieldAsgStmt = stmt; } } } } else { fgPreviousCandidateSIMDFieldAsgStmt = nullptr; } } //------------------------------------------------------------------------ // impSIMDIntrinsic: Check method to see if it is a SIMD method // // Arguments: // opcode - the opcode being handled (needed to identify the CEE_NEWOBJ case) // newobjThis - For CEE_NEWOBJ, this is the temp grabbed for the allocated uninitalized object. // clsHnd - The handle of the class of the method. // method - The handle of the method. // sig - The call signature for the method. 
// memberRef - The memberRef token for the method reference. // // Return Value: // If clsHnd is a known SIMD type, and 'method' is one of the methods that are // implemented as an intrinsic in the JIT, then return the tree that implements // it. // GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode, GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE methodHnd, CORINFO_SIG_INFO* sig, unsigned methodFlags, int memberRef) { assert(featureSIMD); // Exit early if we are not in one of the SIMD types. if (!isSIMDClass(clsHnd)) { return nullptr; } // Exit early if the method is not a JIT Intrinsic (which requires the [Intrinsic] attribute). if ((methodFlags & CORINFO_FLG_INTRINSIC) == 0) { return nullptr; } // Get base type and intrinsic Id CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; unsigned size = 0; unsigned argCount = 0; const SIMDIntrinsicInfo* intrinsicInfo = getSIMDIntrinsicInfo(&clsHnd, methodHnd, sig, (opcode == CEE_NEWOBJ), &argCount, &simdBaseJitType, &size); // Exit early if the intrinsic is invalid or unrecognized if ((intrinsicInfo == nullptr) || (intrinsicInfo->id == SIMDIntrinsicInvalid)) { return nullptr; } if (!IsBaselineSimdIsaSupported()) { // The user disabled support for the baseline ISA so // don't emit any SIMD intrinsics as they all require // this at a minimum. We will, however, return false // for IsHardwareAccelerated as that will help with // dead code elimination. return (intrinsicInfo->id == SIMDIntrinsicHWAccel) ? gtNewIconNode(0, TYP_INT) : nullptr; } SIMDIntrinsicID simdIntrinsicID = intrinsicInfo->id; var_types simdBaseType; var_types simdType; if (simdBaseJitType != CORINFO_TYPE_UNDEF) { simdBaseType = JitType2PreciseVarType(simdBaseJitType); simdType = getSIMDTypeForSize(size); } else { assert(simdIntrinsicID == SIMDIntrinsicHWAccel); simdBaseType = TYP_UNKNOWN; simdType = TYP_UNKNOWN; } bool instMethod = intrinsicInfo->isInstMethod; var_types callType = JITtype2varType(sig->retType); if (callType == TYP_STRUCT) { // Note that here we are assuming that, if the call returns a struct, that it is the same size as the // struct on which the method is declared. This is currently true for all methods on Vector types, // but if this ever changes, we will need to determine the callType from the signature. assert(info.compCompHnd->getClassSize(sig->retTypeClass) == genTypeSize(simdType)); callType = simdType; } GenTree* simdTree = nullptr; GenTree* op1 = nullptr; GenTree* op2 = nullptr; GenTree* op3 = nullptr; GenTree* retVal = nullptr; GenTree* copyBlkDst = nullptr; bool doCopyBlk = false; switch (simdIntrinsicID) { case SIMDIntrinsicInit: case SIMDIntrinsicInitN: { // SIMDIntrinsicInit: // op2 - the initializer value // op1 - byref of vector // // SIMDIntrinsicInitN // op2 - list of initializer values stitched into a list // op1 - byref of vector IntrinsicNodeBuilder nodeBuilder(getAllocator(CMK_ASTNode), argCount - 1); bool initFromFirstArgIndir = false; if (simdIntrinsicID == SIMDIntrinsicInit) { op2 = impSIMDPopStack(simdBaseType); nodeBuilder.AddOperand(0, op2); } else { assert(simdIntrinsicID == SIMDIntrinsicInitN); assert(simdBaseType == TYP_FLOAT); unsigned initCount = argCount - 1; unsigned elementCount = getSIMDVectorLength(size, simdBaseType); noway_assert(initCount == elementCount); // Build an array with the N values. // We must maintain left-to-right order of the args, but we will pop // them off in reverse order (the Nth arg was pushed onto the stack last). 
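// For example, for 'new Vector4(x, y, z, w)', initCount == 4: w is popped first but stored at operand
// index 3, so the operands end up in source order x, y, z, w.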
GenTree* prevArg = nullptr; bool areArgsContiguous = true; for (unsigned i = 0; i < initCount; i++) { GenTree* arg = impSIMDPopStack(simdBaseType); if (areArgsContiguous) { GenTree* curArg = arg; if (prevArg != nullptr) { // Recall that we are popping the args off the stack in reverse order. areArgsContiguous = areArgumentsContiguous(curArg, prevArg); } prevArg = curArg; } assert(genActualType(arg) == genActualType(simdBaseType)); nodeBuilder.AddOperand(initCount - i - 1, arg); } if (areArgsContiguous && simdBaseType == TYP_FLOAT) { // Since Vector2, Vector3 and Vector4's arguments type are only float, // we intialize the vector from first argument address, only when // the simdBaseType is TYP_FLOAT and the arguments are located contiguously in memory initFromFirstArgIndir = true; GenTree* op2Address = createAddressNodeForSIMDInit(nodeBuilder.GetOperand(0), size); var_types simdType = getSIMDTypeForSize(size); op2 = gtNewOperNode(GT_IND, simdType, op2Address); } } op1 = getOp1ForConstructor(opcode, newobjThis, clsHnd); assert(op1->TypeGet() == TYP_BYREF); // For integral base types of size less than TYP_INT, expand the initializer // to fill size of TYP_INT bytes. if (varTypeIsSmallInt(simdBaseType)) { // This case should occur only for Init intrinsic. assert(simdIntrinsicID == SIMDIntrinsicInit); unsigned baseSize = genTypeSize(simdBaseType); int multiplier; if (baseSize == 1) { multiplier = 0x01010101; } else { assert(baseSize == 2); multiplier = 0x00010001; } GenTree* t1 = nullptr; if (simdBaseType == TYP_BYTE) { // What we have is a signed byte initializer, // which when loaded to a reg will get sign extended to TYP_INT. // But what we need is the initializer without sign extended or // rather zero extended to 32-bits. t1 = gtNewOperNode(GT_AND, TYP_INT, op2, gtNewIconNode(0xff, TYP_INT)); } else if (simdBaseType == TYP_SHORT) { // What we have is a signed short initializer, // which when loaded to a reg will get sign extended to TYP_INT. // But what we need is the initializer without sign extended or // rather zero extended to 32-bits. t1 = gtNewOperNode(GT_AND, TYP_INT, op2, gtNewIconNode(0xffff, TYP_INT)); } else { // TODO-Casts: this cast is useless. assert(simdBaseType == TYP_UBYTE || simdBaseType == TYP_USHORT); t1 = gtNewCastNode(TYP_INT, op2, false, TYP_INT); } assert(t1 != nullptr); GenTree* t2 = gtNewIconNode(multiplier, TYP_INT); op2 = gtNewOperNode(GT_MUL, TYP_INT, t1, t2); // Construct a vector of TYP_INT with the new initializer and cast it back to vector of simdBaseType simdTree = gtNewSIMDNode(simdType, op2, simdIntrinsicID, CORINFO_TYPE_INT, size); simdTree = gtNewSIMDNode(simdType, simdTree, SIMDIntrinsicCast, simdBaseJitType, size); } else { if (initFromFirstArgIndir) { simdTree = op2; if (op1->AsOp()->gtOp1->OperIsLocal()) { // label the dst struct's lclvar is used for SIMD intrinsic, // so that this dst struct won't be promoted. setLclRelatedToSIMDIntrinsic(op1->AsOp()->gtOp1); } } else { simdTree = new (this, GT_SIMD) GenTreeSIMD(simdType, std::move(nodeBuilder), simdIntrinsicID, simdBaseJitType, size); } } copyBlkDst = op1; doCopyBlk = true; } break; case SIMDIntrinsicInitArray: case SIMDIntrinsicInitArrayX: case SIMDIntrinsicCopyToArray: case SIMDIntrinsicCopyToArrayX: { // op3 - index into array in case of SIMDIntrinsicCopyToArrayX and SIMDIntrinsicInitArrayX // op2 - array itself // op1 - byref to vector struct unsigned int vectorLength = getSIMDVectorLength(size, simdBaseType); // (This constructor takes only the zero-based arrays.) 
// We will add one or two bounds checks: // 1. If we have an index, we must do a check on that first. // We can't combine it with the index + vectorLength check because // a. It might be negative, and b. It may need to raise a different exception // (captured as SCK_ARG_RNG_EXCPN for CopyTo and Init). // 2. We need to generate a check (SCK_ARG_EXCPN for CopyTo and Init) // for the last array element we will access. // We'll either check against (vectorLength - 1) or (index + vectorLength - 1). GenTree* checkIndexExpr = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, vectorLength - 1); // Get the index into the array. If it has been provided, it will be on the // top of the stack. Otherwise, it is null. if (argCount == 3) { op3 = impSIMDPopStack(TYP_INT); if (op3->IsIntegralConst(0)) { op3 = nullptr; } } else { // TODO-CQ: Here, or elsewhere, check for the pattern where op2 is a newly constructed array, and // change this to the InitN form. // op3 = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, 0); op3 = nullptr; } // Clone the array for use in the bounds check. op2 = impSIMDPopStack(TYP_REF); assert(op2->TypeGet() == TYP_REF); GenTree* arrayRefForArgChk = op2; GenTree* argRngChk = nullptr; if ((arrayRefForArgChk->gtFlags & GTF_SIDE_EFFECT) != 0) { op2 = fgInsertCommaFormTemp(&arrayRefForArgChk); } else { op2 = gtCloneExpr(arrayRefForArgChk); } assert(op2 != nullptr); if (op3 != nullptr) { // We need to use the original expression on this, which is the first check. GenTree* arrayRefForArgRngChk = arrayRefForArgChk; // Then we clone the clone we just made for the next check. arrayRefForArgChk = gtCloneExpr(op2); // We know we MUST have had a cloneable expression. assert(arrayRefForArgChk != nullptr); GenTree* index = op3; if ((index->gtFlags & GTF_SIDE_EFFECT) != 0) { op3 = fgInsertCommaFormTemp(&index); } else { op3 = gtCloneExpr(index); } GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, arrayRefForArgRngChk, (int)OFFSETOF__CORINFO_Array__length, compCurBB); argRngChk = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(index, arrLen, SCK_ARG_RNG_EXCPN); // Now, clone op3 to create another node for the argChk GenTree* index2 = gtCloneExpr(op3); assert(index != nullptr); checkIndexExpr = gtNewOperNode(GT_ADD, TYP_INT, index2, checkIndexExpr); } // Insert a bounds check for index + offset - 1. // This must be a "normal" array. SpecialCodeKind op2CheckKind; if (simdIntrinsicID == SIMDIntrinsicInitArray || simdIntrinsicID == SIMDIntrinsicInitArrayX) { op2CheckKind = SCK_ARG_RNG_EXCPN; } else { op2CheckKind = SCK_ARG_EXCPN; } GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, arrayRefForArgChk, (int)OFFSETOF__CORINFO_Array__length, compCurBB); GenTreeBoundsChk* argChk = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(checkIndexExpr, arrLen, op2CheckKind); // Create a GT_COMMA tree for the bounds check(s). op2 = gtNewOperNode(GT_COMMA, op2->TypeGet(), argChk, op2); if (argRngChk != nullptr) { op2 = gtNewOperNode(GT_COMMA, op2->TypeGet(), argRngChk, op2); } if (simdIntrinsicID == SIMDIntrinsicInitArray || simdIntrinsicID == SIMDIntrinsicInitArrayX) { op1 = getOp1ForConstructor(opcode, newobjThis, clsHnd); simdTree = (op3 != nullptr) ? 
gtNewSIMDNode(simdType, op2, op3, SIMDIntrinsicInitArray, simdBaseJitType, size) : gtNewSIMDNode(simdType, op2, SIMDIntrinsicInitArray, simdBaseJitType, size); copyBlkDst = op1; doCopyBlk = true; } else { assert(simdIntrinsicID == SIMDIntrinsicCopyToArray || simdIntrinsicID == SIMDIntrinsicCopyToArrayX); op1 = impSIMDPopStack(simdType, instMethod); assert(op1->TypeGet() == simdType); // copy vector (op1) to array (op2) starting at index (op3) simdTree = op1; // TODO-Cleanup: Though it happens to just work fine front-end phases are not aware of GT_LEA node. // Therefore, convert these to use GT_ADDR . copyBlkDst = new (this, GT_LEA) GenTreeAddrMode(TYP_BYREF, op2, op3, genTypeSize(simdBaseType), OFFSETOF__CORINFO_Array__data); doCopyBlk = true; } } break; case SIMDIntrinsicInitFixed: { // We are initializing a fixed-length vector VLarge with a smaller fixed-length vector VSmall, plus 1 or 2 // additional floats. // op4 (optional) - float value for VLarge.W, if VLarge is Vector4, and VSmall is Vector2 // op3 - float value for VLarge.Z or VLarge.W // op2 - VSmall // op1 - byref of VLarge assert(simdBaseType == TYP_FLOAT); GenTree* op4 = nullptr; if (argCount == 4) { op4 = impSIMDPopStack(TYP_FLOAT); assert(op4->TypeGet() == TYP_FLOAT); } op3 = impSIMDPopStack(TYP_FLOAT); assert(op3->TypeGet() == TYP_FLOAT); // The input vector will either be TYP_SIMD8 or TYP_SIMD12. var_types smallSIMDType = TYP_SIMD8; if ((op4 == nullptr) && (simdType == TYP_SIMD16)) { smallSIMDType = TYP_SIMD12; } op2 = impSIMDPopStack(smallSIMDType); op1 = getOp1ForConstructor(opcode, newobjThis, clsHnd); // We are going to redefine the operands so that: // - op3 is the value that's going into the Z position, or null if it's a Vector4 constructor with a single // operand, and // - op4 is the W position value, or null if this is a Vector3 constructor. if (size == 16 && argCount == 3) { op4 = op3; op3 = nullptr; } simdTree = op2; if (op3 != nullptr) { simdTree = gtNewSimdWithElementNode(simdType, simdTree, gtNewIconNode(2, TYP_INT), op3, simdBaseJitType, size, /* isSimdAsHWIntrinsic */ true); } if (op4 != nullptr) { simdTree = gtNewSimdWithElementNode(simdType, simdTree, gtNewIconNode(3, TYP_INT), op4, simdBaseJitType, size, /* isSimdAsHWIntrinsic */ true); } copyBlkDst = op1; doCopyBlk = true; } break; case SIMDIntrinsicEqual: { op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType, instMethod); SIMDIntrinsicID intrinsicID = impSIMDRelOp(simdIntrinsicID, clsHnd, size, &simdBaseJitType, &op1, &op2); simdTree = gtNewSIMDNode(genActualType(callType), op1, op2, intrinsicID, simdBaseJitType, size); retVal = simdTree; } break; case SIMDIntrinsicSub: case SIMDIntrinsicBitwiseAnd: case SIMDIntrinsicBitwiseOr: { // op1 is the first operand; if instance method, op1 is "this" arg // op2 is the second operand op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType, instMethod); simdTree = gtNewSIMDNode(simdType, op1, op2, simdIntrinsicID, simdBaseJitType, size); retVal = simdTree; } break; // Unary operators that take and return a Vector. 
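// SIMDIntrinsicCast corresponds to the reinterpreting Vector<T> conversions: the bits of the source
// vector are unchanged and only the base type recorded on the node differs.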
case SIMDIntrinsicCast: { op1 = impSIMDPopStack(simdType, instMethod); simdTree = gtNewSIMDNode(simdType, op1, simdIntrinsicID, simdBaseJitType, size); retVal = simdTree; } break; case SIMDIntrinsicHWAccel: { GenTreeIntCon* intConstTree = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, 1); retVal = intConstTree; } break; default: assert(!"Unimplemented SIMD Intrinsic"); return nullptr; } #if defined(TARGET_XARCH) || defined(TARGET_ARM64) // XArch/Arm64: also indicate that we use floating point registers. // The need for setting this here is that a method may not have SIMD // type lclvars, but might be exercising SIMD intrinsics on fields of // SIMD type. // // e.g. public Vector<float> ComplexVecFloat::sqabs() { return this.r * this.r + this.i * this.i; } compFloatingPointUsed = true; #endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) // At this point, we have a tree that we are going to store into a destination. // TODO-1stClassStructs: This should be a simple store or assignment, and should not require // GTF_ALL_EFFECT for the dest. This is currently emulating the previous behavior of // block ops. if (doCopyBlk) { GenTree* dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, simdType, copyBlkDst, typGetBlkLayout(getSIMDTypeSizeInBytes(clsHnd))); dest->gtFlags |= GTF_GLOB_REF; retVal = gtNewBlkOpNode(dest, simdTree, false, // not volatile true); // copyBlock retVal->gtFlags |= ((simdTree->gtFlags | copyBlkDst->gtFlags) & GTF_ALL_EFFECT); } return retVal; } #endif // FEATURE_SIMD
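// Note on the constructor-style intrinsics handled above: when doCopyBlk is true, the returned tree is
// a block copy of the form ASG(BLK(copyBlkDst), simdTree), i.e. the constructed SIMD value is stored
// through the byref of the vector being initialized.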
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // SIMD Support // // IMPORTANT NOTES AND CAVEATS: // // This implementation is preliminary, and may change dramatically. // // New JIT types, TYP_SIMDxx, are introduced, and the SIMD intrinsics are created as GT_SIMD nodes. // Nodes of SIMD types will be typed as TYP_SIMD* (e.g. TYP_SIMD8, TYP_SIMD16, etc.). // // Note that currently the "reference implementation" is the same as the runtime dll. As such, it is currently // providing implementations for those methods not currently supported by the JIT as intrinsics. // // These are currently recognized using string compares, in order to provide an implementation in the JIT // without taking a dependency on the VM. // Furthermore, in the CTP, in order to limit the impact of doing these string compares // against assembly names, we only look for the SIMDVector assembly if we are compiling a class constructor. This // makes it somewhat more "pay for play" but is a significant usability compromise. // This has been addressed for RTM by doing the assembly recognition in the VM. // -------------------------------------------------------------------------------------- #include "jitpch.h" #include "simd.h" #ifdef _MSC_VER #pragma hdrstop #endif #ifdef FEATURE_SIMD // Intrinsic Id to intrinsic info map const SIMDIntrinsicInfo simdIntrinsicInfoArray[] = { #define SIMD_INTRINSIC(mname, inst, id, name, retType, argCount, arg1, arg2, arg3, t1, t2, t3, t4, t5, t6, t7, t8, t9, \ t10) \ {SIMDIntrinsic##id, mname, inst, retType, argCount, arg1, arg2, arg3, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10}, #include "simdintrinsiclist.h" }; //------------------------------------------------------------------------ // getSIMDVectorLength: Get the length (number of elements of base type) of // SIMD Vector given its size and base (element) type. // // Arguments: // simdSize - size of the SIMD vector // baseType - type of the elements of the SIMD vector // // static int Compiler::getSIMDVectorLength(unsigned simdSize, var_types baseType) { return simdSize / genTypeSize(baseType); } //------------------------------------------------------------------------ // Get the length (number of elements of base type) of SIMD Vector given by typeHnd. // // Arguments: // typeHnd - type handle of the SIMD vector // int Compiler::getSIMDVectorLength(CORINFO_CLASS_HANDLE typeHnd) { unsigned sizeBytes = 0; CorInfoType baseJitType = getBaseJitTypeAndSizeOfSIMDType(typeHnd, &sizeBytes); var_types baseType = JitType2PreciseVarType(baseJitType); return getSIMDVectorLength(sizeBytes, baseType); } //------------------------------------------------------------------------ // Get the preferred alignment of SIMD vector type for better performance. // // Arguments: // typeHnd - type handle of the SIMD vector // int Compiler::getSIMDTypeAlignment(var_types simdType) { unsigned size = genTypeSize(simdType); #ifdef TARGET_XARCH // Fixed length vectors have the following alignment preference // Vector2 = 8 byte alignment // Vector3/4 = 16-byte alignment // preferred alignment for SSE2 128-bit vectors is 16-bytes if (size == 8) { return 8; } else if (size <= 16) { assert((size == 12) || (size == 16)); return 16; } else { assert(size == 32); return 32; } #elif defined(TARGET_ARM64) // preferred alignment for 64-bit vectors is 8-bytes. // For everything else, 16-bytes. return (size == 8) ? 
8 : 16; #else assert(!"getSIMDTypeAlignment() unimplemented on target arch"); unreached(); #endif } //------------------------------------------------------------------------ // Get, and allocate if necessary, the SIMD temp used for various operations. // The temp is allocated as the maximum sized type of all operations required. // // Arguments: // simdType - Required SIMD type // // Returns: // The temp number // unsigned Compiler::getSIMDInitTempVarNum(var_types simdType) { if (lvaSIMDInitTempVarNum == BAD_VAR_NUM) { JITDUMP("Allocating SIMDInitTempVar as %s\n", varTypeName(simdType)); lvaSIMDInitTempVarNum = lvaGrabTempWithImplicitUse(false DEBUGARG("SIMDInitTempVar")); lvaTable[lvaSIMDInitTempVarNum].lvType = simdType; } else if (genTypeSize(lvaTable[lvaSIMDInitTempVarNum].lvType) < genTypeSize(simdType)) { // We want the largest required type size for the temp. JITDUMP("Increasing SIMDInitTempVar type size from %s to %s\n", varTypeName(lvaTable[lvaSIMDInitTempVarNum].lvType), varTypeName(simdType)); lvaTable[lvaSIMDInitTempVarNum].lvType = simdType; } return lvaSIMDInitTempVarNum; } //---------------------------------------------------------------------------------- // Return the base type and size of SIMD vector type given its type handle. // // Arguments: // typeHnd - The handle of the type we're interested in. // sizeBytes - out param // // Return Value: // base type of SIMD vector. // sizeBytes if non-null is set to size in bytes. // // Notes: // If the size of the struct is already known call structSizeMightRepresentSIMDType // to determine if this api needs to be called. // // TODO-Throughput: current implementation parses class name to find base type. Change // this when we implement SIMD intrinsic identification for the final // product. CorInfoType Compiler::getBaseJitTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, unsigned* sizeBytes /*= nullptr */) { assert(supportSIMDTypes()); if (m_simdHandleCache == nullptr) { if (impInlineInfo == nullptr) { m_simdHandleCache = new (this, CMK_Generic) SIMDHandlesCache(); } else { // Steal the inliner compiler's cache (create it if not available). 
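// Sharing the root compiler's cache means SIMD type handles resolved while importing an inlinee are
// also visible to the root compilation, and vice versa.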
if (impInlineInfo->InlineRoot->m_simdHandleCache == nullptr) { impInlineInfo->InlineRoot->m_simdHandleCache = new (this, CMK_Generic) SIMDHandlesCache(); } m_simdHandleCache = impInlineInfo->InlineRoot->m_simdHandleCache; } } if (typeHnd == nullptr) { return CORINFO_TYPE_UNDEF; } // fast path search using cached type handles of important types CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; unsigned size = 0; // TODO - Optimize SIMD type recognition by IntrinsicAttribute if (isSIMDClass(typeHnd)) { // The most likely to be used type handles are looked up first followed by // less likely to be used type handles if (typeHnd == m_simdHandleCache->SIMDFloatHandle) { simdBaseJitType = CORINFO_TYPE_FLOAT; size = getSIMDVectorRegisterByteLength(); JITDUMP(" Known type SIMD Vector<Float>\n"); } else if (typeHnd == m_simdHandleCache->SIMDIntHandle) { simdBaseJitType = CORINFO_TYPE_INT; size = getSIMDVectorRegisterByteLength(); JITDUMP(" Known type SIMD Vector<Int>\n"); } else if (typeHnd == m_simdHandleCache->SIMDVector2Handle) { simdBaseJitType = CORINFO_TYPE_FLOAT; size = 2 * genTypeSize(TYP_FLOAT); assert(size == roundUp(info.compCompHnd->getClassSize(typeHnd), TARGET_POINTER_SIZE)); JITDUMP(" Known type Vector2\n"); } else if (typeHnd == m_simdHandleCache->SIMDVector3Handle) { simdBaseJitType = CORINFO_TYPE_FLOAT; size = 3 * genTypeSize(TYP_FLOAT); assert(size == info.compCompHnd->getClassSize(typeHnd)); JITDUMP(" Known type Vector3\n"); } else if (typeHnd == m_simdHandleCache->SIMDVector4Handle) { simdBaseJitType = CORINFO_TYPE_FLOAT; size = 4 * genTypeSize(TYP_FLOAT); assert(size == roundUp(info.compCompHnd->getClassSize(typeHnd), TARGET_POINTER_SIZE)); JITDUMP(" Known type Vector4\n"); } else if (typeHnd == m_simdHandleCache->SIMDVectorHandle) { size = getSIMDVectorRegisterByteLength(); JITDUMP(" Known type Vector\n"); } else if (typeHnd == m_simdHandleCache->SIMDUShortHandle) { simdBaseJitType = CORINFO_TYPE_USHORT; size = getSIMDVectorRegisterByteLength(); JITDUMP(" Known type SIMD Vector<ushort>\n"); } else if (typeHnd == m_simdHandleCache->SIMDUByteHandle) { simdBaseJitType = CORINFO_TYPE_UBYTE; size = getSIMDVectorRegisterByteLength(); JITDUMP(" Known type SIMD Vector<ubyte>\n"); } else if (typeHnd == m_simdHandleCache->SIMDDoubleHandle) { simdBaseJitType = CORINFO_TYPE_DOUBLE; size = getSIMDVectorRegisterByteLength(); JITDUMP(" Known type SIMD Vector<Double>\n"); } else if (typeHnd == m_simdHandleCache->SIMDLongHandle) { simdBaseJitType = CORINFO_TYPE_LONG; size = getSIMDVectorRegisterByteLength(); JITDUMP(" Known type SIMD Vector<Long>\n"); } else if (typeHnd == m_simdHandleCache->SIMDShortHandle) { simdBaseJitType = CORINFO_TYPE_SHORT; size = getSIMDVectorRegisterByteLength(); JITDUMP(" Known type SIMD Vector<short>\n"); } else if (typeHnd == m_simdHandleCache->SIMDByteHandle) { simdBaseJitType = CORINFO_TYPE_BYTE; size = getSIMDVectorRegisterByteLength(); JITDUMP(" Known type SIMD Vector<byte>\n"); } else if (typeHnd == m_simdHandleCache->SIMDUIntHandle) { simdBaseJitType = CORINFO_TYPE_UINT; size = getSIMDVectorRegisterByteLength(); JITDUMP(" Known type SIMD Vector<uint>\n"); } else if (typeHnd == m_simdHandleCache->SIMDULongHandle) { simdBaseJitType = CORINFO_TYPE_ULONG; size = getSIMDVectorRegisterByteLength(); JITDUMP(" Known type SIMD Vector<ulong>\n"); } else if (typeHnd == m_simdHandleCache->SIMDNIntHandle) { simdBaseJitType = CORINFO_TYPE_NATIVEINT; size = getSIMDVectorRegisterByteLength(); JITDUMP(" Known type SIMD Vector<nint>\n"); } else if (typeHnd == 
m_simdHandleCache->SIMDNUIntHandle) { simdBaseJitType = CORINFO_TYPE_NATIVEUINT; size = getSIMDVectorRegisterByteLength(); JITDUMP(" Known type SIMD Vector<nuint>\n"); } // slow path search if (simdBaseJitType == CORINFO_TYPE_UNDEF) { // Doesn't match with any of the cached type handles. // Obtain base type by parsing fully qualified class name. // // TODO-Throughput: implement product shipping solution to query base type. WCHAR className[256] = {0}; WCHAR* pbuf = &className[0]; int len = ArrLen(className); info.compCompHnd->appendClassName((char16_t**)&pbuf, &len, typeHnd, true, false, false); noway_assert(pbuf < &className[256]); JITDUMP("SIMD Candidate Type %S\n", className); if (wcsncmp(className, W("System.Numerics."), 16) == 0) { if (wcsncmp(&(className[16]), W("Vector`1["), 9) == 0) { size = getSIMDVectorRegisterByteLength(); if (wcsncmp(&(className[25]), W("System.Single"), 13) == 0) { m_simdHandleCache->SIMDFloatHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_FLOAT; JITDUMP(" Found type SIMD Vector<Float>\n"); } else if (wcsncmp(&(className[25]), W("System.Int32"), 12) == 0) { m_simdHandleCache->SIMDIntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_INT; JITDUMP(" Found type SIMD Vector<Int>\n"); } else if (wcsncmp(&(className[25]), W("System.UInt16"), 13) == 0) { m_simdHandleCache->SIMDUShortHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_USHORT; JITDUMP(" Found type SIMD Vector<ushort>\n"); } else if (wcsncmp(&(className[25]), W("System.Byte"), 11) == 0) { m_simdHandleCache->SIMDUByteHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_UBYTE; JITDUMP(" Found type SIMD Vector<ubyte>\n"); } else if (wcsncmp(&(className[25]), W("System.Double"), 13) == 0) { m_simdHandleCache->SIMDDoubleHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_DOUBLE; JITDUMP(" Found type SIMD Vector<Double>\n"); } else if (wcsncmp(&(className[25]), W("System.Int64"), 12) == 0) { m_simdHandleCache->SIMDLongHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_LONG; JITDUMP(" Found type SIMD Vector<Long>\n"); } else if (wcsncmp(&(className[25]), W("System.Int16"), 12) == 0) { m_simdHandleCache->SIMDShortHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_SHORT; JITDUMP(" Found type SIMD Vector<short>\n"); } else if (wcsncmp(&(className[25]), W("System.SByte"), 12) == 0) { m_simdHandleCache->SIMDByteHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_BYTE; JITDUMP(" Found type SIMD Vector<byte>\n"); } else if (wcsncmp(&(className[25]), W("System.UInt32"), 13) == 0) { m_simdHandleCache->SIMDUIntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_UINT; JITDUMP(" Found type SIMD Vector<uint>\n"); } else if (wcsncmp(&(className[25]), W("System.UInt64"), 13) == 0) { m_simdHandleCache->SIMDULongHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_ULONG; JITDUMP(" Found type SIMD Vector<ulong>\n"); } else if (wcsncmp(&(className[25]), W("System.IntPtr"), 13) == 0) { m_simdHandleCache->SIMDNIntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_NATIVEINT; JITDUMP(" Found type SIMD Vector<nint>\n"); } else if (wcsncmp(&(className[25]), W("System.UIntPtr"), 14) == 0) { m_simdHandleCache->SIMDNUIntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_NATIVEUINT; JITDUMP(" Found type SIMD Vector<nuint>\n"); } else { JITDUMP(" Unknown SIMD Vector<T>\n"); } } else if (wcsncmp(&(className[16]), W("Vector2"), 8) == 0) { m_simdHandleCache->SIMDVector2Handle = typeHnd; simdBaseJitType = CORINFO_TYPE_FLOAT; size = 2 * genTypeSize(TYP_FLOAT); assert(size == roundUp(info.compCompHnd->getClassSize(typeHnd), TARGET_POINTER_SIZE)); JITDUMP(" Found Vector2\n"); 
} else if (wcsncmp(&(className[16]), W("Vector3"), 8) == 0) { m_simdHandleCache->SIMDVector3Handle = typeHnd; simdBaseJitType = CORINFO_TYPE_FLOAT; size = 3 * genTypeSize(TYP_FLOAT); assert(size == info.compCompHnd->getClassSize(typeHnd)); JITDUMP(" Found Vector3\n"); } else if (wcsncmp(&(className[16]), W("Vector4"), 8) == 0) { m_simdHandleCache->SIMDVector4Handle = typeHnd; simdBaseJitType = CORINFO_TYPE_FLOAT; size = 4 * genTypeSize(TYP_FLOAT); assert(size == roundUp(info.compCompHnd->getClassSize(typeHnd), TARGET_POINTER_SIZE)); JITDUMP(" Found Vector4\n"); } else if (wcsncmp(&(className[16]), W("Vector"), 6) == 0) { m_simdHandleCache->SIMDVectorHandle = typeHnd; size = getSIMDVectorRegisterByteLength(); JITDUMP(" Found type Vector\n"); } else { JITDUMP(" Unknown SIMD Type\n"); } } } } #ifdef FEATURE_HW_INTRINSICS else if (isIntrinsicType(typeHnd)) { const size_t Vector64SizeBytes = 64 / 8; const size_t Vector128SizeBytes = 128 / 8; const size_t Vector256SizeBytes = 256 / 8; #if defined(TARGET_XARCH) static_assert_no_msg(YMM_REGSIZE_BYTES == Vector256SizeBytes); static_assert_no_msg(XMM_REGSIZE_BYTES == Vector128SizeBytes); if (typeHnd == m_simdHandleCache->Vector256FloatHandle) { simdBaseJitType = CORINFO_TYPE_FLOAT; size = Vector256SizeBytes; JITDUMP(" Known type Vector256<float>\n"); } else if (typeHnd == m_simdHandleCache->Vector256DoubleHandle) { simdBaseJitType = CORINFO_TYPE_DOUBLE; size = Vector256SizeBytes; JITDUMP(" Known type Vector256<double>\n"); } else if (typeHnd == m_simdHandleCache->Vector256IntHandle) { simdBaseJitType = CORINFO_TYPE_INT; size = Vector256SizeBytes; JITDUMP(" Known type Vector256<int>\n"); } else if (typeHnd == m_simdHandleCache->Vector256UIntHandle) { simdBaseJitType = CORINFO_TYPE_UINT; size = Vector256SizeBytes; JITDUMP(" Known type Vector256<uint>\n"); } else if (typeHnd == m_simdHandleCache->Vector256ShortHandle) { simdBaseJitType = CORINFO_TYPE_SHORT; size = Vector256SizeBytes; JITDUMP(" Known type Vector256<short>\n"); } else if (typeHnd == m_simdHandleCache->Vector256UShortHandle) { simdBaseJitType = CORINFO_TYPE_USHORT; size = Vector256SizeBytes; JITDUMP(" Known type Vector256<ushort>\n"); } else if (typeHnd == m_simdHandleCache->Vector256ByteHandle) { simdBaseJitType = CORINFO_TYPE_BYTE; size = Vector256SizeBytes; JITDUMP(" Known type Vector256<sbyte>\n"); } else if (typeHnd == m_simdHandleCache->Vector256UByteHandle) { simdBaseJitType = CORINFO_TYPE_UBYTE; size = Vector256SizeBytes; JITDUMP(" Known type Vector256<byte>\n"); } else if (typeHnd == m_simdHandleCache->Vector256LongHandle) { simdBaseJitType = CORINFO_TYPE_LONG; size = Vector256SizeBytes; JITDUMP(" Known type Vector256<long>\n"); } else if (typeHnd == m_simdHandleCache->Vector256ULongHandle) { simdBaseJitType = CORINFO_TYPE_ULONG; size = Vector256SizeBytes; JITDUMP(" Known type Vector256<ulong>\n"); } else if (typeHnd == m_simdHandleCache->Vector256NIntHandle) { simdBaseJitType = CORINFO_TYPE_NATIVEINT; size = Vector256SizeBytes; JITDUMP(" Known type Vector256<nint>\n"); } else if (typeHnd == m_simdHandleCache->Vector256NUIntHandle) { simdBaseJitType = CORINFO_TYPE_NATIVEUINT; size = Vector256SizeBytes; JITDUMP(" Known type Vector256<nuint>\n"); } else #endif // defined(TARGET_XARCH) if (typeHnd == m_simdHandleCache->Vector128FloatHandle) { simdBaseJitType = CORINFO_TYPE_FLOAT; size = Vector128SizeBytes; JITDUMP(" Known type Vector128<float>\n"); } else if (typeHnd == m_simdHandleCache->Vector128DoubleHandle) { simdBaseJitType = CORINFO_TYPE_DOUBLE; size = Vector128SizeBytes; 
JITDUMP(" Known type Vector128<double>\n"); } else if (typeHnd == m_simdHandleCache->Vector128IntHandle) { simdBaseJitType = CORINFO_TYPE_INT; size = Vector128SizeBytes; JITDUMP(" Known type Vector128<int>\n"); } else if (typeHnd == m_simdHandleCache->Vector128UIntHandle) { simdBaseJitType = CORINFO_TYPE_UINT; size = Vector128SizeBytes; JITDUMP(" Known type Vector128<uint>\n"); } else if (typeHnd == m_simdHandleCache->Vector128ShortHandle) { simdBaseJitType = CORINFO_TYPE_SHORT; size = Vector128SizeBytes; JITDUMP(" Known type Vector128<short>\n"); } else if (typeHnd == m_simdHandleCache->Vector128UShortHandle) { simdBaseJitType = CORINFO_TYPE_USHORT; size = Vector128SizeBytes; JITDUMP(" Known type Vector128<ushort>\n"); } else if (typeHnd == m_simdHandleCache->Vector128ByteHandle) { simdBaseJitType = CORINFO_TYPE_BYTE; size = Vector128SizeBytes; JITDUMP(" Known type Vector128<sbyte>\n"); } else if (typeHnd == m_simdHandleCache->Vector128UByteHandle) { simdBaseJitType = CORINFO_TYPE_UBYTE; size = Vector128SizeBytes; JITDUMP(" Known type Vector128<byte>\n"); } else if (typeHnd == m_simdHandleCache->Vector128LongHandle) { simdBaseJitType = CORINFO_TYPE_LONG; size = Vector128SizeBytes; JITDUMP(" Known type Vector128<long>\n"); } else if (typeHnd == m_simdHandleCache->Vector128ULongHandle) { simdBaseJitType = CORINFO_TYPE_ULONG; size = Vector128SizeBytes; JITDUMP(" Known type Vector128<ulong>\n"); } else if (typeHnd == m_simdHandleCache->Vector128NIntHandle) { simdBaseJitType = CORINFO_TYPE_NATIVEINT; size = Vector128SizeBytes; JITDUMP(" Known type Vector128<nint>\n"); } else if (typeHnd == m_simdHandleCache->Vector128NUIntHandle) { simdBaseJitType = CORINFO_TYPE_NATIVEUINT; size = Vector128SizeBytes; JITDUMP(" Known type Vector128<nuint>\n"); } else #if defined(TARGET_ARM64) if (typeHnd == m_simdHandleCache->Vector64FloatHandle) { simdBaseJitType = CORINFO_TYPE_FLOAT; size = Vector64SizeBytes; JITDUMP(" Known type Vector64<float>\n"); } else if (typeHnd == m_simdHandleCache->Vector64DoubleHandle) { simdBaseJitType = CORINFO_TYPE_DOUBLE; size = Vector64SizeBytes; JITDUMP(" Known type Vector64<double>\n"); } else if (typeHnd == m_simdHandleCache->Vector64IntHandle) { simdBaseJitType = CORINFO_TYPE_INT; size = Vector64SizeBytes; JITDUMP(" Known type Vector64<int>\n"); } else if (typeHnd == m_simdHandleCache->Vector64UIntHandle) { simdBaseJitType = CORINFO_TYPE_UINT; size = Vector64SizeBytes; JITDUMP(" Known type Vector64<uint>\n"); } else if (typeHnd == m_simdHandleCache->Vector64ShortHandle) { simdBaseJitType = CORINFO_TYPE_SHORT; size = Vector64SizeBytes; JITDUMP(" Known type Vector64<short>\n"); } else if (typeHnd == m_simdHandleCache->Vector64UShortHandle) { simdBaseJitType = CORINFO_TYPE_USHORT; size = Vector64SizeBytes; JITDUMP(" Known type Vector64<ushort>\n"); } else if (typeHnd == m_simdHandleCache->Vector64ByteHandle) { simdBaseJitType = CORINFO_TYPE_BYTE; size = Vector64SizeBytes; JITDUMP(" Known type Vector64<sbyte>\n"); } else if (typeHnd == m_simdHandleCache->Vector64UByteHandle) { simdBaseJitType = CORINFO_TYPE_UBYTE; size = Vector64SizeBytes; JITDUMP(" Known type Vector64<byte>\n"); } else if (typeHnd == m_simdHandleCache->Vector64LongHandle) { simdBaseJitType = CORINFO_TYPE_LONG; size = Vector64SizeBytes; JITDUMP(" Known type Vector64<long>\n"); } else if (typeHnd == m_simdHandleCache->Vector64ULongHandle) { simdBaseJitType = CORINFO_TYPE_ULONG; size = Vector64SizeBytes; JITDUMP(" Known type Vector64<ulong>\n"); } else if (typeHnd == m_simdHandleCache->Vector64NIntHandle) { 
simdBaseJitType = CORINFO_TYPE_NATIVEINT; size = Vector64SizeBytes; JITDUMP(" Known type Vector64<nint>\n"); } else if (typeHnd == m_simdHandleCache->Vector64NUIntHandle) { simdBaseJitType = CORINFO_TYPE_NATIVEUINT; size = Vector64SizeBytes; JITDUMP(" Known type Vector64<nuint>\n"); } #endif // defined(TARGET_ARM64) // slow path search if (simdBaseJitType == CORINFO_TYPE_UNDEF) { // Doesn't match with any of the cached type handles. const char* className = getClassNameFromMetadata(typeHnd, nullptr); CORINFO_CLASS_HANDLE baseTypeHnd = getTypeInstantiationArgument(typeHnd, 0); if (baseTypeHnd != nullptr) { CorInfoType type = info.compCompHnd->getTypeForPrimitiveNumericClass(baseTypeHnd); JITDUMP("HW Intrinsic SIMD Candidate Type %s with Base Type %s\n", className, getClassNameFromMetadata(baseTypeHnd, nullptr)); #if defined(TARGET_XARCH) if (strcmp(className, "Vector256`1") == 0) { size = Vector256SizeBytes; switch (type) { case CORINFO_TYPE_FLOAT: m_simdHandleCache->Vector256FloatHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_FLOAT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<float>\n"); break; case CORINFO_TYPE_DOUBLE: m_simdHandleCache->Vector256DoubleHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_DOUBLE; JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<double>\n"); break; case CORINFO_TYPE_INT: m_simdHandleCache->Vector256IntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_INT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<int>\n"); break; case CORINFO_TYPE_UINT: m_simdHandleCache->Vector256UIntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_UINT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<uint>\n"); break; case CORINFO_TYPE_SHORT: m_simdHandleCache->Vector256ShortHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_SHORT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<short>\n"); break; case CORINFO_TYPE_USHORT: m_simdHandleCache->Vector256UShortHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_USHORT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<ushort>\n"); break; case CORINFO_TYPE_LONG: m_simdHandleCache->Vector256LongHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_LONG; JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<long>\n"); break; case CORINFO_TYPE_ULONG: m_simdHandleCache->Vector256ULongHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_ULONG; JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<ulong>\n"); break; case CORINFO_TYPE_UBYTE: m_simdHandleCache->Vector256UByteHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_UBYTE; JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<byte>\n"); break; case CORINFO_TYPE_BYTE: m_simdHandleCache->Vector256ByteHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_BYTE; JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<sbyte>\n"); break; case CORINFO_TYPE_NATIVEINT: m_simdHandleCache->Vector256NIntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_NATIVEINT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<nint>\n"); break; case CORINFO_TYPE_NATIVEUINT: m_simdHandleCache->Vector256NUIntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_NATIVEUINT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<nuint>\n"); break; default: JITDUMP(" Unknown Hardware Intrinsic SIMD Type Vector256<T>\n"); } } else #endif // defined(TARGET_XARCH) if (strcmp(className, "Vector128`1") == 0) { size = Vector128SizeBytes; switch (type) { case CORINFO_TYPE_FLOAT: m_simdHandleCache->Vector128FloatHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_FLOAT; JITDUMP(" Found type Hardware Intrinsic 
SIMD Vector128<float>\n"); break; case CORINFO_TYPE_DOUBLE: m_simdHandleCache->Vector128DoubleHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_DOUBLE; JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<double>\n"); break; case CORINFO_TYPE_INT: m_simdHandleCache->Vector128IntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_INT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<int>\n"); break; case CORINFO_TYPE_UINT: m_simdHandleCache->Vector128UIntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_UINT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<uint>\n"); break; case CORINFO_TYPE_SHORT: m_simdHandleCache->Vector128ShortHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_SHORT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<short>\n"); break; case CORINFO_TYPE_USHORT: m_simdHandleCache->Vector128UShortHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_USHORT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<ushort>\n"); break; case CORINFO_TYPE_LONG: m_simdHandleCache->Vector128LongHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_LONG; JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<long>\n"); break; case CORINFO_TYPE_ULONG: m_simdHandleCache->Vector128ULongHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_ULONG; JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<ulong>\n"); break; case CORINFO_TYPE_UBYTE: m_simdHandleCache->Vector128UByteHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_UBYTE; JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<byte>\n"); break; case CORINFO_TYPE_BYTE: m_simdHandleCache->Vector128ByteHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_BYTE; JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<sbyte>\n"); break; case CORINFO_TYPE_NATIVEINT: m_simdHandleCache->Vector128NIntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_NATIVEINT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<nint>\n"); break; case CORINFO_TYPE_NATIVEUINT: m_simdHandleCache->Vector128NUIntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_NATIVEUINT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<nuint>\n"); break; default: JITDUMP(" Unknown Hardware Intrinsic SIMD Type Vector128<T>\n"); } } #if defined(TARGET_ARM64) else if (strcmp(className, "Vector64`1") == 0) { size = Vector64SizeBytes; switch (type) { case CORINFO_TYPE_FLOAT: m_simdHandleCache->Vector64FloatHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_FLOAT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<float>\n"); break; case CORINFO_TYPE_DOUBLE: m_simdHandleCache->Vector64DoubleHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_DOUBLE; JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<double>\n"); break; case CORINFO_TYPE_INT: m_simdHandleCache->Vector64IntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_INT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<int>\n"); break; case CORINFO_TYPE_UINT: m_simdHandleCache->Vector64UIntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_UINT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<uint>\n"); break; case CORINFO_TYPE_SHORT: m_simdHandleCache->Vector64ShortHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_SHORT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<short>\n"); break; case CORINFO_TYPE_USHORT: m_simdHandleCache->Vector64UShortHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_USHORT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<ushort>\n"); break; case CORINFO_TYPE_LONG: m_simdHandleCache->Vector64LongHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_LONG; JITDUMP(" Found type Hardware 
Intrinsic SIMD Vector64<long>\n"); break; case CORINFO_TYPE_ULONG: m_simdHandleCache->Vector64ULongHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_ULONG; JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<ulong>\n"); break; case CORINFO_TYPE_UBYTE: m_simdHandleCache->Vector64UByteHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_UBYTE; JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<byte>\n"); break; case CORINFO_TYPE_BYTE: m_simdHandleCache->Vector64ByteHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_BYTE; JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<sbyte>\n"); break; case CORINFO_TYPE_NATIVEINT: m_simdHandleCache->Vector64NIntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_NATIVEINT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<nint>\n"); break; case CORINFO_TYPE_NATIVEUINT: m_simdHandleCache->Vector64NUIntHandle = typeHnd; simdBaseJitType = CORINFO_TYPE_NATIVEUINT; JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<nuint>\n"); break; default: JITDUMP(" Unknown Hardware Intrinsic SIMD Type Vector64<T>\n"); } } #endif // defined(TARGET_ARM64) } } #if defined(TARGET_XARCH) // Even though Vector256 is TYP_SIMD32, if AVX isn't supported, then it must // be treated as a regular struct if (size == YMM_REGSIZE_BYTES && (simdBaseJitType != CORINFO_TYPE_UNDEF) && !compExactlyDependsOn(InstructionSet_AVX)) { simdBaseJitType = CORINFO_TYPE_UNDEF; } #endif // TARGET_XARCH } #endif // FEATURE_HW_INTRINSICS if (sizeBytes != nullptr) { *sizeBytes = size; } if (simdBaseJitType != CORINFO_TYPE_UNDEF) { setUsesSIMDTypes(true); } return simdBaseJitType; } //-------------------------------------------------------------------------------------- // getSIMDIntrinsicInfo: get SIMD intrinsic info given the method handle. // // Arguments: // inOutTypeHnd - The handle of the type on which the method is invoked. This is an in-out param. // methodHnd - The handle of the method we're interested in. // sig - method signature info // isNewObj - whether this call represents a newboj constructor call // argCount - argument count - out pram // simdBaseJitType - base JIT type of the intrinsic - out param // sizeBytes - size of SIMD vector type on which the method is invoked - out param // // Return Value: // SIMDIntrinsicInfo struct initialized corresponding to methodHnd. // Sets SIMDIntrinsicInfo.id to SIMDIntrinsicInvalid if methodHnd doesn't correspond // to any SIMD intrinsic. Also, sets the out params inOutTypeHnd, argCount, baseType and // sizeBytes. // // Note that VectorMath class doesn't have a base type and first argument of the method // determines the SIMD vector type on which intrinsic is invoked. In such a case inOutTypeHnd // is modified by this routine. // // TODO-Throughput: The current implementation is based on method name string parsing. // Although we now have type identification from the VM, the parsing of intrinsic names // could be made more efficient. 
// const SIMDIntrinsicInfo* Compiler::getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* inOutTypeHnd, CORINFO_METHOD_HANDLE methodHnd, CORINFO_SIG_INFO* sig, bool isNewObj, unsigned* argCount, CorInfoType* simdBaseJitType, unsigned* sizeBytes) { assert(supportSIMDTypes()); assert(simdBaseJitType != nullptr); assert(sizeBytes != nullptr); // get simdBaseJitType and size of the type CORINFO_CLASS_HANDLE typeHnd = *inOutTypeHnd; *simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(typeHnd, sizeBytes); if (typeHnd == m_simdHandleCache->SIMDVectorHandle) { // All of the supported intrinsics on this static class take a first argument that's a vector, // which determines the simdBaseJitType. // The exception is the IsHardwareAccelerated property, which is handled as a special case. assert(*simdBaseJitType == CORINFO_TYPE_UNDEF); if (sig->numArgs == 0) { const SIMDIntrinsicInfo* hwAccelIntrinsicInfo = &(simdIntrinsicInfoArray[SIMDIntrinsicHWAccel]); if ((strcmp(eeGetMethodName(methodHnd, nullptr), hwAccelIntrinsicInfo->methodName) == 0) && JITtype2varType(sig->retType) == hwAccelIntrinsicInfo->retType) { // Sanity check assert(hwAccelIntrinsicInfo->argCount == 0 && hwAccelIntrinsicInfo->isInstMethod == false); return hwAccelIntrinsicInfo; } return nullptr; } else { typeHnd = info.compCompHnd->getArgClass(sig, sig->args); *inOutTypeHnd = typeHnd; *simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(typeHnd, sizeBytes); } } if (*simdBaseJitType == CORINFO_TYPE_UNDEF) { JITDUMP("NOT a SIMD Intrinsic: unsupported baseType\n"); return nullptr; } var_types simdBaseType = JitType2PreciseVarType(*simdBaseJitType); // account for implicit "this" arg *argCount = sig->numArgs; if (sig->hasThis()) { *argCount += 1; } // Get the Intrinsic Id by parsing method name. // // TODO-Throughput: replace sequential search by binary search by arranging entries // sorted by method name. SIMDIntrinsicID intrinsicId = SIMDIntrinsicInvalid; const char* methodName = eeGetMethodName(methodHnd, nullptr); for (int i = SIMDIntrinsicNone + 1; i < SIMDIntrinsicInvalid; ++i) { if (strcmp(methodName, simdIntrinsicInfoArray[i].methodName) == 0) { // Found an entry for the method; further check whether it is one of // the supported base types. bool found = false; for (int j = 0; j < SIMD_INTRINSIC_MAX_BASETYPE_COUNT; ++j) { // Convention: if there are fewer base types supported than MAX_BASETYPE_COUNT, // the end of the list is marked by TYP_UNDEF. if (simdIntrinsicInfoArray[i].supportedBaseTypes[j] == TYP_UNDEF) { break; } if (simdIntrinsicInfoArray[i].supportedBaseTypes[j] == simdBaseType) { found = true; break; } } if (!found) { continue; } // Now, check the arguments. unsigned int fixedArgCnt = simdIntrinsicInfoArray[i].argCount; unsigned int expectedArgCnt = fixedArgCnt; // First handle SIMDIntrinsicInitN, where the arg count depends on the type. // The listed arg types include the vector and the first two init values, which is the expected number // for Vector2. For other cases, we'll check their types here. 
if (*argCount > expectedArgCnt) { if (i == SIMDIntrinsicInitN) { if (*argCount == 3 && typeHnd == m_simdHandleCache->SIMDVector2Handle) { expectedArgCnt = 3; } else if (*argCount == 4 && typeHnd == m_simdHandleCache->SIMDVector3Handle) { expectedArgCnt = 4; } else if (*argCount == 5 && typeHnd == m_simdHandleCache->SIMDVector4Handle) { expectedArgCnt = 5; } } else if (i == SIMDIntrinsicInitFixed) { if (*argCount == 4 && typeHnd == m_simdHandleCache->SIMDVector4Handle) { expectedArgCnt = 4; } } } if (*argCount != expectedArgCnt) { continue; } // Validate the types of individual args passed are what is expected of. // If any of the types don't match with what is expected, don't consider // as an intrinsic. This will make an older JIT with SIMD capabilities // resilient to breaking changes to SIMD managed API. // // Note that from IL type stack, args get popped in right to left order // whereas args get listed in method signatures in left to right order. int stackIndex = (expectedArgCnt - 1); // Track the arguments from the signature - we currently only use this to distinguish // integral and pointer types, both of which will by TYP_I_IMPL on the importer stack. CORINFO_ARG_LIST_HANDLE argLst = sig->args; CORINFO_CLASS_HANDLE argClass; for (unsigned int argIndex = 0; found == true && argIndex < expectedArgCnt; argIndex++) { bool isThisPtr = ((argIndex == 0) && sig->hasThis()); // In case of "newobj SIMDVector<T>(T val)", thisPtr won't be present on type stack. // We don't check anything in that case. if (!isThisPtr || !isNewObj) { GenTree* arg = impStackTop(stackIndex).val; var_types argType = arg->TypeGet(); var_types expectedArgType; if (argIndex < fixedArgCnt) { // Convention: // - intrinsicInfo.argType[i] == TYP_UNDEF - intrinsic doesn't have a valid arg at position i // - intrinsicInfo.argType[i] == TYP_UNKNOWN - arg type should be same as simdBaseType // Note that we pop the args off in reverse order. expectedArgType = simdIntrinsicInfoArray[i].argType[argIndex]; assert(expectedArgType != TYP_UNDEF); if (expectedArgType == TYP_UNKNOWN) { // The type of the argument will be genActualType(*simdBaseType). expectedArgType = genActualType(simdBaseType); argType = genActualType(argType); } } else { expectedArgType = simdBaseType; } if (!isThisPtr && argType == TYP_I_IMPL) { // The reference implementation has a constructor that takes a pointer. // We don't want to recognize that one. This requires us to look at the CorInfoType // in order to distinguish a signature with a pointer argument from one with an // integer argument of pointer size, both of which will be TYP_I_IMPL on the stack. // TODO-Review: This seems quite fragile. We should consider beefing up the checking // here. CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass)); if (corType == CORINFO_TYPE_PTR) { found = false; } } if (varTypeIsSIMD(argType)) { argType = TYP_STRUCT; } if (argType != expectedArgType) { found = false; } } if (argIndex != 0 || !sig->hasThis()) { argLst = info.compCompHnd->getArgNext(argLst); } stackIndex--; } // Cross check return type and static vs. instance is what we are expecting. // If not, don't consider it as an intrinsic. // Note that ret type of TYP_UNKNOWN means that it is not known apriori and must be same as simdBaseType if (found) { var_types expectedRetType = simdIntrinsicInfoArray[i].retType; if (expectedRetType == TYP_UNKNOWN) { // JIT maps uint/ulong type vars to TYP_INT/TYP_LONG. expectedRetType = (simdBaseType == TYP_UINT || simdBaseType == TYP_ULONG) ? 
genActualType(simdBaseType) : simdBaseType; } if (JITtype2varType(sig->retType) != expectedRetType || sig->hasThis() != simdIntrinsicInfoArray[i].isInstMethod) { found = false; } } if (found) { intrinsicId = (SIMDIntrinsicID)i; break; } } } if (intrinsicId != SIMDIntrinsicInvalid) { JITDUMP("Method %s maps to SIMD intrinsic %s\n", methodName, simdIntrinsicNames[intrinsicId]); return &simdIntrinsicInfoArray[intrinsicId]; } else { JITDUMP("Method %s is NOT a SIMD intrinsic\n", methodName); } return nullptr; } /* static */ bool Compiler::vnEncodesResultTypeForSIMDIntrinsic(SIMDIntrinsicID intrinsicId) { switch (intrinsicId) { case SIMDIntrinsicInit: case SIMDIntrinsicSub: case SIMDIntrinsicEqual: case SIMDIntrinsicBitwiseAnd: case SIMDIntrinsicBitwiseOr: case SIMDIntrinsicCast: return true; default: break; } return false; } // Pops and returns GenTree node from importer's type stack. // Normalizes TYP_STRUCT value in case of GT_CALL, GT_RET_EXPR and arg nodes. // // Arguments: // type - the type of value that the caller expects to be popped off the stack. // expectAddr - if true indicates we are expecting type stack entry to be a TYP_BYREF. // structHandle - the class handle to use when normalizing if it is not the same as the stack entry class handle; // this can happen for certain scenarios, such as folding away a static cast, where we want the // value popped to have the type that would have been returned. // // Notes: // If the popped value is a struct, and the expected type is a simd type, it will be set // to that type, otherwise it will assert if the type being popped is not the expected type. GenTree* Compiler::impSIMDPopStack(var_types type, bool expectAddr, CORINFO_CLASS_HANDLE structHandle) { StackEntry se = impPopStack(); typeInfo ti = se.seTypeInfo; GenTree* tree = se.val; // If expectAddr is true implies what we have on stack is address and we need // SIMD type struct that it points to. if (expectAddr) { assert(tree->TypeIs(TYP_BYREF, TYP_I_IMPL)); if (tree->OperGet() == GT_ADDR) { tree = tree->gtGetOp1(); } else { tree = gtNewOperNode(GT_IND, type, tree); } } bool isParam = false; // If we are popping a struct type it must have a matching handle if one is specified. // - If we have an existing 'OBJ' and 'structHandle' is specified, we will change its // handle if it doesn't match. // This can happen when we have a retyping of a vector that doesn't translate to any // actual IR. // - (If it's not an OBJ and it's used in a parameter context where it is required, // impNormStructVal will add one). // if (tree->OperGet() == GT_OBJ) { if ((structHandle != NO_CLASS_HANDLE) && (tree->AsObj()->GetLayout()->GetClassHandle() != structHandle)) { // In this case we need to retain the GT_OBJ to retype the value. tree->AsObj()->SetLayout(typGetObjLayout(structHandle)); } else { GenTree* addr = tree->AsOp()->gtOp1; if ((addr->OperGet() == GT_ADDR) && isSIMDTypeLocal(addr->AsOp()->gtOp1)) { tree = addr->AsOp()->gtOp1; } } } if (tree->OperGet() == GT_LCL_VAR) { isParam = lvaGetDesc(tree->AsLclVarCommon())->lvIsParam; } // normalize TYP_STRUCT value if (varTypeIsStruct(tree) && ((tree->OperGet() == GT_RET_EXPR) || (tree->OperGet() == GT_CALL) || isParam)) { assert(ti.IsType(TI_STRUCT)); if (structHandle == nullptr) { structHandle = ti.GetClassHandleForValueClass(); } tree = impNormStructVal(tree, structHandle, (unsigned)CHECK_SPILL_ALL); } // Now set the type of the tree to the specialized SIMD struct type, if applicable. 
if (genActualType(tree->gtType) != genActualType(type)) { assert(tree->gtType == TYP_STRUCT); tree->gtType = type; } else if (tree->gtType == TYP_BYREF) { assert(tree->IsLocal() || (tree->OperGet() == GT_RET_EXPR) || (tree->OperGet() == GT_CALL) || ((tree->gtOper == GT_ADDR) && varTypeIsSIMD(tree->gtGetOp1()))); } return tree; } #ifdef TARGET_XARCH // impSIMDLongRelOpEqual: transforms operands and returns the SIMD intrinsic to be applied on // transformed operands to obtain == comparison result. // // Arguments: // typeHnd - type handle of SIMD vector // size - SIMD vector size // op1 - in-out parameter; first operand // op2 - in-out parameter; second operand // // Return Value: // Modifies in-out params op1, op2 and returns intrinsic ID to be applied to modified operands // SIMDIntrinsicID Compiler::impSIMDLongRelOpEqual(CORINFO_CLASS_HANDLE typeHnd, unsigned size, GenTree** pOp1, GenTree** pOp2) { var_types simdType = (*pOp1)->TypeGet(); assert(varTypeIsSIMD(simdType) && ((*pOp2)->TypeGet() == simdType)); // There is no direct SSE2 support for comparing TYP_LONG vectors. // These have to be implemented in terms of TYP_INT vector comparison operations. // // Equality(v1, v2): // tmp = (v1 == v2) i.e. compare for equality as if v1 and v2 are vector<int> // result = BitwiseAnd(t, shuffle(t, (2, 3, 0, 1))) // Shuffle is meant to swap the comparison results of low-32-bits and high 32-bits of respective long elements. // Compare vector<long> as if they were vector<int> and assign the result to a temp GenTree* compResult = gtNewSIMDNode(simdType, *pOp1, *pOp2, SIMDIntrinsicEqual, CORINFO_TYPE_INT, size); unsigned lclNum = lvaGrabTemp(true DEBUGARG("SIMD Long ==")); lvaSetStruct(lclNum, typeHnd, false); GenTree* tmp = gtNewLclvNode(lclNum, simdType); GenTree* asg = gtNewTempAssign(lclNum, compResult); // op1 = GT_COMMA(tmp=compResult, tmp) // op2 = Shuffle(tmp, 0xB1) // IntrinsicId = BitwiseAnd *pOp1 = gtNewOperNode(GT_COMMA, simdType, asg, tmp); *pOp2 = gtNewSIMDNode(simdType, gtNewLclvNode(lclNum, simdType), gtNewIconNode(SHUFFLE_ZWXY, TYP_INT), SIMDIntrinsicShuffleSSE2, CORINFO_TYPE_INT, size); return SIMDIntrinsicBitwiseAnd; } #endif // TARGET_XARCH // Transforms operands and returns the SIMD intrinsic to be applied on // transformed operands to obtain given relop result. // // Arguments: // relOpIntrinsicId - Relational operator SIMD intrinsic // typeHnd - type handle of SIMD vector // size - SIMD vector size // inOutBaseJitType - base JIT type of SIMD vector // pOp1 - in-out parameter; first operand // pOp2 - in-out parameter; second operand // // Return Value: // Modifies in-out params pOp1, pOp2, inOutBaseType and returns intrinsic ID to be applied to modified operands // SIMDIntrinsicID Compiler::impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId, CORINFO_CLASS_HANDLE typeHnd, unsigned size, CorInfoType* inOutBaseJitType, GenTree** pOp1, GenTree** pOp2) { var_types simdType = (*pOp1)->TypeGet(); assert(varTypeIsSIMD(simdType) && ((*pOp2)->TypeGet() == simdType)); assert(isRelOpSIMDIntrinsic(relOpIntrinsicId)); SIMDIntrinsicID intrinsicID = relOpIntrinsicId; #ifdef TARGET_XARCH CorInfoType simdBaseJitType = *inOutBaseJitType; var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); if (varTypeIsFloating(simdBaseType)) { } else if (varTypeIsIntegral(simdBaseType)) { if ((getSIMDSupportLevel() == SIMD_SSE2_Supported) && simdBaseType == TYP_LONG) { // There is no direct SSE2 support for comparing TYP_LONG vectors. 
// These have to be implemented interms of TYP_INT vector comparison operations. if (intrinsicID == SIMDIntrinsicEqual) { intrinsicID = impSIMDLongRelOpEqual(typeHnd, size, pOp1, pOp2); } else { unreached(); } } // SSE2 and AVX direct support for signed comparison of int32, int16 and int8 types else if (varTypeIsUnsigned(simdBaseType)) { // Vector<byte>, Vector<ushort>, Vector<uint> and Vector<ulong>: // SSE2 supports > for signed comparison. Therefore, to use it for // comparing unsigned numbers, we subtract a constant from both the // operands such that the result fits within the corresponding signed // type. The resulting signed numbers are compared using SSE2 signed // comparison. // // Vector<byte>: constant to be subtracted is 2^7 // Vector<ushort> constant to be subtracted is 2^15 // Vector<uint> constant to be subtracted is 2^31 // Vector<ulong> constant to be subtracted is 2^63 // // We need to treat op1 and op2 as signed for comparison purpose after // the transformation. __int64 constVal = 0; switch (simdBaseType) { case TYP_UBYTE: constVal = 0x80808080; *inOutBaseJitType = CORINFO_TYPE_BYTE; break; case TYP_USHORT: constVal = 0x80008000; *inOutBaseJitType = CORINFO_TYPE_SHORT; break; case TYP_UINT: constVal = 0x80000000; *inOutBaseJitType = CORINFO_TYPE_INT; break; case TYP_ULONG: constVal = 0x8000000000000000LL; *inOutBaseJitType = CORINFO_TYPE_LONG; break; default: unreached(); break; } assert(constVal != 0); // This transformation is not required for equality. if (intrinsicID != SIMDIntrinsicEqual) { // For constructing const vector use either long or int base type. CorInfoType tempBaseJitType; GenTree* initVal; if (simdBaseType == TYP_ULONG) { tempBaseJitType = CORINFO_TYPE_LONG; initVal = gtNewLconNode(constVal); } else { tempBaseJitType = CORINFO_TYPE_INT; initVal = gtNewIconNode((ssize_t)constVal); } initVal->gtType = JITtype2varType(tempBaseJitType); GenTree* constVector = gtNewSIMDNode(simdType, initVal, SIMDIntrinsicInit, tempBaseJitType, size); // Assign constVector to a temp, since we intend to use it more than once // TODO-CQ: We have quite a few such constant vectors constructed during // the importation of SIMD intrinsics. Make sure that we have a single // temp per distinct constant per method. GenTree* tmp = fgInsertCommaFormTemp(&constVector, typeHnd); // op1 = op1 - constVector // op2 = op2 - constVector *pOp1 = gtNewSIMDNode(simdType, *pOp1, constVector, SIMDIntrinsicSub, simdBaseJitType, size); *pOp2 = gtNewSIMDNode(simdType, *pOp2, tmp, SIMDIntrinsicSub, simdBaseJitType, size); } return impSIMDRelOp(intrinsicID, typeHnd, size, inOutBaseJitType, pOp1, pOp2); } } #elif !defined(TARGET_ARM64) assert(!"impSIMDRelOp() unimplemented on target arch"); unreached(); #endif // !TARGET_XARCH return intrinsicID; } //------------------------------------------------------------------------ // getOp1ForConstructor: Get the op1 for a constructor call. // // Arguments: // opcode - the opcode being handled (needed to identify the CEE_NEWOBJ case) // newobjThis - For CEE_NEWOBJ, this is the temp grabbed for the allocated uninitalized object. // clsHnd - The handle of the class of the method. // // Return Value: // The tree node representing the object to be initialized with the constructor. // // Notes: // This method handles the differences between the CEE_NEWOBJ and constructor cases. 
// GenTree* Compiler::getOp1ForConstructor(OPCODE opcode, GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd) { GenTree* op1; if (opcode == CEE_NEWOBJ) { op1 = newobjThis; assert(newobjThis->gtOper == GT_ADDR && newobjThis->AsOp()->gtOp1->gtOper == GT_LCL_VAR); // push newobj result on type stack unsigned tmp = op1->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack()); } else { op1 = impSIMDPopStack(TYP_BYREF); } assert(op1->TypeGet() == TYP_BYREF); return op1; } //------------------------------------------------------------------- // Set the flag that indicates that the lclVar referenced by this tree // is used in a SIMD intrinsic. // Arguments: // tree - GenTree* void Compiler::setLclRelatedToSIMDIntrinsic(GenTree* tree) { assert(tree->OperIsLocal()); LclVarDsc* lclVarDsc = lvaGetDesc(tree->AsLclVarCommon()); lclVarDsc->lvUsedInSIMDIntrinsic = true; } //------------------------------------------------------------- // Check if two field nodes reference at the same memory location. // Notice that this check is just based on pattern matching. // Arguments: // op1 - GenTree*. // op2 - GenTree*. // Return Value: // If op1's parents node and op2's parents node are at the same location, return true. Otherwise, return false bool areFieldsParentsLocatedSame(GenTree* op1, GenTree* op2) { assert(op1->OperGet() == GT_FIELD); assert(op2->OperGet() == GT_FIELD); GenTree* op1ObjRef = op1->AsField()->GetFldObj(); GenTree* op2ObjRef = op2->AsField()->GetFldObj(); while (op1ObjRef != nullptr && op2ObjRef != nullptr) { if (op1ObjRef->OperGet() != op2ObjRef->OperGet()) { break; } else if (op1ObjRef->OperGet() == GT_ADDR) { op1ObjRef = op1ObjRef->AsOp()->gtOp1; op2ObjRef = op2ObjRef->AsOp()->gtOp1; } if (op1ObjRef->OperIsLocal() && op2ObjRef->OperIsLocal() && op1ObjRef->AsLclVarCommon()->GetLclNum() == op2ObjRef->AsLclVarCommon()->GetLclNum()) { return true; } else if (op1ObjRef->OperGet() == GT_FIELD && op2ObjRef->OperGet() == GT_FIELD && op1ObjRef->AsField()->gtFldHnd == op2ObjRef->AsField()->gtFldHnd) { op1ObjRef = op1ObjRef->AsField()->GetFldObj(); op2ObjRef = op2ObjRef->AsField()->GetFldObj(); continue; } else { break; } } return false; } //---------------------------------------------------------------------- // Check whether two field are contiguous // Arguments: // first - GenTree*. The Type of the node should be TYP_FLOAT // second - GenTree*. The Type of the node should be TYP_FLOAT // Return Value: // if the first field is located before second field, and they are located contiguously, // then return true. Otherwise, return false. 
bool Compiler::areFieldsContiguous(GenTree* first, GenTree* second) { assert(first->OperGet() == GT_FIELD); assert(second->OperGet() == GT_FIELD); assert(first->gtType == TYP_FLOAT); assert(second->gtType == TYP_FLOAT); var_types firstFieldType = first->gtType; var_types secondFieldType = second->gtType; unsigned firstFieldEndOffset = first->AsField()->gtFldOffset + genTypeSize(firstFieldType); unsigned secondFieldOffset = second->AsField()->gtFldOffset; if (firstFieldEndOffset == secondFieldOffset && firstFieldType == secondFieldType && areFieldsParentsLocatedSame(first, second)) { return true; } return false; } //---------------------------------------------------------------------- // areLocalFieldsContiguous: Check whether two local field are contiguous // // Arguments: // first - the first local field // second - the second local field // // Return Value: // If the first field is located before second field, and they are located contiguously, // then return true. Otherwise, return false. // bool Compiler::areLocalFieldsContiguous(GenTreeLclFld* first, GenTreeLclFld* second) { assert(first->TypeIs(TYP_FLOAT)); assert(second->TypeIs(TYP_FLOAT)); return (first->TypeGet() == second->TypeGet()) && (first->GetLclOffs() + genTypeSize(first->TypeGet()) == second->GetLclOffs()); } //------------------------------------------------------------------------------- // Check whether two array element nodes are located contiguously or not. // Arguments: // op1 - GenTree*. // op2 - GenTree*. // Return Value: // if the array element op1 is located before array element op2, and they are contiguous, // then return true. Otherwise, return false. // TODO-CQ: // Right this can only check array element with const number as index. In future, // we should consider to allow this function to check the index using expression. bool Compiler::areArrayElementsContiguous(GenTree* op1, GenTree* op2) { noway_assert(op1->gtOper == GT_INDEX); noway_assert(op2->gtOper == GT_INDEX); GenTreeIndex* op1Index = op1->AsIndex(); GenTreeIndex* op2Index = op2->AsIndex(); GenTree* op1ArrayRef = op1Index->Arr(); GenTree* op2ArrayRef = op2Index->Arr(); assert(op1ArrayRef->TypeGet() == TYP_REF); assert(op2ArrayRef->TypeGet() == TYP_REF); GenTree* op1IndexNode = op1Index->Index(); GenTree* op2IndexNode = op2Index->Index(); if ((op1IndexNode->OperGet() == GT_CNS_INT && op2IndexNode->OperGet() == GT_CNS_INT) && op1IndexNode->AsIntCon()->gtIconVal + 1 == op2IndexNode->AsIntCon()->gtIconVal) { if (op1ArrayRef->OperGet() == GT_FIELD && op2ArrayRef->OperGet() == GT_FIELD && areFieldsParentsLocatedSame(op1ArrayRef, op2ArrayRef)) { return true; } else if (op1ArrayRef->OperIsLocal() && op2ArrayRef->OperIsLocal() && op1ArrayRef->AsLclVarCommon()->GetLclNum() == op2ArrayRef->AsLclVarCommon()->GetLclNum()) { return true; } } return false; } //------------------------------------------------------------------------------- // Check whether two argument nodes are contiguous or not. // Arguments: // op1 - GenTree*. // op2 - GenTree*. // Return Value: // if the argument node op1 is located before argument node op2, and they are located contiguously, // then return true. Otherwise, return false. // TODO-CQ: // Right now this can only check field and array. In future we should add more cases. 
// bool Compiler::areArgumentsContiguous(GenTree* op1, GenTree* op2) { if (op1->OperGet() == GT_INDEX && op2->OperGet() == GT_INDEX) { return areArrayElementsContiguous(op1, op2); } else if (op1->OperGet() == GT_FIELD && op2->OperGet() == GT_FIELD) { return areFieldsContiguous(op1, op2); } else if (op1->OperIs(GT_LCL_FLD) && op2->OperIs(GT_LCL_FLD)) { return areLocalFieldsContiguous(op1->AsLclFld(), op2->AsLclFld()); } return false; } //-------------------------------------------------------------------------------------------------------- // createAddressNodeForSIMDInit: Generate the address node(GT_LEA) if we want to intialize vector2, vector3 or vector4 // from first argument's address. // // Arguments: // tree - GenTree*. This the tree node which is used to get the address for indir. // simdsize - unsigned. This the simd vector size. // arrayElementsCount - unsigned. This is used for generating the boundary check for array. // // Return value: // return the address node. // // TODO-CQ: // 1. Currently just support for GT_FIELD and GT_INDEX, because we can only verify the GT_INDEX node or GT_Field // are located contiguously or not. In future we should support more cases. // 2. Though it happens to just work fine front-end phases are not aware of GT_LEA node. Therefore, convert these // to use GT_ADDR. GenTree* Compiler::createAddressNodeForSIMDInit(GenTree* tree, unsigned simdSize) { assert(tree->OperGet() == GT_FIELD || tree->OperGet() == GT_INDEX); GenTree* byrefNode = nullptr; GenTree* startIndex = nullptr; unsigned offset = 0; var_types baseType = tree->gtType; if (tree->OperGet() == GT_FIELD) { GenTree* objRef = tree->AsField()->GetFldObj(); if (objRef != nullptr && objRef->gtOper == GT_ADDR) { GenTree* obj = objRef->AsOp()->gtOp1; // If the field is directly from a struct, then in this case, // we should set this struct's lvUsedInSIMDIntrinsic as true, // so that this sturct won't be promoted. // e.g. s.x x is a field, and s is a struct, then we should set the s's lvUsedInSIMDIntrinsic as true. // so that s won't be promoted. // Notice that if we have a case like s1.s2.x. s1 s2 are struct, and x is a field, then it is possible that // s1 can be promoted, so that s2 can be promoted. The reason for that is if we don't allow s1 to be // promoted, then this will affect the other optimizations which are depend on s1's struct promotion. // TODO-CQ: // In future, we should optimize this case so that if there is a nested field like s1.s2.x and s1.s2.x's // address is used for initializing the vector, then s1 can be promoted but s2 can't. if (varTypeIsSIMD(obj) && obj->OperIsLocal()) { setLclRelatedToSIMDIntrinsic(obj); } } byrefNode = gtCloneExpr(tree->AsField()->GetFldObj()); assert(byrefNode != nullptr); offset = tree->AsField()->gtFldOffset; } else if (tree->OperGet() == GT_INDEX) { GenTree* index = tree->AsIndex()->Index(); assert(index->OperGet() == GT_CNS_INT); GenTree* checkIndexExpr = nullptr; unsigned indexVal = (unsigned)(index->AsIntCon()->gtIconVal); offset = indexVal * genTypeSize(tree->TypeGet()); GenTree* arrayRef = tree->AsIndex()->Arr(); // Generate the boundary check exception. 
// The length for boundary check should be the maximum index number which should be // (first argument's index number) + (how many array arguments we have) - 1 // = indexVal + arrayElementsCount - 1 unsigned arrayElementsCount = simdSize / genTypeSize(baseType); checkIndexExpr = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, indexVal + arrayElementsCount - 1); GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, arrayRef, (int)OFFSETOF__CORINFO_Array__length, compCurBB); GenTreeBoundsChk* arrBndsChk = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(checkIndexExpr, arrLen, SCK_ARG_RNG_EXCPN); offset += OFFSETOF__CORINFO_Array__data; byrefNode = gtNewOperNode(GT_COMMA, arrayRef->TypeGet(), arrBndsChk, gtCloneExpr(arrayRef)); } else { unreached(); } GenTree* address = new (this, GT_LEA) GenTreeAddrMode(TYP_BYREF, byrefNode, startIndex, genTypeSize(tree->TypeGet()), offset); return address; } //------------------------------------------------------------------------------- // impMarkContiguousSIMDFieldAssignments: Try to identify if there are contiguous // assignments from SIMD field to memory. If there are, then mark the related // lclvar so that it won't be promoted. // // Arguments: // stmt - GenTree*. Input statement node. void Compiler::impMarkContiguousSIMDFieldAssignments(Statement* stmt) { if (!supportSIMDTypes() || opts.OptimizationDisabled()) { return; } GenTree* expr = stmt->GetRootNode(); if (expr->OperGet() == GT_ASG && expr->TypeGet() == TYP_FLOAT) { GenTree* curDst = expr->AsOp()->gtOp1; GenTree* curSrc = expr->AsOp()->gtOp2; unsigned index = 0; CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; unsigned simdSize = 0; GenTree* srcSimdStructNode = getSIMDStructFromField(curSrc, &simdBaseJitType, &index, &simdSize, true); if (srcSimdStructNode == nullptr || simdBaseJitType != CORINFO_TYPE_FLOAT) { fgPreviousCandidateSIMDFieldAsgStmt = nullptr; } else if (index == 0 && isSIMDTypeLocal(srcSimdStructNode)) { fgPreviousCandidateSIMDFieldAsgStmt = stmt; } else if (fgPreviousCandidateSIMDFieldAsgStmt != nullptr) { assert(index > 0); var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); GenTree* prevAsgExpr = fgPreviousCandidateSIMDFieldAsgStmt->GetRootNode(); GenTree* prevDst = prevAsgExpr->AsOp()->gtOp1; GenTree* prevSrc = prevAsgExpr->AsOp()->gtOp2; if (!areArgumentsContiguous(prevDst, curDst) || !areArgumentsContiguous(prevSrc, curSrc)) { fgPreviousCandidateSIMDFieldAsgStmt = nullptr; } else { if (index == (simdSize / genTypeSize(simdBaseType) - 1)) { // Successfully found the pattern, mark the lclvar as UsedInSIMDIntrinsic if (srcSimdStructNode->OperIsLocal()) { setLclRelatedToSIMDIntrinsic(srcSimdStructNode); } if (curDst->OperGet() == GT_FIELD) { GenTree* objRef = curDst->AsField()->GetFldObj(); if (objRef != nullptr && objRef->gtOper == GT_ADDR) { GenTree* obj = objRef->AsOp()->gtOp1; if (varTypeIsStruct(obj) && obj->OperIsLocal()) { setLclRelatedToSIMDIntrinsic(obj); } } } } else { fgPreviousCandidateSIMDFieldAsgStmt = stmt; } } } } else { fgPreviousCandidateSIMDFieldAsgStmt = nullptr; } } //------------------------------------------------------------------------ // impSIMDIntrinsic: Check method to see if it is a SIMD method // // Arguments: // opcode - the opcode being handled (needed to identify the CEE_NEWOBJ case) // newobjThis - For CEE_NEWOBJ, this is the temp grabbed for the allocated uninitalized object. // clsHnd - The handle of the class of the method. // method - The handle of the method. // sig - The call signature for the method. 
// memberRef - The memberRef token for the method reference. // // Return Value: // If clsHnd is a known SIMD type, and 'method' is one of the methods that are // implemented as an intrinsic in the JIT, then return the tree that implements // it. // GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode, GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE methodHnd, CORINFO_SIG_INFO* sig, unsigned methodFlags, int memberRef) { assert(supportSIMDTypes()); // Exit early if we are not in one of the SIMD types. if (!isSIMDClass(clsHnd)) { return nullptr; } // Exit early if the method is not a JIT Intrinsic (which requires the [Intrinsic] attribute). if ((methodFlags & CORINFO_FLG_INTRINSIC) == 0) { return nullptr; } // Get base type and intrinsic Id CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; unsigned size = 0; unsigned argCount = 0; const SIMDIntrinsicInfo* intrinsicInfo = getSIMDIntrinsicInfo(&clsHnd, methodHnd, sig, (opcode == CEE_NEWOBJ), &argCount, &simdBaseJitType, &size); // Exit early if the intrinsic is invalid or unrecognized if ((intrinsicInfo == nullptr) || (intrinsicInfo->id == SIMDIntrinsicInvalid)) { return nullptr; } if (!IsBaselineSimdIsaSupported()) { // The user disabled support for the baseline ISA so // don't emit any SIMD intrinsics as they all require // this at a minimum. We will, however, return false // for IsHardwareAccelerated as that will help with // dead code elimination. return (intrinsicInfo->id == SIMDIntrinsicHWAccel) ? gtNewIconNode(0, TYP_INT) : nullptr; } SIMDIntrinsicID simdIntrinsicID = intrinsicInfo->id; var_types simdBaseType; var_types simdType; if (simdBaseJitType != CORINFO_TYPE_UNDEF) { simdBaseType = JitType2PreciseVarType(simdBaseJitType); simdType = getSIMDTypeForSize(size); } else { assert(simdIntrinsicID == SIMDIntrinsicHWAccel); simdBaseType = TYP_UNKNOWN; simdType = TYP_UNKNOWN; } bool instMethod = intrinsicInfo->isInstMethod; var_types callType = JITtype2varType(sig->retType); if (callType == TYP_STRUCT) { // Note that here we are assuming that, if the call returns a struct, that it is the same size as the // struct on which the method is declared. This is currently true for all methods on Vector types, // but if this ever changes, we will need to determine the callType from the signature. assert(info.compCompHnd->getClassSize(sig->retTypeClass) == genTypeSize(simdType)); callType = simdType; } GenTree* simdTree = nullptr; GenTree* op1 = nullptr; GenTree* op2 = nullptr; GenTree* op3 = nullptr; GenTree* retVal = nullptr; GenTree* copyBlkDst = nullptr; bool doCopyBlk = false; switch (simdIntrinsicID) { case SIMDIntrinsicInit: case SIMDIntrinsicInitN: { // SIMDIntrinsicInit: // op2 - the initializer value // op1 - byref of vector // // SIMDIntrinsicInitN // op2 - list of initializer values stitched into a list // op1 - byref of vector IntrinsicNodeBuilder nodeBuilder(getAllocator(CMK_ASTNode), argCount - 1); bool initFromFirstArgIndir = false; if (simdIntrinsicID == SIMDIntrinsicInit) { op2 = impSIMDPopStack(simdBaseType); nodeBuilder.AddOperand(0, op2); } else { assert(simdIntrinsicID == SIMDIntrinsicInitN); assert(simdBaseType == TYP_FLOAT); unsigned initCount = argCount - 1; unsigned elementCount = getSIMDVectorLength(size, simdBaseType); noway_assert(initCount == elementCount); // Build an array with the N values. // We must maintain left-to-right order of the args, but we will pop // them off in reverse order (the Nth arg was pushed onto the stack last). 
GenTree* prevArg = nullptr; bool areArgsContiguous = true; for (unsigned i = 0; i < initCount; i++) { GenTree* arg = impSIMDPopStack(simdBaseType); if (areArgsContiguous) { GenTree* curArg = arg; if (prevArg != nullptr) { // Recall that we are popping the args off the stack in reverse order. areArgsContiguous = areArgumentsContiguous(curArg, prevArg); } prevArg = curArg; } assert(genActualType(arg) == genActualType(simdBaseType)); nodeBuilder.AddOperand(initCount - i - 1, arg); } if (areArgsContiguous && simdBaseType == TYP_FLOAT) { // Since Vector2, Vector3 and Vector4's arguments type are only float, // we intialize the vector from first argument address, only when // the simdBaseType is TYP_FLOAT and the arguments are located contiguously in memory initFromFirstArgIndir = true; GenTree* op2Address = createAddressNodeForSIMDInit(nodeBuilder.GetOperand(0), size); var_types simdType = getSIMDTypeForSize(size); op2 = gtNewOperNode(GT_IND, simdType, op2Address); } } op1 = getOp1ForConstructor(opcode, newobjThis, clsHnd); assert(op1->TypeGet() == TYP_BYREF); // For integral base types of size less than TYP_INT, expand the initializer // to fill size of TYP_INT bytes. if (varTypeIsSmallInt(simdBaseType)) { // This case should occur only for Init intrinsic. assert(simdIntrinsicID == SIMDIntrinsicInit); unsigned baseSize = genTypeSize(simdBaseType); int multiplier; if (baseSize == 1) { multiplier = 0x01010101; } else { assert(baseSize == 2); multiplier = 0x00010001; } GenTree* t1 = nullptr; if (simdBaseType == TYP_BYTE) { // What we have is a signed byte initializer, // which when loaded to a reg will get sign extended to TYP_INT. // But what we need is the initializer without sign extended or // rather zero extended to 32-bits. t1 = gtNewOperNode(GT_AND, TYP_INT, op2, gtNewIconNode(0xff, TYP_INT)); } else if (simdBaseType == TYP_SHORT) { // What we have is a signed short initializer, // which when loaded to a reg will get sign extended to TYP_INT. // But what we need is the initializer without sign extended or // rather zero extended to 32-bits. t1 = gtNewOperNode(GT_AND, TYP_INT, op2, gtNewIconNode(0xffff, TYP_INT)); } else { // TODO-Casts: this cast is useless. assert(simdBaseType == TYP_UBYTE || simdBaseType == TYP_USHORT); t1 = gtNewCastNode(TYP_INT, op2, false, TYP_INT); } assert(t1 != nullptr); GenTree* t2 = gtNewIconNode(multiplier, TYP_INT); op2 = gtNewOperNode(GT_MUL, TYP_INT, t1, t2); // Construct a vector of TYP_INT with the new initializer and cast it back to vector of simdBaseType simdTree = gtNewSIMDNode(simdType, op2, simdIntrinsicID, CORINFO_TYPE_INT, size); simdTree = gtNewSIMDNode(simdType, simdTree, SIMDIntrinsicCast, simdBaseJitType, size); } else { if (initFromFirstArgIndir) { simdTree = op2; if (op1->AsOp()->gtOp1->OperIsLocal()) { // label the dst struct's lclvar is used for SIMD intrinsic, // so that this dst struct won't be promoted. setLclRelatedToSIMDIntrinsic(op1->AsOp()->gtOp1); } } else { simdTree = new (this, GT_SIMD) GenTreeSIMD(simdType, std::move(nodeBuilder), simdIntrinsicID, simdBaseJitType, size); } } copyBlkDst = op1; doCopyBlk = true; } break; case SIMDIntrinsicInitArray: case SIMDIntrinsicInitArrayX: case SIMDIntrinsicCopyToArray: case SIMDIntrinsicCopyToArrayX: { // op3 - index into array in case of SIMDIntrinsicCopyToArrayX and SIMDIntrinsicInitArrayX // op2 - array itself // op1 - byref to vector struct unsigned int vectorLength = getSIMDVectorLength(size, simdBaseType); // (This constructor takes only the zero-based arrays.) 
// We will add one or two bounds checks: // 1. If we have an index, we must do a check on that first. // We can't combine it with the index + vectorLength check because // a. It might be negative, and b. It may need to raise a different exception // (captured as SCK_ARG_RNG_EXCPN for CopyTo and Init). // 2. We need to generate a check (SCK_ARG_EXCPN for CopyTo and Init) // for the last array element we will access. // We'll either check against (vectorLength - 1) or (index + vectorLength - 1). GenTree* checkIndexExpr = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, vectorLength - 1); // Get the index into the array. If it has been provided, it will be on the // top of the stack. Otherwise, it is null. if (argCount == 3) { op3 = impSIMDPopStack(TYP_INT); if (op3->IsIntegralConst(0)) { op3 = nullptr; } } else { // TODO-CQ: Here, or elsewhere, check for the pattern where op2 is a newly constructed array, and // change this to the InitN form. // op3 = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, 0); op3 = nullptr; } // Clone the array for use in the bounds check. op2 = impSIMDPopStack(TYP_REF); assert(op2->TypeGet() == TYP_REF); GenTree* arrayRefForArgChk = op2; GenTree* argRngChk = nullptr; if ((arrayRefForArgChk->gtFlags & GTF_SIDE_EFFECT) != 0) { op2 = fgInsertCommaFormTemp(&arrayRefForArgChk); } else { op2 = gtCloneExpr(arrayRefForArgChk); } assert(op2 != nullptr); if (op3 != nullptr) { // We need to use the original expression on this, which is the first check. GenTree* arrayRefForArgRngChk = arrayRefForArgChk; // Then we clone the clone we just made for the next check. arrayRefForArgChk = gtCloneExpr(op2); // We know we MUST have had a cloneable expression. assert(arrayRefForArgChk != nullptr); GenTree* index = op3; if ((index->gtFlags & GTF_SIDE_EFFECT) != 0) { op3 = fgInsertCommaFormTemp(&index); } else { op3 = gtCloneExpr(index); } GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, arrayRefForArgRngChk, (int)OFFSETOF__CORINFO_Array__length, compCurBB); argRngChk = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(index, arrLen, SCK_ARG_RNG_EXCPN); // Now, clone op3 to create another node for the argChk GenTree* index2 = gtCloneExpr(op3); assert(index != nullptr); checkIndexExpr = gtNewOperNode(GT_ADD, TYP_INT, index2, checkIndexExpr); } // Insert a bounds check for index + offset - 1. // This must be a "normal" array. SpecialCodeKind op2CheckKind; if (simdIntrinsicID == SIMDIntrinsicInitArray || simdIntrinsicID == SIMDIntrinsicInitArrayX) { op2CheckKind = SCK_ARG_RNG_EXCPN; } else { op2CheckKind = SCK_ARG_EXCPN; } GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, arrayRefForArgChk, (int)OFFSETOF__CORINFO_Array__length, compCurBB); GenTreeBoundsChk* argChk = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(checkIndexExpr, arrLen, op2CheckKind); // Create a GT_COMMA tree for the bounds check(s). op2 = gtNewOperNode(GT_COMMA, op2->TypeGet(), argChk, op2); if (argRngChk != nullptr) { op2 = gtNewOperNode(GT_COMMA, op2->TypeGet(), argRngChk, op2); } if (simdIntrinsicID == SIMDIntrinsicInitArray || simdIntrinsicID == SIMDIntrinsicInitArrayX) { op1 = getOp1ForConstructor(opcode, newobjThis, clsHnd); simdTree = (op3 != nullptr) ? 
gtNewSIMDNode(simdType, op2, op3, SIMDIntrinsicInitArray, simdBaseJitType, size) : gtNewSIMDNode(simdType, op2, SIMDIntrinsicInitArray, simdBaseJitType, size); copyBlkDst = op1; doCopyBlk = true; } else { assert(simdIntrinsicID == SIMDIntrinsicCopyToArray || simdIntrinsicID == SIMDIntrinsicCopyToArrayX); op1 = impSIMDPopStack(simdType, instMethod); assert(op1->TypeGet() == simdType); // copy vector (op1) to array (op2) starting at index (op3) simdTree = op1; // TODO-Cleanup: Though it happens to just work fine front-end phases are not aware of GT_LEA node. // Therefore, convert these to use GT_ADDR . copyBlkDst = new (this, GT_LEA) GenTreeAddrMode(TYP_BYREF, op2, op3, genTypeSize(simdBaseType), OFFSETOF__CORINFO_Array__data); doCopyBlk = true; } } break; case SIMDIntrinsicInitFixed: { // We are initializing a fixed-length vector VLarge with a smaller fixed-length vector VSmall, plus 1 or 2 // additional floats. // op4 (optional) - float value for VLarge.W, if VLarge is Vector4, and VSmall is Vector2 // op3 - float value for VLarge.Z or VLarge.W // op2 - VSmall // op1 - byref of VLarge assert(simdBaseType == TYP_FLOAT); GenTree* op4 = nullptr; if (argCount == 4) { op4 = impSIMDPopStack(TYP_FLOAT); assert(op4->TypeGet() == TYP_FLOAT); } op3 = impSIMDPopStack(TYP_FLOAT); assert(op3->TypeGet() == TYP_FLOAT); // The input vector will either be TYP_SIMD8 or TYP_SIMD12. var_types smallSIMDType = TYP_SIMD8; if ((op4 == nullptr) && (simdType == TYP_SIMD16)) { smallSIMDType = TYP_SIMD12; } op2 = impSIMDPopStack(smallSIMDType); op1 = getOp1ForConstructor(opcode, newobjThis, clsHnd); // We are going to redefine the operands so that: // - op3 is the value that's going into the Z position, or null if it's a Vector4 constructor with a single // operand, and // - op4 is the W position value, or null if this is a Vector3 constructor. if (size == 16 && argCount == 3) { op4 = op3; op3 = nullptr; } simdTree = op2; if (op3 != nullptr) { simdTree = gtNewSimdWithElementNode(simdType, simdTree, gtNewIconNode(2, TYP_INT), op3, simdBaseJitType, size, /* isSimdAsHWIntrinsic */ true); } if (op4 != nullptr) { simdTree = gtNewSimdWithElementNode(simdType, simdTree, gtNewIconNode(3, TYP_INT), op4, simdBaseJitType, size, /* isSimdAsHWIntrinsic */ true); } copyBlkDst = op1; doCopyBlk = true; } break; case SIMDIntrinsicEqual: { op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType, instMethod); SIMDIntrinsicID intrinsicID = impSIMDRelOp(simdIntrinsicID, clsHnd, size, &simdBaseJitType, &op1, &op2); simdTree = gtNewSIMDNode(genActualType(callType), op1, op2, intrinsicID, simdBaseJitType, size); retVal = simdTree; } break; case SIMDIntrinsicSub: case SIMDIntrinsicBitwiseAnd: case SIMDIntrinsicBitwiseOr: { // op1 is the first operand; if instance method, op1 is "this" arg // op2 is the second operand op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType, instMethod); simdTree = gtNewSIMDNode(simdType, op1, op2, simdIntrinsicID, simdBaseJitType, size); retVal = simdTree; } break; // Unary operators that take and return a Vector. 
case SIMDIntrinsicCast: { op1 = impSIMDPopStack(simdType, instMethod); simdTree = gtNewSIMDNode(simdType, op1, simdIntrinsicID, simdBaseJitType, size); retVal = simdTree; } break; case SIMDIntrinsicHWAccel: { GenTreeIntCon* intConstTree = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, 1); retVal = intConstTree; } break; default: assert(!"Unimplemented SIMD Intrinsic"); return nullptr; } #if defined(TARGET_XARCH) || defined(TARGET_ARM64) // XArch/Arm64: also indicate that we use floating point registers. // The need for setting this here is that a method may not have SIMD // type lclvars, but might be exercising SIMD intrinsics on fields of // SIMD type. // // e.g. public Vector<float> ComplexVecFloat::sqabs() { return this.r * this.r + this.i * this.i; } compFloatingPointUsed = true; #endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) // At this point, we have a tree that we are going to store into a destination. // TODO-1stClassStructs: This should be a simple store or assignment, and should not require // GTF_ALL_EFFECT for the dest. This is currently emulating the previous behavior of // block ops. if (doCopyBlk) { GenTree* dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, simdType, copyBlkDst, typGetBlkLayout(getSIMDTypeSizeInBytes(clsHnd))); dest->gtFlags |= GTF_GLOB_REF; retVal = gtNewBlkOpNode(dest, simdTree, false, // not volatile true); // copyBlock retVal->gtFlags |= ((simdTree->gtFlags | copyBlkDst->gtFlags) & GTF_ALL_EFFECT); } return retVal; } #endif // FEATURE_SIMD
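As an aside on the bounds-check comments at the start of this block: below is a minimal standalone sketch of the argument validation those checks implement for the array-based Vector<T> constructor and CopyTo. The helper is hypothetical (it is not JIT code and its name is invented); it only models the two checks the importer emits as GT_BOUNDS_CHECK nodes when an index argument is present.

#include <stdexcept>

// Hypothetical helper modeling the two checks described in the comments above for
// new Vector<T>(array, index) and Vector<T>.CopyTo(array, index):
//   1. the index itself must be a valid array element; the unsigned compare also
//      rejects negative values (the "argument range" style check);
//   2. the last element accessed, index + vectorLength - 1, must also be in bounds
//      (the exact exception kind raised depends on the intrinsic).
void checkSimdArrayArgs(int index, unsigned arrayLength, unsigned vectorLength)
{
    if (static_cast<unsigned>(index) >= arrayLength)
    {
        throw std::out_of_range("index");
    }
    if (static_cast<unsigned>(index) + vectorLength - 1 >= arrayLength)
    {
        throw std::invalid_argument("array is too small for the vector");
    }
}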
1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and is therefore the wrong guard on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
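For illustration, a minimal standalone sketch of the guard this change moves away from. All names below are hypothetical stand-ins (this is not the actual JIT implementation); it only models the intent described above: the `COMPlus_FeatureSIMD` knob can turn `featureSIMD` off, but Arm64 must keep treating SIMD types as supported for ABI handling.

#include <cstdio>

// Hypothetical model, not the real Compiler class.
struct JitConfigSketch
{
    bool featureSIMD; // tracks the COMPlus_FeatureSIMD knob and can be false
    bool isArm64;

    // Models the intent of supportSIMDTypes() in this sketch: SIMD types stay
    // supported on Arm64 regardless of the knob.
    bool supportSIMDTypes() const
    {
        return isArm64 || featureSIMD;
    }
};

int main()
{
    JitConfigSketch cfg{/* featureSIMD */ false, /* isArm64 */ true};

    // An old-style 'if (!featureSIMD) return;' guard would bail out here...
    std::printf("featureSIMD guard bails out: %s\n", cfg.featureSIMD ? "no" : "yes");

    // ...while a supportSIMDTypes()-based guard keeps SIMD handling alive on Arm64.
    std::printf("supportSIMDTypes() guard bails out: %s\n", cfg.supportSIMDTypes() ? "no" : "yes");
    return 0;
}

In the diff below, this shows up as `if (!featureSIMD)` becoming `if (!supportSIMDTypes())` and `assert(featureSIMD)` becoming `assert(supportSIMDTypes())` in impSimdAsHWIntrinsic and impSimdAsHWIntrinsicSpecial.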
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and is therefore the wrong guard on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/jit/simdashwintrinsic.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "jitpch.h" #include "simdashwintrinsic.h" #ifdef FEATURE_HW_INTRINSICS static const SimdAsHWIntrinsicInfo simdAsHWIntrinsicInfoArray[] = { // clang-format off #if defined(TARGET_XARCH) #define SIMD_AS_HWINTRINSIC(classId, id, name, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, flag) \ {NI_##classId##_##id, name, SimdAsHWIntrinsicClassId::classId, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, static_cast<SimdAsHWIntrinsicFlag>(flag)}, #include "simdashwintrinsiclistxarch.h" #elif defined(TARGET_ARM64) #define SIMD_AS_HWINTRINSIC(classId, id, name, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, flag) \ {NI_##classId##_##id, name, SimdAsHWIntrinsicClassId::classId, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, static_cast<SimdAsHWIntrinsicFlag>(flag)}, #include "simdashwintrinsiclistarm64.h" #else #error Unsupported platform #endif // clang-format on }; //------------------------------------------------------------------------ // lookup: Gets the SimdAsHWIntrinsicInfo associated with a given NamedIntrinsic // // Arguments: // id -- The NamedIntrinsic associated with the SimdAsHWIntrinsic to lookup // // Return Value: // The SimdAsHWIntrinsicInfo associated with id const SimdAsHWIntrinsicInfo& SimdAsHWIntrinsicInfo::lookup(NamedIntrinsic id) { assert(id != NI_Illegal); assert(id > NI_SIMD_AS_HWINTRINSIC_START); assert(id < NI_SIMD_AS_HWINTRINSIC_END); return simdAsHWIntrinsicInfoArray[id - NI_SIMD_AS_HWINTRINSIC_START - 1]; } //------------------------------------------------------------------------ // lookupId: Gets the NamedIntrinsic for a given method name and InstructionSet // // Arguments: // className -- The name of the class associated with the SimdIntrinsic to lookup // methodName -- The name of the method associated with the SimdIntrinsic to lookup // enclosingClassName -- The name of the enclosing class // sizeOfVectorT -- The size of Vector<T> in bytes // // Return Value: // The NamedIntrinsic associated with methodName and classId NamedIntrinsic SimdAsHWIntrinsicInfo::lookupId(CORINFO_SIG_INFO* sig, const char* className, const char* methodName, const char* enclosingClassName, int sizeOfVectorT) { SimdAsHWIntrinsicClassId classId = lookupClassId(className, enclosingClassName, sizeOfVectorT); if (classId == SimdAsHWIntrinsicClassId::Unknown) { return NI_Illegal; } unsigned numArgs = sig->numArgs; bool isInstanceMethod = false; if (sig->hasThis()) { numArgs++; isInstanceMethod = true; } for (int i = 0; i < (NI_SIMD_AS_HWINTRINSIC_END - NI_SIMD_AS_HWINTRINSIC_START - 1); i++) { const SimdAsHWIntrinsicInfo& intrinsicInfo = simdAsHWIntrinsicInfoArray[i]; if (classId != intrinsicInfo.classId) { continue; } if (numArgs != static_cast<unsigned>(intrinsicInfo.numArgs)) { continue; } if (isInstanceMethod != SimdAsHWIntrinsicInfo::IsInstanceMethod(intrinsicInfo.id)) { continue; } if (strcmp(methodName, intrinsicInfo.name) != 0) { continue; } return intrinsicInfo.id; } return NI_Illegal; } //------------------------------------------------------------------------ // lookupClassId: Gets the SimdAsHWIntrinsicClassId for a given class name and enclsoing class name // // Arguments: // className -- The name of the class associated with the SimdAsHWIntrinsicClassId to lookup // enclosingClassName -- The name of the enclosing class // sizeOfVectorT -- The size of Vector<T> in bytes // // Return Value: // The SimdAsHWIntrinsicClassId associated 
with className and enclosingClassName SimdAsHWIntrinsicClassId SimdAsHWIntrinsicInfo::lookupClassId(const char* className, const char* enclosingClassName, int sizeOfVectorT) { assert(className != nullptr); if ((enclosingClassName != nullptr) || (className[0] != 'V')) { return SimdAsHWIntrinsicClassId::Unknown; } if (strcmp(className, "Vector2") == 0) { return SimdAsHWIntrinsicClassId::Vector2; } if (strcmp(className, "Vector3") == 0) { return SimdAsHWIntrinsicClassId::Vector3; } if (strcmp(className, "Vector4") == 0) { return SimdAsHWIntrinsicClassId::Vector4; } if ((strcmp(className, "Vector") == 0) || (strcmp(className, "Vector`1") == 0)) { #if defined(TARGET_XARCH) if (sizeOfVectorT == 32) { return SimdAsHWIntrinsicClassId::VectorT256; } #endif // TARGET_XARCH assert(sizeOfVectorT == 16); return SimdAsHWIntrinsicClassId::VectorT128; } return SimdAsHWIntrinsicClassId::Unknown; } //------------------------------------------------------------------------ // impSimdAsIntrinsic: Import a SIMD intrinsic as a GT_HWINTRINSIC node if possible // // Arguments: // intrinsic -- id of the intrinsic function. // clsHnd -- class handle containing the intrinsic function. // method -- method handle of the intrinsic function. // sig -- signature of the intrinsic call // mustExpand -- true if the intrinsic must return a GenTree*; otherwise, false // // Return Value: // The GT_HWINTRINSIC node, or nullptr if not a supported intrinsic // GenTree* Compiler::impSimdAsHWIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, GenTree* newobjThis) { if (!featureSIMD) { // We can't support SIMD intrinsics if the JIT doesn't support the feature return nullptr; } if (!IsBaselineSimdIsaSupported()) { // The user disabled support for the baseline ISA so // don't emit any SIMD intrinsics as they all require // this at a minimum return nullptr; } CORINFO_CLASS_HANDLE argClass = NO_CLASS_HANDLE; var_types retType = JITtype2varType(sig->retType); CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; var_types simdType = TYP_UNKNOWN; unsigned simdSize = 0; unsigned numArgs = sig->numArgs; bool isInstanceMethod = false; // We want to resolve and populate the handle cache for this type even // if it isn't the basis for anything carried on the node. simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(clsHnd, &simdSize); if ((clsHnd != m_simdHandleCache->SIMDVectorHandle) && ((simdBaseJitType == CORINFO_TYPE_UNDEF) || !varTypeIsArithmetic(JitType2PreciseVarType(simdBaseJitType)))) { // We want to exit early if the clsHnd should have a base type and it isn't one // of the supported types. 
This handles cases like op_Explicit which take a Vector<T> return nullptr; } if (retType == TYP_STRUCT) { simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(sig->retTypeSigClass, &simdSize); retType = getSIMDTypeForSize(simdSize); } else if (numArgs != 0) { argClass = info.compCompHnd->getArgClass(sig, sig->args); simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(argClass, &simdSize); } if (sig->hasThis()) { assert(SimdAsHWIntrinsicInfo::IsInstanceMethod(intrinsic)); numArgs++; isInstanceMethod = true; argClass = clsHnd; if (SimdAsHWIntrinsicInfo::BaseTypeFromThisArg(intrinsic)) { assert(simdBaseJitType == CORINFO_TYPE_UNDEF); simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(clsHnd, &simdSize); } } else if ((clsHnd == m_simdHandleCache->SIMDVectorHandle) && (numArgs != 0) && !SimdAsHWIntrinsicInfo::KeepBaseTypeFromRet(intrinsic)) { // We need to fixup the clsHnd in the case we are an intrinsic on Vector // The first argument will be the appropriate Vector<T> handle to use clsHnd = info.compCompHnd->getArgClass(sig, sig->args); // We also need to adjust the simdBaseJitType as some methods on Vector return // a type different than the operation we need to perform. An example // is LessThan or Equals which takes double but returns long. This is // unlike the counterparts on Vector<T> which take a return the same type. simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(clsHnd, &simdSize); } if ((simdBaseJitType == CORINFO_TYPE_UNDEF) || !varTypeIsArithmetic(JitType2PreciseVarType(simdBaseJitType)) || (simdSize == 0)) { // We get here for a devirtualization of IEquatable`1.Equals // or if the user tries to use Vector<T> with an unsupported type return nullptr; } var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); simdType = getSIMDTypeForSize(simdSize); assert(varTypeIsSIMD(simdType)); NamedIntrinsic hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(intrinsic, simdBaseType); if ((hwIntrinsic == NI_Illegal) || !varTypeIsSIMD(simdType)) { // The simdBaseJitType isn't supported by the intrinsic return nullptr; } if (SimdAsHWIntrinsicInfo::IsFloatingPointUsed(intrinsic)) { // Set `compFloatingPointUsed` to cover the scenario where an intrinsic // is operating on SIMD fields, but where no SIMD local vars are in use. compFloatingPointUsed = true; } if (hwIntrinsic == intrinsic) { // The SIMD intrinsic requires special handling outside the normal code path return impSimdAsHWIntrinsicSpecial(intrinsic, clsHnd, sig, retType, simdBaseJitType, simdSize, newobjThis); } CORINFO_InstructionSet hwIntrinsicIsa = HWIntrinsicInfo::lookupIsa(hwIntrinsic); if (!compOpportunisticallyDependsOn(hwIntrinsicIsa)) { // The JIT doesn't support the required ISA return nullptr; } CORINFO_ARG_LIST_HANDLE argList = sig->args; var_types argType = TYP_UNKNOWN; GenTree* op1 = nullptr; GenTree* op2 = nullptr; switch (numArgs) { case 0: { assert(!SimdAsHWIntrinsicInfo::NeedsOperandsSwapped(intrinsic)); return gtNewSimdAsHWIntrinsicNode(retType, hwIntrinsic, simdBaseJitType, simdSize); } case 1: { argType = isInstanceMethod ? simdType : JITtype2varType(strip(info.compCompHnd->getArgType(sig, argList, &argClass))); op1 = getArgForHWIntrinsic(argType, argClass, isInstanceMethod); assert(!SimdAsHWIntrinsicInfo::NeedsOperandsSwapped(intrinsic)); return gtNewSimdAsHWIntrinsicNode(retType, op1, hwIntrinsic, simdBaseJitType, simdSize); } case 2: { CORINFO_ARG_LIST_HANDLE arg2 = isInstanceMethod ? 
argList : info.compCompHnd->getArgNext(argList); argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg2, &argClass))); op2 = getArgForHWIntrinsic(argType, argClass); argType = isInstanceMethod ? simdType : JITtype2varType(strip(info.compCompHnd->getArgType(sig, argList, &argClass))); op1 = getArgForHWIntrinsic(argType, argClass, isInstanceMethod); if (SimdAsHWIntrinsicInfo::NeedsOperandsSwapped(intrinsic)) { std::swap(op1, op2); } return gtNewSimdAsHWIntrinsicNode(retType, op1, op2, hwIntrinsic, simdBaseJitType, simdSize); } } assert(!"Unexpected SimdAsHWIntrinsic"); return nullptr; } //------------------------------------------------------------------------ // impSimdAsHWIntrinsicSpecial: Import a SIMD intrinsic as a GT_HWINTRINSIC node if possible // This method handles cases which cannot be table driven // // Arguments: // intrinsic -- id of the intrinsic function. // clsHnd -- class handle containing the intrinsic function. // sig -- signature of the intrinsic call // retType -- the return type of the intrinsic call // simdBaseJitType -- the base JIT type of SIMD type of the intrinsic // simdSize -- the size of the SIMD type of the intrinsic // // Return Value: // The GT_HWINTRINSIC node, or nullptr if not a supported intrinsic // GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, var_types retType, CorInfoType simdBaseJitType, unsigned simdSize, GenTree* newobjThis) { var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(featureSIMD); assert(retType != TYP_UNKNOWN); assert(varTypeIsArithmetic(simdBaseType)); assert(simdSize != 0); assert(SimdAsHWIntrinsicInfo::lookupHWIntrinsic(intrinsic, simdBaseType) == intrinsic); var_types simdType = getSIMDTypeForSize(simdSize); assert(varTypeIsSIMD(simdType)); CORINFO_ARG_LIST_HANDLE argList = sig->args; var_types argType = TYP_UNKNOWN; CORINFO_CLASS_HANDLE argClass = NO_CLASS_HANDLE; GenTree* op1 = nullptr; GenTree* op2 = nullptr; GenTree* op3 = nullptr; unsigned numArgs = sig->numArgs; bool isInstanceMethod = false; if (sig->hasThis()) { assert(SimdAsHWIntrinsicInfo::IsInstanceMethod(intrinsic)); numArgs++; isInstanceMethod = true; argClass = clsHnd; } #if defined(TARGET_XARCH) bool isVectorT256 = (SimdAsHWIntrinsicInfo::lookupClassId(intrinsic) == SimdAsHWIntrinsicClassId::VectorT256); // We should have already exited early if SSE2 isn't supported assert(compIsaSupportedDebugOnly(InstructionSet_SSE2)); // Vector<T>, when 32-bytes, requires at least AVX2 assert(!isVectorT256 || compIsaSupportedDebugOnly(InstructionSet_AVX2)); #elif defined(TARGET_ARM64) // We should have already exited early if AdvSimd isn't supported assert(compIsaSupportedDebugOnly(InstructionSet_AdvSimd)); #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 switch (intrinsic) { #if defined(TARGET_XARCH) case NI_VectorT128_ConvertToDouble: case NI_VectorT256_ConvertToDouble: case NI_VectorT128_ConvertToInt64: case NI_VectorT256_ConvertToInt64: case NI_VectorT128_ConvertToUInt32: case NI_VectorT256_ConvertToUInt32: case NI_VectorT128_ConvertToUInt64: case NI_VectorT256_ConvertToUInt64: { // TODO-XARCH-CQ: These intrinsics should be accelerated return nullptr; } case NI_VectorT128_ConvertToSingle: case NI_VectorT256_ConvertToSingle: { if (simdBaseType == TYP_UINT) { // TODO-XARCH-CQ: These intrinsics should be accelerated return nullptr; } break; } #endif // TARGET_XARCH #if defined(TARGET_X86) case NI_VectorT128_CreateBroadcast: case 
NI_VectorT256_CreateBroadcast: { if (varTypeIsLong(simdBaseType)) { // TODO-XARCH-CQ: It may be beneficial to emit the movq // instruction, which takes a 64-bit memory address and // works on 32-bit x86 systems. return nullptr; } break; } #endif // TARGET_X86 #if defined(TARGET_XARCH) case NI_VectorT256_As: #endif // TARGET_XARCH case NI_VectorT128_As: { unsigned retSimdSize; CorInfoType retBaseJitType = getBaseJitTypeAndSizeOfSIMDType(sig->retTypeSigClass, &retSimdSize); if ((retBaseJitType == CORINFO_TYPE_UNDEF) || !varTypeIsArithmetic(JitType2PreciseVarType(retBaseJitType)) || (retSimdSize == 0)) { // We get here if the return type is an unsupported type return nullptr; } break; } #if defined(TARGET_XARCH) case NI_VectorT256_get_Item: case NI_VectorT128_get_Item: { switch (simdBaseType) { // Using software fallback if simdBaseType is not supported by hardware case TYP_BYTE: case TYP_UBYTE: case TYP_INT: case TYP_UINT: case TYP_LONG: case TYP_ULONG: if (!compExactlyDependsOn(InstructionSet_SSE41)) { return nullptr; } break; case TYP_DOUBLE: case TYP_FLOAT: case TYP_SHORT: case TYP_USHORT: // short/ushort/float/double is supported by SSE2 break; default: unreached(); } break; } #endif // TARGET_XARCH #if defined(TARGET_XARCH) case NI_VectorT128_Dot: { if (!compOpportunisticallyDependsOn(InstructionSet_SSE41)) { // We need to exit early if this is Vector<T>.Dot for int or uint and SSE41 is not supported // The other types should be handled via the table driven paths assert((simdBaseType == TYP_INT) || (simdBaseType == TYP_UINT)); return nullptr; } break; } case NI_VectorT128_Sum: { if (varTypeIsFloating(simdBaseType)) { if (!compOpportunisticallyDependsOn(InstructionSet_SSE3)) { // Floating-point types require SSE3.HorizontalAdd return nullptr; } } else if (!compOpportunisticallyDependsOn(InstructionSet_SSSE3)) { // Integral types require SSSE3.HorizontalAdd return nullptr; } break; } #endif // TARGET_XARCH default: { // Most intrinsics have some path that works even if only SSE2/AdvSimd is available break; } } GenTree* copyBlkDst = nullptr; GenTree* copyBlkSrc = nullptr; switch (numArgs) { case 0: { assert(newobjThis == nullptr); switch (intrinsic) { #if defined(TARGET_XARCH) case NI_Vector2_get_One: case NI_Vector3_get_One: case NI_Vector4_get_One: case NI_VectorT128_get_One: case NI_VectorT256_get_One: { switch (simdBaseType) { case TYP_BYTE: case TYP_UBYTE: case TYP_SHORT: case TYP_USHORT: case TYP_INT: case TYP_UINT: { op1 = gtNewIconNode(1, TYP_INT); break; } case TYP_LONG: case TYP_ULONG: { op1 = gtNewLconNode(1); break; } case TYP_FLOAT: case TYP_DOUBLE: { op1 = gtNewDconNode(1.0, simdBaseType); break; } default: { unreached(); } } return gtNewSimdCreateBroadcastNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_get_Count: case NI_VectorT256_get_Count: { GenTreeIntCon* countNode = gtNewIconNode(getSIMDVectorLength(simdSize, simdBaseType), TYP_INT); countNode->gtFlags |= GTF_ICON_SIMD_COUNT; return countNode; } #elif defined(TARGET_ARM64) case NI_Vector2_get_One: case NI_Vector3_get_One: case NI_Vector4_get_One: case NI_VectorT128_get_One: { switch (simdBaseType) { case TYP_BYTE: case TYP_UBYTE: case TYP_SHORT: case TYP_USHORT: case TYP_INT: case TYP_UINT: { op1 = gtNewIconNode(1, TYP_INT); break; } case TYP_LONG: case TYP_ULONG: { op1 = gtNewLconNode(1); break; } case TYP_FLOAT: case TYP_DOUBLE: { op1 = gtNewDconNode(1.0, simdBaseType); break; } default: { unreached(); } } return gtNewSimdCreateBroadcastNode(retType, op1, 
simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_get_Count: { GenTreeIntCon* countNode = gtNewIconNode(getSIMDVectorLength(simdSize, simdBaseType), TYP_INT); countNode->gtFlags |= GTF_ICON_SIMD_COUNT; return countNode; } #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 default: { // Some platforms warn about unhandled switch cases // We handle it more generally via the assert and nullptr return below. break; } } break; } case 1: { assert(newobjThis == nullptr); bool isOpExplicit = (intrinsic == NI_VectorT128_op_Explicit) || (intrinsic == NI_VectorT128_As); #if defined(TARGET_XARCH) isOpExplicit |= (intrinsic == NI_VectorT256_op_Explicit) || (intrinsic == NI_VectorT256_As); #endif if (isOpExplicit) { // We fold away the cast here, as it only exists to satisfy the // type system. It is safe to do this here since the op1 type // and the signature return type are both the same TYP_SIMD. op1 = impSIMDPopStack(retType, /* expectAddr: */ false, sig->retTypeClass); SetOpLclRelatedToSIMDIntrinsic(op1); assert(op1->gtType == getSIMDTypeForSize(getSIMDTypeSizeInBytes(sig->retTypeSigClass))); return op1; } argType = isInstanceMethod ? simdType : JITtype2varType(strip(info.compCompHnd->getArgType(sig, argList, &argClass))); op1 = getArgForHWIntrinsic(argType, argClass, isInstanceMethod); assert(!SimdAsHWIntrinsicInfo::NeedsOperandsSwapped(intrinsic)); switch (intrinsic) { #if defined(TARGET_XARCH) case NI_Vector2_Abs: case NI_Vector3_Abs: case NI_Vector4_Abs: case NI_VectorT128_Abs: case NI_VectorT256_Abs: { return gtNewSimdAbsNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ConvertToInt32: case NI_VectorT256_ConvertToInt32: { assert(simdBaseType == TYP_FLOAT); NamedIntrinsic convert = (simdSize == 32) ? NI_AVX_ConvertToVector256Int32WithTruncation : NI_SSE2_ConvertToVector128Int32WithTruncation; return gtNewSimdHWIntrinsicNode(retType, op1, convert, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ConvertToSingle: case NI_VectorT256_ConvertToSingle: { assert(simdBaseType == TYP_INT); NamedIntrinsic convert = (simdSize == 32) ? 
NI_AVX_ConvertToVector256Single : NI_SSE2_ConvertToVector128Single; return gtNewSimdHWIntrinsicNode(retType, op1, convert, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_Sum: case NI_VectorT256_Sum: { return gtNewSimdSumNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_WidenLower: case NI_VectorT256_WidenLower: { return gtNewSimdWidenLowerNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_WidenUpper: case NI_VectorT256_WidenUpper: { return gtNewSimdWidenUpperNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } #elif defined(TARGET_ARM64) case NI_VectorT128_Abs: { return gtNewSimdAbsNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ConvertToDouble: { assert((simdBaseType == TYP_LONG) || (simdBaseType == TYP_ULONG)); return gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_Arm64_ConvertToDouble, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ConvertToInt32: { assert(simdBaseType == TYP_FLOAT); return gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_ConvertToInt32RoundToZero, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ConvertToInt64: { assert(simdBaseType == TYP_DOUBLE); return gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_Arm64_ConvertToInt64RoundToZero, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ConvertToSingle: { assert((simdBaseType == TYP_INT) || (simdBaseType == TYP_UINT)); return gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_ConvertToSingle, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ConvertToUInt32: { assert(simdBaseType == TYP_FLOAT); return gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_ConvertToUInt32RoundToZero, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ConvertToUInt64: { assert(simdBaseType == TYP_DOUBLE); return gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_Arm64_ConvertToUInt64RoundToZero, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_Sum: { return gtNewSimdSumNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_WidenLower: { return gtNewSimdWidenLowerNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_WidenUpper: { return gtNewSimdWidenUpperNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 default: { // Some platforms warn about unhandled switch cases // We handle it more generally via the assert and nullptr return below. break; } } break; } case 2: { CORINFO_ARG_LIST_HANDLE arg2 = isInstanceMethod ? argList : info.compCompHnd->getArgNext(argList); argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg2, &argClass))); op2 = getArgForHWIntrinsic(argType, argClass); argType = isInstanceMethod ? 
simdType : JITtype2varType(strip(info.compCompHnd->getArgType(sig, argList, &argClass))); op1 = getArgForHWIntrinsic(argType, argClass, isInstanceMethod, newobjThis); assert(!SimdAsHWIntrinsicInfo::NeedsOperandsSwapped(intrinsic)); switch (intrinsic) { #if defined(TARGET_XARCH) case NI_Vector2_CreateBroadcast: case NI_Vector3_CreateBroadcast: case NI_Vector4_CreateBroadcast: case NI_VectorT128_CreateBroadcast: case NI_VectorT256_CreateBroadcast: { assert(retType == TYP_VOID); copyBlkDst = op1; copyBlkSrc = gtNewSimdCreateBroadcastNode(simdType, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); break; } case NI_VectorT128_get_Item: case NI_VectorT256_get_Item: { return gtNewSimdGetElementNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_Vector2_op_Division: case NI_Vector3_op_Division: { // Vector2/3 div: since the top-most elements will be zero, we end up // perfoming 0/0 which is a NAN. Therefore, post division we need to set the // top-most elements to zero. This is achieved by left logical shift followed // by right logical shift of the result. // These are 16 byte operations, so we subtract from 16 bytes, not the vector register length. unsigned shiftCount = 16 - simdSize; assert((shiftCount > 0) && (shiftCount <= 16)); // retNode = Sse.Divide(op1, op2); GenTree* retNode = gtNewSimdAsHWIntrinsicNode(retType, op1, op2, NI_SSE_Divide, simdBaseJitType, simdSize); // retNode = Sse.ShiftLeftLogical128BitLane(retNode.AsInt32(), shiftCount).AsSingle() retNode = gtNewSimdAsHWIntrinsicNode(retType, retNode, gtNewIconNode(shiftCount, TYP_INT), NI_SSE2_ShiftLeftLogical128BitLane, CORINFO_TYPE_INT, simdSize); // retNode = Sse.ShiftRightLogical128BitLane(retNode.AsInt32(), shiftCount).AsSingle() retNode = gtNewSimdAsHWIntrinsicNode(retType, retNode, gtNewIconNode(shiftCount, TYP_INT), NI_SSE2_ShiftRightLogical128BitLane, CORINFO_TYPE_INT, simdSize); return retNode; } case NI_VectorT128_Dot: { return gtNewSimdDotProdNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_Equals: { return gtNewSimdCmpOpNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_GreaterThan: case NI_VectorT256_GreaterThan: { return gtNewSimdCmpOpNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_GreaterThanOrEqual: case NI_VectorT256_GreaterThanOrEqual: { return gtNewSimdCmpOpNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_LessThan: case NI_VectorT256_LessThan: { return gtNewSimdCmpOpNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_LessThanOrEqual: case NI_VectorT256_LessThanOrEqual: { return gtNewSimdCmpOpNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_Max: case NI_VectorT256_Max: { return gtNewSimdMaxNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_Min: case NI_VectorT256_Min: { return gtNewSimdMinNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_Narrow: case NI_VectorT256_Narrow: { return gtNewSimdNarrowNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_op_Multiply: case NI_VectorT256_op_Multiply: { return gtNewSimdBinOpNode(GT_MUL, retType, op1, 
op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ShiftLeft: case NI_VectorT256_ShiftLeft: { return gtNewSimdBinOpNode(GT_LSH, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ShiftRightArithmetic: case NI_VectorT256_ShiftRightArithmetic: { return gtNewSimdBinOpNode(GT_RSH, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ShiftRightLogical: case NI_VectorT256_ShiftRightLogical: { return gtNewSimdBinOpNode(GT_RSZ, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } #elif defined(TARGET_ARM64) case NI_Vector2_CreateBroadcast: case NI_Vector3_CreateBroadcast: case NI_Vector4_CreateBroadcast: case NI_VectorT128_CreateBroadcast: { assert(retType == TYP_VOID); copyBlkDst = op1; copyBlkSrc = gtNewSimdCreateBroadcastNode(simdType, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); break; } case NI_VectorT128_get_Item: { return gtNewSimdGetElementNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_Max: { return gtNewSimdMaxNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_Min: { return gtNewSimdMinNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_Narrow: { return gtNewSimdNarrowNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_op_Multiply: { return gtNewSimdBinOpNode(GT_MUL, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ShiftLeft: { return gtNewSimdBinOpNode(GT_LSH, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ShiftRightArithmetic: { return gtNewSimdBinOpNode(GT_RSH, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ShiftRightLogical: { return gtNewSimdBinOpNode(GT_RSZ, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 default: { // Some platforms warn about unhandled switch cases // We handle it more generally via the assert and nullptr return below. break; } } break; } case 3: { assert(newobjThis == nullptr); CORINFO_ARG_LIST_HANDLE arg2 = isInstanceMethod ? argList : info.compCompHnd->getArgNext(argList); CORINFO_ARG_LIST_HANDLE arg3 = info.compCompHnd->getArgNext(arg2); argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg3, &argClass))); op3 = getArgForHWIntrinsic(argType, argClass); argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg2, &argClass))); op2 = getArgForHWIntrinsic(argType, argClass); argType = isInstanceMethod ? 
simdType : JITtype2varType(strip(info.compCompHnd->getArgType(sig, argList, &argClass))); op1 = getArgForHWIntrinsic(argType, argClass, isInstanceMethod, newobjThis); assert(!SimdAsHWIntrinsicInfo::NeedsOperandsSwapped(intrinsic)); switch (intrinsic) { #if defined(TARGET_XARCH) case NI_VectorT128_ConditionalSelect: case NI_VectorT256_ConditionalSelect: { return gtNewSimdCndSelNode(retType, op1, op2, op3, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } #elif defined(TARGET_ARM64) case NI_VectorT128_ConditionalSelect: { return gtNewSimdCndSelNode(retType, op1, op2, op3, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 default: { // Some platforms warn about unhandled switch cases // We handle it more generally via the assert and nullptr return below. break; } } } } if (copyBlkDst != nullptr) { assert(copyBlkSrc != nullptr); // At this point, we have a tree that we are going to store into a destination. // TODO-1stClassStructs: This should be a simple store or assignment, and should not require // GTF_ALL_EFFECT for the dest. This is currently emulating the previous behavior of // block ops. GenTree* dest = gtNewBlockVal(copyBlkDst, simdSize); dest->gtType = simdType; dest->gtFlags |= GTF_GLOB_REF; GenTree* retNode = gtNewBlkOpNode(dest, copyBlkSrc, /* isVolatile */ false, /* isCopyBlock */ true); retNode->gtFlags |= ((copyBlkDst->gtFlags | copyBlkSrc->gtFlags) & GTF_ALL_EFFECT); return retNode; } assert(copyBlkSrc == nullptr); assert(!"Unexpected SimdAsHWIntrinsic"); return nullptr; } #endif // FEATURE_HW_INTRINSICS
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "jitpch.h" #include "simdashwintrinsic.h" #ifdef FEATURE_HW_INTRINSICS static const SimdAsHWIntrinsicInfo simdAsHWIntrinsicInfoArray[] = { // clang-format off #if defined(TARGET_XARCH) #define SIMD_AS_HWINTRINSIC(classId, id, name, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, flag) \ {NI_##classId##_##id, name, SimdAsHWIntrinsicClassId::classId, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, static_cast<SimdAsHWIntrinsicFlag>(flag)}, #include "simdashwintrinsiclistxarch.h" #elif defined(TARGET_ARM64) #define SIMD_AS_HWINTRINSIC(classId, id, name, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, flag) \ {NI_##classId##_##id, name, SimdAsHWIntrinsicClassId::classId, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, static_cast<SimdAsHWIntrinsicFlag>(flag)}, #include "simdashwintrinsiclistarm64.h" #else #error Unsupported platform #endif // clang-format on }; //------------------------------------------------------------------------ // lookup: Gets the SimdAsHWIntrinsicInfo associated with a given NamedIntrinsic // // Arguments: // id -- The NamedIntrinsic associated with the SimdAsHWIntrinsic to lookup // // Return Value: // The SimdAsHWIntrinsicInfo associated with id const SimdAsHWIntrinsicInfo& SimdAsHWIntrinsicInfo::lookup(NamedIntrinsic id) { assert(id != NI_Illegal); assert(id > NI_SIMD_AS_HWINTRINSIC_START); assert(id < NI_SIMD_AS_HWINTRINSIC_END); return simdAsHWIntrinsicInfoArray[id - NI_SIMD_AS_HWINTRINSIC_START - 1]; } //------------------------------------------------------------------------ // lookupId: Gets the NamedIntrinsic for a given method name and InstructionSet // // Arguments: // className -- The name of the class associated with the SimdIntrinsic to lookup // methodName -- The name of the method associated with the SimdIntrinsic to lookup // enclosingClassName -- The name of the enclosing class // sizeOfVectorT -- The size of Vector<T> in bytes // // Return Value: // The NamedIntrinsic associated with methodName and classId NamedIntrinsic SimdAsHWIntrinsicInfo::lookupId(CORINFO_SIG_INFO* sig, const char* className, const char* methodName, const char* enclosingClassName, int sizeOfVectorT) { SimdAsHWIntrinsicClassId classId = lookupClassId(className, enclosingClassName, sizeOfVectorT); if (classId == SimdAsHWIntrinsicClassId::Unknown) { return NI_Illegal; } unsigned numArgs = sig->numArgs; bool isInstanceMethod = false; if (sig->hasThis()) { numArgs++; isInstanceMethod = true; } for (int i = 0; i < (NI_SIMD_AS_HWINTRINSIC_END - NI_SIMD_AS_HWINTRINSIC_START - 1); i++) { const SimdAsHWIntrinsicInfo& intrinsicInfo = simdAsHWIntrinsicInfoArray[i]; if (classId != intrinsicInfo.classId) { continue; } if (numArgs != static_cast<unsigned>(intrinsicInfo.numArgs)) { continue; } if (isInstanceMethod != SimdAsHWIntrinsicInfo::IsInstanceMethod(intrinsicInfo.id)) { continue; } if (strcmp(methodName, intrinsicInfo.name) != 0) { continue; } return intrinsicInfo.id; } return NI_Illegal; } //------------------------------------------------------------------------ // lookupClassId: Gets the SimdAsHWIntrinsicClassId for a given class name and enclsoing class name // // Arguments: // className -- The name of the class associated with the SimdAsHWIntrinsicClassId to lookup // enclosingClassName -- The name of the enclosing class // sizeOfVectorT -- The size of Vector<T> in bytes // // Return Value: // The SimdAsHWIntrinsicClassId associated 
with className and enclosingClassName SimdAsHWIntrinsicClassId SimdAsHWIntrinsicInfo::lookupClassId(const char* className, const char* enclosingClassName, int sizeOfVectorT) { assert(className != nullptr); if ((enclosingClassName != nullptr) || (className[0] != 'V')) { return SimdAsHWIntrinsicClassId::Unknown; } if (strcmp(className, "Vector2") == 0) { return SimdAsHWIntrinsicClassId::Vector2; } if (strcmp(className, "Vector3") == 0) { return SimdAsHWIntrinsicClassId::Vector3; } if (strcmp(className, "Vector4") == 0) { return SimdAsHWIntrinsicClassId::Vector4; } if ((strcmp(className, "Vector") == 0) || (strcmp(className, "Vector`1") == 0)) { #if defined(TARGET_XARCH) if (sizeOfVectorT == 32) { return SimdAsHWIntrinsicClassId::VectorT256; } #endif // TARGET_XARCH assert(sizeOfVectorT == 16); return SimdAsHWIntrinsicClassId::VectorT128; } return SimdAsHWIntrinsicClassId::Unknown; } //------------------------------------------------------------------------ // impSimdAsIntrinsic: Import a SIMD intrinsic as a GT_HWINTRINSIC node if possible // // Arguments: // intrinsic -- id of the intrinsic function. // clsHnd -- class handle containing the intrinsic function. // method -- method handle of the intrinsic function. // sig -- signature of the intrinsic call // mustExpand -- true if the intrinsic must return a GenTree*; otherwise, false // // Return Value: // The GT_HWINTRINSIC node, or nullptr if not a supported intrinsic // GenTree* Compiler::impSimdAsHWIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, GenTree* newobjThis) { if (!supportSIMDTypes()) { // We can't support SIMD intrinsics if the JIT doesn't support the feature return nullptr; } if (!IsBaselineSimdIsaSupported()) { // The user disabled support for the baseline ISA so // don't emit any SIMD intrinsics as they all require // this at a minimum return nullptr; } CORINFO_CLASS_HANDLE argClass = NO_CLASS_HANDLE; var_types retType = JITtype2varType(sig->retType); CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; var_types simdType = TYP_UNKNOWN; unsigned simdSize = 0; unsigned numArgs = sig->numArgs; bool isInstanceMethod = false; // We want to resolve and populate the handle cache for this type even // if it isn't the basis for anything carried on the node. simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(clsHnd, &simdSize); if ((clsHnd != m_simdHandleCache->SIMDVectorHandle) && ((simdBaseJitType == CORINFO_TYPE_UNDEF) || !varTypeIsArithmetic(JitType2PreciseVarType(simdBaseJitType)))) { // We want to exit early if the clsHnd should have a base type and it isn't one // of the supported types. 
This handles cases like op_Explicit which take a Vector<T> return nullptr; } if (retType == TYP_STRUCT) { simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(sig->retTypeSigClass, &simdSize); retType = getSIMDTypeForSize(simdSize); } else if (numArgs != 0) { argClass = info.compCompHnd->getArgClass(sig, sig->args); simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(argClass, &simdSize); } if (sig->hasThis()) { assert(SimdAsHWIntrinsicInfo::IsInstanceMethod(intrinsic)); numArgs++; isInstanceMethod = true; argClass = clsHnd; if (SimdAsHWIntrinsicInfo::BaseTypeFromThisArg(intrinsic)) { assert(simdBaseJitType == CORINFO_TYPE_UNDEF); simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(clsHnd, &simdSize); } } else if ((clsHnd == m_simdHandleCache->SIMDVectorHandle) && (numArgs != 0) && !SimdAsHWIntrinsicInfo::KeepBaseTypeFromRet(intrinsic)) { // We need to fixup the clsHnd in the case we are an intrinsic on Vector // The first argument will be the appropriate Vector<T> handle to use clsHnd = info.compCompHnd->getArgClass(sig, sig->args); // We also need to adjust the simdBaseJitType as some methods on Vector return // a type different than the operation we need to perform. An example // is LessThan or Equals which takes double but returns long. This is // unlike the counterparts on Vector<T> which take a return the same type. simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(clsHnd, &simdSize); } if ((simdBaseJitType == CORINFO_TYPE_UNDEF) || !varTypeIsArithmetic(JitType2PreciseVarType(simdBaseJitType)) || (simdSize == 0)) { // We get here for a devirtualization of IEquatable`1.Equals // or if the user tries to use Vector<T> with an unsupported type return nullptr; } var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); simdType = getSIMDTypeForSize(simdSize); assert(varTypeIsSIMD(simdType)); NamedIntrinsic hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(intrinsic, simdBaseType); if ((hwIntrinsic == NI_Illegal) || !varTypeIsSIMD(simdType)) { // The simdBaseJitType isn't supported by the intrinsic return nullptr; } if (SimdAsHWIntrinsicInfo::IsFloatingPointUsed(intrinsic)) { // Set `compFloatingPointUsed` to cover the scenario where an intrinsic // is operating on SIMD fields, but where no SIMD local vars are in use. compFloatingPointUsed = true; } if (hwIntrinsic == intrinsic) { // The SIMD intrinsic requires special handling outside the normal code path return impSimdAsHWIntrinsicSpecial(intrinsic, clsHnd, sig, retType, simdBaseJitType, simdSize, newobjThis); } CORINFO_InstructionSet hwIntrinsicIsa = HWIntrinsicInfo::lookupIsa(hwIntrinsic); if (!compOpportunisticallyDependsOn(hwIntrinsicIsa)) { // The JIT doesn't support the required ISA return nullptr; } CORINFO_ARG_LIST_HANDLE argList = sig->args; var_types argType = TYP_UNKNOWN; GenTree* op1 = nullptr; GenTree* op2 = nullptr; switch (numArgs) { case 0: { assert(!SimdAsHWIntrinsicInfo::NeedsOperandsSwapped(intrinsic)); return gtNewSimdAsHWIntrinsicNode(retType, hwIntrinsic, simdBaseJitType, simdSize); } case 1: { argType = isInstanceMethod ? simdType : JITtype2varType(strip(info.compCompHnd->getArgType(sig, argList, &argClass))); op1 = getArgForHWIntrinsic(argType, argClass, isInstanceMethod); assert(!SimdAsHWIntrinsicInfo::NeedsOperandsSwapped(intrinsic)); return gtNewSimdAsHWIntrinsicNode(retType, op1, hwIntrinsic, simdBaseJitType, simdSize); } case 2: { CORINFO_ARG_LIST_HANDLE arg2 = isInstanceMethod ? 
argList : info.compCompHnd->getArgNext(argList); argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg2, &argClass))); op2 = getArgForHWIntrinsic(argType, argClass); argType = isInstanceMethod ? simdType : JITtype2varType(strip(info.compCompHnd->getArgType(sig, argList, &argClass))); op1 = getArgForHWIntrinsic(argType, argClass, isInstanceMethod); if (SimdAsHWIntrinsicInfo::NeedsOperandsSwapped(intrinsic)) { std::swap(op1, op2); } return gtNewSimdAsHWIntrinsicNode(retType, op1, op2, hwIntrinsic, simdBaseJitType, simdSize); } } assert(!"Unexpected SimdAsHWIntrinsic"); return nullptr; } //------------------------------------------------------------------------ // impSimdAsHWIntrinsicSpecial: Import a SIMD intrinsic as a GT_HWINTRINSIC node if possible // This method handles cases which cannot be table driven // // Arguments: // intrinsic -- id of the intrinsic function. // clsHnd -- class handle containing the intrinsic function. // sig -- signature of the intrinsic call // retType -- the return type of the intrinsic call // simdBaseJitType -- the base JIT type of SIMD type of the intrinsic // simdSize -- the size of the SIMD type of the intrinsic // // Return Value: // The GT_HWINTRINSIC node, or nullptr if not a supported intrinsic // GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, var_types retType, CorInfoType simdBaseJitType, unsigned simdSize, GenTree* newobjThis) { var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(supportSIMDTypes()); assert(retType != TYP_UNKNOWN); assert(varTypeIsArithmetic(simdBaseType)); assert(simdSize != 0); assert(SimdAsHWIntrinsicInfo::lookupHWIntrinsic(intrinsic, simdBaseType) == intrinsic); var_types simdType = getSIMDTypeForSize(simdSize); assert(varTypeIsSIMD(simdType)); CORINFO_ARG_LIST_HANDLE argList = sig->args; var_types argType = TYP_UNKNOWN; CORINFO_CLASS_HANDLE argClass = NO_CLASS_HANDLE; GenTree* op1 = nullptr; GenTree* op2 = nullptr; GenTree* op3 = nullptr; unsigned numArgs = sig->numArgs; bool isInstanceMethod = false; if (sig->hasThis()) { assert(SimdAsHWIntrinsicInfo::IsInstanceMethod(intrinsic)); numArgs++; isInstanceMethod = true; argClass = clsHnd; } #if defined(TARGET_XARCH) bool isVectorT256 = (SimdAsHWIntrinsicInfo::lookupClassId(intrinsic) == SimdAsHWIntrinsicClassId::VectorT256); // We should have already exited early if SSE2 isn't supported assert(compIsaSupportedDebugOnly(InstructionSet_SSE2)); // Vector<T>, when 32-bytes, requires at least AVX2 assert(!isVectorT256 || compIsaSupportedDebugOnly(InstructionSet_AVX2)); #elif defined(TARGET_ARM64) // We should have already exited early if AdvSimd isn't supported assert(compIsaSupportedDebugOnly(InstructionSet_AdvSimd)); #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 switch (intrinsic) { #if defined(TARGET_XARCH) case NI_VectorT128_ConvertToDouble: case NI_VectorT256_ConvertToDouble: case NI_VectorT128_ConvertToInt64: case NI_VectorT256_ConvertToInt64: case NI_VectorT128_ConvertToUInt32: case NI_VectorT256_ConvertToUInt32: case NI_VectorT128_ConvertToUInt64: case NI_VectorT256_ConvertToUInt64: { // TODO-XARCH-CQ: These intrinsics should be accelerated return nullptr; } case NI_VectorT128_ConvertToSingle: case NI_VectorT256_ConvertToSingle: { if (simdBaseType == TYP_UINT) { // TODO-XARCH-CQ: These intrinsics should be accelerated return nullptr; } break; } #endif // TARGET_XARCH #if defined(TARGET_X86) case NI_VectorT128_CreateBroadcast: case 
NI_VectorT256_CreateBroadcast: { if (varTypeIsLong(simdBaseType)) { // TODO-XARCH-CQ: It may be beneficial to emit the movq // instruction, which takes a 64-bit memory address and // works on 32-bit x86 systems. return nullptr; } break; } #endif // TARGET_X86 #if defined(TARGET_XARCH) case NI_VectorT256_As: #endif // TARGET_XARCH case NI_VectorT128_As: { unsigned retSimdSize; CorInfoType retBaseJitType = getBaseJitTypeAndSizeOfSIMDType(sig->retTypeSigClass, &retSimdSize); if ((retBaseJitType == CORINFO_TYPE_UNDEF) || !varTypeIsArithmetic(JitType2PreciseVarType(retBaseJitType)) || (retSimdSize == 0)) { // We get here if the return type is an unsupported type return nullptr; } break; } #if defined(TARGET_XARCH) case NI_VectorT256_get_Item: case NI_VectorT128_get_Item: { switch (simdBaseType) { // Using software fallback if simdBaseType is not supported by hardware case TYP_BYTE: case TYP_UBYTE: case TYP_INT: case TYP_UINT: case TYP_LONG: case TYP_ULONG: if (!compExactlyDependsOn(InstructionSet_SSE41)) { return nullptr; } break; case TYP_DOUBLE: case TYP_FLOAT: case TYP_SHORT: case TYP_USHORT: // short/ushort/float/double is supported by SSE2 break; default: unreached(); } break; } #endif // TARGET_XARCH #if defined(TARGET_XARCH) case NI_VectorT128_Dot: { if (!compOpportunisticallyDependsOn(InstructionSet_SSE41)) { // We need to exit early if this is Vector<T>.Dot for int or uint and SSE41 is not supported // The other types should be handled via the table driven paths assert((simdBaseType == TYP_INT) || (simdBaseType == TYP_UINT)); return nullptr; } break; } case NI_VectorT128_Sum: { if (varTypeIsFloating(simdBaseType)) { if (!compOpportunisticallyDependsOn(InstructionSet_SSE3)) { // Floating-point types require SSE3.HorizontalAdd return nullptr; } } else if (!compOpportunisticallyDependsOn(InstructionSet_SSSE3)) { // Integral types require SSSE3.HorizontalAdd return nullptr; } break; } #endif // TARGET_XARCH default: { // Most intrinsics have some path that works even if only SSE2/AdvSimd is available break; } } GenTree* copyBlkDst = nullptr; GenTree* copyBlkSrc = nullptr; switch (numArgs) { case 0: { assert(newobjThis == nullptr); switch (intrinsic) { #if defined(TARGET_XARCH) case NI_Vector2_get_One: case NI_Vector3_get_One: case NI_Vector4_get_One: case NI_VectorT128_get_One: case NI_VectorT256_get_One: { switch (simdBaseType) { case TYP_BYTE: case TYP_UBYTE: case TYP_SHORT: case TYP_USHORT: case TYP_INT: case TYP_UINT: { op1 = gtNewIconNode(1, TYP_INT); break; } case TYP_LONG: case TYP_ULONG: { op1 = gtNewLconNode(1); break; } case TYP_FLOAT: case TYP_DOUBLE: { op1 = gtNewDconNode(1.0, simdBaseType); break; } default: { unreached(); } } return gtNewSimdCreateBroadcastNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_get_Count: case NI_VectorT256_get_Count: { GenTreeIntCon* countNode = gtNewIconNode(getSIMDVectorLength(simdSize, simdBaseType), TYP_INT); countNode->gtFlags |= GTF_ICON_SIMD_COUNT; return countNode; } #elif defined(TARGET_ARM64) case NI_Vector2_get_One: case NI_Vector3_get_One: case NI_Vector4_get_One: case NI_VectorT128_get_One: { switch (simdBaseType) { case TYP_BYTE: case TYP_UBYTE: case TYP_SHORT: case TYP_USHORT: case TYP_INT: case TYP_UINT: { op1 = gtNewIconNode(1, TYP_INT); break; } case TYP_LONG: case TYP_ULONG: { op1 = gtNewLconNode(1); break; } case TYP_FLOAT: case TYP_DOUBLE: { op1 = gtNewDconNode(1.0, simdBaseType); break; } default: { unreached(); } } return gtNewSimdCreateBroadcastNode(retType, op1, 
simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_get_Count: { GenTreeIntCon* countNode = gtNewIconNode(getSIMDVectorLength(simdSize, simdBaseType), TYP_INT); countNode->gtFlags |= GTF_ICON_SIMD_COUNT; return countNode; } #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 default: { // Some platforms warn about unhandled switch cases // We handle it more generally via the assert and nullptr return below. break; } } break; } case 1: { assert(newobjThis == nullptr); bool isOpExplicit = (intrinsic == NI_VectorT128_op_Explicit) || (intrinsic == NI_VectorT128_As); #if defined(TARGET_XARCH) isOpExplicit |= (intrinsic == NI_VectorT256_op_Explicit) || (intrinsic == NI_VectorT256_As); #endif if (isOpExplicit) { // We fold away the cast here, as it only exists to satisfy the // type system. It is safe to do this here since the op1 type // and the signature return type are both the same TYP_SIMD. op1 = impSIMDPopStack(retType, /* expectAddr: */ false, sig->retTypeClass); SetOpLclRelatedToSIMDIntrinsic(op1); assert(op1->gtType == getSIMDTypeForSize(getSIMDTypeSizeInBytes(sig->retTypeSigClass))); return op1; } argType = isInstanceMethod ? simdType : JITtype2varType(strip(info.compCompHnd->getArgType(sig, argList, &argClass))); op1 = getArgForHWIntrinsic(argType, argClass, isInstanceMethod); assert(!SimdAsHWIntrinsicInfo::NeedsOperandsSwapped(intrinsic)); switch (intrinsic) { #if defined(TARGET_XARCH) case NI_Vector2_Abs: case NI_Vector3_Abs: case NI_Vector4_Abs: case NI_VectorT128_Abs: case NI_VectorT256_Abs: { return gtNewSimdAbsNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ConvertToInt32: case NI_VectorT256_ConvertToInt32: { assert(simdBaseType == TYP_FLOAT); NamedIntrinsic convert = (simdSize == 32) ? NI_AVX_ConvertToVector256Int32WithTruncation : NI_SSE2_ConvertToVector128Int32WithTruncation; return gtNewSimdHWIntrinsicNode(retType, op1, convert, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ConvertToSingle: case NI_VectorT256_ConvertToSingle: { assert(simdBaseType == TYP_INT); NamedIntrinsic convert = (simdSize == 32) ? 
NI_AVX_ConvertToVector256Single : NI_SSE2_ConvertToVector128Single; return gtNewSimdHWIntrinsicNode(retType, op1, convert, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_Sum: case NI_VectorT256_Sum: { return gtNewSimdSumNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_WidenLower: case NI_VectorT256_WidenLower: { return gtNewSimdWidenLowerNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_WidenUpper: case NI_VectorT256_WidenUpper: { return gtNewSimdWidenUpperNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } #elif defined(TARGET_ARM64) case NI_VectorT128_Abs: { return gtNewSimdAbsNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ConvertToDouble: { assert((simdBaseType == TYP_LONG) || (simdBaseType == TYP_ULONG)); return gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_Arm64_ConvertToDouble, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ConvertToInt32: { assert(simdBaseType == TYP_FLOAT); return gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_ConvertToInt32RoundToZero, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ConvertToInt64: { assert(simdBaseType == TYP_DOUBLE); return gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_Arm64_ConvertToInt64RoundToZero, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ConvertToSingle: { assert((simdBaseType == TYP_INT) || (simdBaseType == TYP_UINT)); return gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_ConvertToSingle, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ConvertToUInt32: { assert(simdBaseType == TYP_FLOAT); return gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_ConvertToUInt32RoundToZero, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ConvertToUInt64: { assert(simdBaseType == TYP_DOUBLE); return gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_Arm64_ConvertToUInt64RoundToZero, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_Sum: { return gtNewSimdSumNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_WidenLower: { return gtNewSimdWidenLowerNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_WidenUpper: { return gtNewSimdWidenUpperNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 default: { // Some platforms warn about unhandled switch cases // We handle it more generally via the assert and nullptr return below. break; } } break; } case 2: { CORINFO_ARG_LIST_HANDLE arg2 = isInstanceMethod ? argList : info.compCompHnd->getArgNext(argList); argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg2, &argClass))); op2 = getArgForHWIntrinsic(argType, argClass); argType = isInstanceMethod ? 
simdType : JITtype2varType(strip(info.compCompHnd->getArgType(sig, argList, &argClass))); op1 = getArgForHWIntrinsic(argType, argClass, isInstanceMethod, newobjThis); assert(!SimdAsHWIntrinsicInfo::NeedsOperandsSwapped(intrinsic)); switch (intrinsic) { #if defined(TARGET_XARCH) case NI_Vector2_CreateBroadcast: case NI_Vector3_CreateBroadcast: case NI_Vector4_CreateBroadcast: case NI_VectorT128_CreateBroadcast: case NI_VectorT256_CreateBroadcast: { assert(retType == TYP_VOID); copyBlkDst = op1; copyBlkSrc = gtNewSimdCreateBroadcastNode(simdType, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); break; } case NI_VectorT128_get_Item: case NI_VectorT256_get_Item: { return gtNewSimdGetElementNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_Vector2_op_Division: case NI_Vector3_op_Division: { // Vector2/3 div: since the top-most elements will be zero, we end up // perfoming 0/0 which is a NAN. Therefore, post division we need to set the // top-most elements to zero. This is achieved by left logical shift followed // by right logical shift of the result. // These are 16 byte operations, so we subtract from 16 bytes, not the vector register length. unsigned shiftCount = 16 - simdSize; assert((shiftCount > 0) && (shiftCount <= 16)); // retNode = Sse.Divide(op1, op2); GenTree* retNode = gtNewSimdAsHWIntrinsicNode(retType, op1, op2, NI_SSE_Divide, simdBaseJitType, simdSize); // retNode = Sse.ShiftLeftLogical128BitLane(retNode.AsInt32(), shiftCount).AsSingle() retNode = gtNewSimdAsHWIntrinsicNode(retType, retNode, gtNewIconNode(shiftCount, TYP_INT), NI_SSE2_ShiftLeftLogical128BitLane, CORINFO_TYPE_INT, simdSize); // retNode = Sse.ShiftRightLogical128BitLane(retNode.AsInt32(), shiftCount).AsSingle() retNode = gtNewSimdAsHWIntrinsicNode(retType, retNode, gtNewIconNode(shiftCount, TYP_INT), NI_SSE2_ShiftRightLogical128BitLane, CORINFO_TYPE_INT, simdSize); return retNode; } case NI_VectorT128_Dot: { return gtNewSimdDotProdNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_Equals: { return gtNewSimdCmpOpNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_GreaterThan: case NI_VectorT256_GreaterThan: { return gtNewSimdCmpOpNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_GreaterThanOrEqual: case NI_VectorT256_GreaterThanOrEqual: { return gtNewSimdCmpOpNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_LessThan: case NI_VectorT256_LessThan: { return gtNewSimdCmpOpNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_LessThanOrEqual: case NI_VectorT256_LessThanOrEqual: { return gtNewSimdCmpOpNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_Max: case NI_VectorT256_Max: { return gtNewSimdMaxNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_Min: case NI_VectorT256_Min: { return gtNewSimdMinNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_Narrow: case NI_VectorT256_Narrow: { return gtNewSimdNarrowNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_op_Multiply: case NI_VectorT256_op_Multiply: { return gtNewSimdBinOpNode(GT_MUL, retType, op1, 
op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ShiftLeft: case NI_VectorT256_ShiftLeft: { return gtNewSimdBinOpNode(GT_LSH, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ShiftRightArithmetic: case NI_VectorT256_ShiftRightArithmetic: { return gtNewSimdBinOpNode(GT_RSH, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ShiftRightLogical: case NI_VectorT256_ShiftRightLogical: { return gtNewSimdBinOpNode(GT_RSZ, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } #elif defined(TARGET_ARM64) case NI_Vector2_CreateBroadcast: case NI_Vector3_CreateBroadcast: case NI_Vector4_CreateBroadcast: case NI_VectorT128_CreateBroadcast: { assert(retType == TYP_VOID); copyBlkDst = op1; copyBlkSrc = gtNewSimdCreateBroadcastNode(simdType, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); break; } case NI_VectorT128_get_Item: { return gtNewSimdGetElementNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_Max: { return gtNewSimdMaxNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_Min: { return gtNewSimdMinNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_Narrow: { return gtNewSimdNarrowNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_op_Multiply: { return gtNewSimdBinOpNode(GT_MUL, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ShiftLeft: { return gtNewSimdBinOpNode(GT_LSH, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ShiftRightArithmetic: { return gtNewSimdBinOpNode(GT_RSH, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } case NI_VectorT128_ShiftRightLogical: { return gtNewSimdBinOpNode(GT_RSZ, retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 default: { // Some platforms warn about unhandled switch cases // We handle it more generally via the assert and nullptr return below. break; } } break; } case 3: { assert(newobjThis == nullptr); CORINFO_ARG_LIST_HANDLE arg2 = isInstanceMethod ? argList : info.compCompHnd->getArgNext(argList); CORINFO_ARG_LIST_HANDLE arg3 = info.compCompHnd->getArgNext(arg2); argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg3, &argClass))); op3 = getArgForHWIntrinsic(argType, argClass); argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg2, &argClass))); op2 = getArgForHWIntrinsic(argType, argClass); argType = isInstanceMethod ? 
simdType : JITtype2varType(strip(info.compCompHnd->getArgType(sig, argList, &argClass))); op1 = getArgForHWIntrinsic(argType, argClass, isInstanceMethod, newobjThis); assert(!SimdAsHWIntrinsicInfo::NeedsOperandsSwapped(intrinsic)); switch (intrinsic) { #if defined(TARGET_XARCH) case NI_VectorT128_ConditionalSelect: case NI_VectorT256_ConditionalSelect: { return gtNewSimdCndSelNode(retType, op1, op2, op3, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } #elif defined(TARGET_ARM64) case NI_VectorT128_ConditionalSelect: { return gtNewSimdCndSelNode(retType, op1, op2, op3, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 default: { // Some platforms warn about unhandled switch cases // We handle it more generally via the assert and nullptr return below. break; } } } } if (copyBlkDst != nullptr) { assert(copyBlkSrc != nullptr); // At this point, we have a tree that we are going to store into a destination. // TODO-1stClassStructs: This should be a simple store or assignment, and should not require // GTF_ALL_EFFECT for the dest. This is currently emulating the previous behavior of // block ops. GenTree* dest = gtNewBlockVal(copyBlkDst, simdSize); dest->gtType = simdType; dest->gtFlags |= GTF_GLOB_REF; GenTree* retNode = gtNewBlkOpNode(dest, copyBlkSrc, /* isVolatile */ false, /* isCopyBlock */ true); retNode->gtFlags |= ((copyBlkDst->gtFlags | copyBlkSrc->gtFlags) & GTF_ALL_EFFECT); return retNode; } assert(copyBlkSrc == nullptr); assert(!"Unexpected SimdAsHWIntrinsic"); return nullptr; } #endif // FEATURE_HW_INTRINSICS
1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore doesn't work for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473. (A brief illustrative sketch of this featureSIMD-vs-supportSIMDTypes() pattern follows this record.)
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore doesn't work for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/inc/sbuffer.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // -------------------------------------------------------------------------------- // SBuffer.h (Safe Buffer) // // -------------------------------------------------------------------------------- // -------------------------------------------------------------------------------- // SBuffer is a relatively safe way to manipulate a dynamically // allocated data buffer. An SBuffer is conceptually a simple array // of bytes. It maintains both a conceptual size and an actual allocated size. // // SBuffer provides safe access to the data buffer by providing rich high // level functionality (like insertion, deleteion, copying, comparison, and // iteration) without exposing direct pointers to its buffers. // // For interoperability, SBuffers can expose their buffers - either as readonly // by BYTE * or void * cases, or as writable by the OpenRawBuffer/CloseRawBuffer // entry points. Use of these should be limited wherever possible though; as there // is always a possibilility of buffer overrun. // // To mimimize heap allocations, the InlineSBuffer template will preallocate a fixed // size buffer inline with the SBuffer object itself. It will use this buffer unless // it needs a bigger one, in which case it transparently moves on to using the heap. // The StackSBuffer class instatiates the InlineSBuffer with a standard heuristic // stack preallocation size. // // SBuffer is "subclassable" to add content typeing to the buffer. See SArray and // SString for examples. // -------------------------------------------------------------------------------- #ifndef _SBUFFER_H_ #define _SBUFFER_H_ #include "clrtypes.h" #include "iterator.h" #include "check.h" #include "daccess.h" #include "memoryrange.h" // ================================================================================ // Macros for computing padding // ================================================================================ #define ALIGNMENT(size) \ (( ((size)^((size)-1)) >> 1) +1) #define ALIGN(size, align) \ (((size)+((align)-1)) & ~((align)-1)) #define PAD(size, align) \ (ALIGN((size), (align)) - (size)) // ================================================================================ // SBuffer : base class for safe buffers // ================================================================================ typedef DPTR(class SBuffer) PTR_SBuffer; class SBuffer { public: //-------------------------------------------------------------------- // Flags and constants //-------------------------------------------------------------------- enum ImmutableFlag { Immutable }; enum PreallocFlag { Prealloc }; //-------------------------------------------------------------------- // Types //-------------------------------------------------------------------- public: class CIterator; friend class CIterator; class Iterator; friend class Iterator; //-------------------------------------------------------------------- // Initializers and constructors //-------------------------------------------------------------------- public: // Constructors SBuffer(); SBuffer(COUNT_T size); SBuffer(const BYTE *buffer, COUNT_T size); explicit SBuffer(const SBuffer &buffer); // Immutable constructor should ONLY be used if buffer will // NEVER BE FREED OR MODIFIED. PERIOD. . 
SBuffer(ImmutableFlag immutable, const BYTE *buffer, COUNT_T size); // Prealloc should be allocated inline with SBuffer - it must have the same // lifetime as SBuffer's memory. SBuffer(PreallocFlag prealloc, void *buffer, COUNT_T size); ~SBuffer(); void Clear(); void Set(const SBuffer &buffer); void Set(const BYTE *buffer, COUNT_T size); void SetImmutable(const BYTE *buffer, COUNT_T size); //-------------------------------------------------------------------- // Buffer size routines. A buffer has an externally visible size, but // it also has an internal allocation size which may be larger. //-------------------------------------------------------------------- // Get and set size of buffer. Note that the actual size of the // internally allocated memory block may be bigger. COUNT_T GetSize() const; void SetSize(COUNT_T count); // Grow size of buffer to maximum amount without reallocating. void MaximizeSize(); //-------------------------------------------------------------------- // Buffer allocation routines //-------------------------------------------------------------------- // Return the current available allocation space of the buffer. COUNT_T GetAllocation() const; // Preallocate some memory you expect to use. This can prevent // multiple reallocations. Note this does not change the visible // size of the buffer. void Preallocate(COUNT_T allocation) const; // Shrink memory usage of buffer to minimal amount. Note that // this does not change the visible size of the buffer. void Trim() const; //-------------------------------------------------------------------- // Content manipulation routines //-------------------------------------------------------------------- void Zero(); void Fill(BYTE value); void Fill(const Iterator &to, BYTE value, COUNT_T size); // Internal copy. "Copy" leaves from range as is; "Move" // leaves from range in uninitialized state. // (This distinction is more important when using from a // typed wrapper than in the base SBuffer class.) // // NOTE: Copy vs Move is NOT based on whether ranges overlap // or not. Ranges may overlap in either case. // // Note that both Iterators must be on THIS buffer. void Copy(const Iterator &to, const CIterator &from, COUNT_T size); void Move(const Iterator &to, const CIterator &from, COUNT_T size); // External copy. void Copy(const Iterator &i, const SBuffer &source); void Copy(const Iterator &i, const void *source, COUNT_T size); void Copy(void *dest, const CIterator &i, COUNT_T size); // Insert bytes at the given iterator location. 
void Insert(const Iterator &i, const SBuffer &source); void Insert(const Iterator &i, COUNT_T size); // Delete bytes at the given iterator location void Delete(const Iterator &i, COUNT_T size); // Replace bytes at the given iterator location void Replace(const Iterator &i, COUNT_T deleteSize, const SBuffer &insert); void Replace(const Iterator &i, COUNT_T deleteSize, COUNT_T insertSize); // Compare entire buffer; return -1, 0, 1 int Compare(const SBuffer &compare) const; int Compare(const BYTE *match, COUNT_T size) const; // Compare entire buffer; return TRUE or FALSE BOOL Equals(const SBuffer &compare) const; BOOL Equals(const BYTE *match, COUNT_T size) const; // Match portion of this buffer to given bytes; return TRUE or FALSE BOOL Match(const CIterator &i, const SBuffer &match) const; BOOL Match(const CIterator &i, const BYTE *match, COUNT_T size) const; //-------------------------------------------------------------------- // Iterators // // Note that any iterator returned is not // valid after any operation which may resize the buffer, unless // the operation was performed on that particular iterator. //-------------------------------------------------------------------- CIterator Begin() const; CIterator End() const; Iterator Begin(); Iterator End(); BYTE & operator[] (int index); const BYTE & operator[] (int index) const; //-------------------------------------------------------------------- // Raw buffer access // // Accessing a raw buffer via pointer is inherently more dangerous than // other uses of this API, and should be avoided if at all possible. // It is primarily provided for compatibility with existing APIs. // // Note that any buffer pointer returned is not // valid after any operation which may resize the buffer. //-------------------------------------------------------------------- // Casting operators return the existing buffer as // a raw const pointer. Note that the pointer is valid only // until the buffer is modified via an API. operator const void *() const; operator const BYTE *() const; // To write directly to the SString's underlying buffer: // 1) Call OpenRawBuffer() and pass it the count of bytes // you need. // 2) That returns a pointer to the raw buffer which you can write to. // 3) When you are done writing to the pointer, call CloseBuffer() // and pass it the count of bytes you actually wrote. // The pointer from step 1 is now invalid. // example usage: // void GetInfo(SBuffer &buf) // { // BYTE *p = buf.OpenRawBuffer(3); // OSGetSomeInfo(p, 3); // buf.CloseRawBuffer(); // } // You should open the buffer, write the data, and immediately close it. // No sbuffer operations are valid while the buffer is opened. // // In a debug build, Open/Close will do lots of little checks to make sure // you don't buffer overflow while it's opened. In a retail build, this // is a very streamlined action. // Open the raw buffer for writing count bytes BYTE *OpenRawBuffer(COUNT_T maxCount); // Call after OpenRawBuffer(). // Provide the count of bytes actually used. This will make sure the // SBuffer's size is correct. void CloseRawBuffer(COUNT_T actualCount); // Close the buffer. Assumes that we completely filled the buffer // that OpenRawBuffer() gave back. void CloseRawBuffer(); //-------------------------------------------------------------------- // Check routines. These are typically used internally, but may be // called externally if desired. 
//-------------------------------------------------------------------- CHECK CheckBufferClosed() const; static CHECK CheckSize(COUNT_T size); static CHECK CheckAllocation(COUNT_T allocation); CHECK CheckIteratorRange(const CIterator &i) const; CHECK CheckIteratorRange(const CIterator &i, COUNT_T size) const; CHECK Check() const; CHECK Invariant() const; CHECK InternalInvariant() const; protected: //-------------------------------------------------------------------- // Internal helper routines //-------------------------------------------------------------------- // Preserve = preserve contents while reallocating typedef enum { DONT_PRESERVE = 0, PRESERVE = 1, } Preserve; void Resize(COUNT_T size, Preserve preserve = PRESERVE); void ResizePadded(COUNT_T size, Preserve preserve = PRESERVE); void TweakSize(COUNT_T size); void ReallocateBuffer(COUNT_T allocation, Preserve preserve); void EnsureMutable() const; //-------------------------------------------------------------------- // We define some extra flags and fields for subclasses (these are specifically // designed for SString, but use otherwise if desired.) //-------------------------------------------------------------------- BOOL IsFlag1() const; void SetFlag1(); void ClearFlag1(); BOOL IsFlag2() const; void SetFlag2(); void ClearFlag2(); BOOL IsFlag3() const; void SetFlag3(); void ClearFlag3(); INT GetRepresentationField() const; void SetRepresentationField(int value); protected: //-------------------------------------------------------------------- // Flag access //-------------------------------------------------------------------- BOOL IsAllocated() const; void SetAllocated(); void ClearAllocated(); BOOL IsImmutable() const; void SetImmutable(); void ClearImmutable(); #if _DEBUG BOOL IsOpened() const; void SetOpened(); void ClearOpened(); #endif //-------------------------------------------------------------------- // Buffer management routines //-------------------------------------------------------------------- // Allocate and free a memory buffer BYTE *NewBuffer(COUNT_T allocation); void DeleteBuffer(BYTE *buffer, COUNT_T allocation); // Use existing buffer BYTE *UseBuffer(BYTE *buffer, COUNT_T *allocation); CHECK CheckBuffer(const BYTE* buffer, COUNT_T allocation) const; // Manipulates contents of the buffer via the plugins below, but // adds some debugging checks. Should always call through here rather // than directly calling the extensibility points. void DebugMoveBuffer(_Out_writes_bytes_(size) BYTE *to, BYTE *from, COUNT_T size); void DebugCopyConstructBuffer(_Out_writes_bytes_(size) BYTE *to, const BYTE *from, COUNT_T size); void DebugConstructBuffer(BYTE *buffer, COUNT_T size); void DebugDestructBuffer(BYTE *buffer, COUNT_T size); void DebugStompUnusedBuffer(BYTE *buffer, COUNT_T size); #ifdef _DEBUG static BOOL EnsureGarbageCharOnly(const BYTE *buffer, COUNT_T size); #endif CHECK CheckUnusedBuffer(const BYTE *buffer, COUNT_T size) const; #ifdef DACCESS_COMPILE public: // Expose the raw Target address of the buffer to DAC. // This does not do any marshalling. This can be useful if the caller wants to allocate the buffer on // its own heap so that it can survive Flush calls. MemoryRange DacGetRawBuffer() const { SUPPORTS_DAC; PTR_VOID p = dac_cast<PTR_VOID>((TADDR) m_buffer); return MemoryRange(p, GetSize()); } protected: // Return a host copy of the buffer, allocated on the DAC heap (and thus invalidated at the next call to Flush). 
void* DacGetRawContent(void) const { SUPPORTS_DAC; // SBuffers are used in DAC in two ways - buffers in the host, and marshalled buffers from the target. // This is a problem - we can't reason about the address space of the buffer statically, and instead rely on // the dynamic usage (i.e. the methods are basically bifurcated into those you can use on host instances, // and those you can use on marshalled copies). // Ideally we'll have two versions of the SBuffer code - one that's marshalled (normal DACization) and one // that isn't (host-only utility). This is the "dual-mode DAC problem". // But this only affects a couple classes, and so for now we'll ignore the problem - causing a bunch of DacCop // violations. DACCOP_IGNORE(CastBetweenAddressSpaces, "SBuffer has the dual-mode DAC problem"); DACCOP_IGNORE(FieldAccess, "SBuffer has the dual-mode DAC problem"); TADDR bufAddr = (TADDR)m_buffer; return DacInstantiateTypeByAddress(bufAddr, m_size, true); } void EnumMemoryRegions(CLRDataEnumMemoryFlags flags) const { SUPPORTS_DAC; if (flags != CLRDATA_ENUM_MEM_TRIAGE) { DacEnumMemoryRegion((TADDR)m_buffer, m_size); } } #endif //---------------------------------------------------------------------------- // Iterator base class //---------------------------------------------------------------------------- friend class CheckedIteratorBase<SBuffer>; class EMPTY_BASES_DECL Index : public CheckedIteratorBase<SBuffer> { friend class SBuffer; friend class CIterator; friend class Indexer<const BYTE, CIterator>; friend class Iterator; friend class Indexer<BYTE, Iterator>; protected: BYTE* m_ptr; Index(); Index(SBuffer *container, SCOUNT_T index); BYTE &GetAt(SCOUNT_T delta) const; void Skip(SCOUNT_T delta); SCOUNT_T Subtract(const Index &i) const; CHECK DoCheck(SCOUNT_T delta) const; void Resync(const SBuffer *container, BYTE *value) const; }; public: class EMPTY_BASES_DECL CIterator : public Index, public Indexer<const BYTE, CIterator> { friend class SBuffer; public: CIterator() { } CIterator(const SBuffer *buffer, int index) : Index(const_cast<SBuffer*>(buffer), index) { } }; class EMPTY_BASES_DECL Iterator : public Index, public Indexer<BYTE, Iterator> { friend class SBuffer; public: operator const CIterator &() const { return *(const CIterator *)this; } operator CIterator &() { return *(CIterator *)this; } Iterator() { } Iterator(SBuffer *buffer, int index) : Index(buffer, index) { } }; //---------------------------------------------------------------------------- // Member and data declarations //---------------------------------------------------------------------------- private: enum { REPRESENTATION_MASK = 0x07, ALLOCATED = 0x08, IMMUTABLE = 0x10, OPENED = 0x20, FLAG1 = 0x40, FLAG2 = 0x80, FLAG3 = 0x100, }; COUNT_T m_size; // externally visible size COUNT_T m_allocation; // actual allocated size UINT32 m_flags; // @todo: steal flags from sizes protected: union { BYTE *m_buffer; WCHAR *m_asStr; // For debugging, view as a unicode string }; #if _DEBUG protected: // We will update the "revision" of the buffer every time it is potentially reallocation, // so we can tell when iterators are no longer valid. 
int m_revision; #endif }; // ================================================================================ // InlineSBuffer : Tlempate for an SBuffer with preallocated buffer space // ================================================================================ #define BUFFER_ALIGNMENT 4 template <COUNT_T size> class EMPTY_BASES_DECL InlineSBuffer : public SBuffer { private: #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable:4200) // zero sized array #pragma warning(disable:4324) // don't complain if DECLSPEC_ALIGN actually pads DECLSPEC_ALIGN(BUFFER_ALIGNMENT) BYTE m_prealloc[size]; #pragma warning(pop) #else // use UINT64 to get maximum alignment of the memory UINT64 m_prealloc[ALIGN(size,sizeof(UINT64))/sizeof(UINT64)]; #endif // _MSC_VER public: InlineSBuffer() : SBuffer(Prealloc, (BYTE*)m_prealloc, size) { WRAPPER_NO_CONTRACT; } }; // a 1K sized buffer filled with $ that we'll use in debug builds for verification #define GARBAGE_FILL_DWORD 0x24242424 // $$$$ #define GARBAGE_FILL_BUFFER_ITEMS 16 #define GARBAGE_FILL_BUFFER_SIZE GARBAGE_FILL_BUFFER_ITEMS*sizeof(DWORD) // ================================================================================ // StackSBuffer : SBuffer with relatively large preallocated buffer for stack use // ================================================================================ #define STACK_ALLOC 256 typedef InlineSBuffer<STACK_ALLOC> StackSBuffer; // ================================================================================ // Inline definitions // ================================================================================ /// a wrapper for templates and such, that use "==". /// more expensive than a typical "==", though inline BOOL operator == (const SBuffer& b1,const SBuffer& b2) { return b1.Equals(b2); }; #include <sbuffer.inl> #endif // _SBUFFER_H_
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // -------------------------------------------------------------------------------- // SBuffer.h (Safe Buffer) // // -------------------------------------------------------------------------------- // -------------------------------------------------------------------------------- // SBuffer is a relatively safe way to manipulate a dynamically // allocated data buffer. An SBuffer is conceptually a simple array // of bytes. It maintains both a conceptual size and an actual allocated size. // // SBuffer provides safe access to the data buffer by providing rich high // level functionality (like insertion, deleteion, copying, comparison, and // iteration) without exposing direct pointers to its buffers. // // For interoperability, SBuffers can expose their buffers - either as readonly // by BYTE * or void * cases, or as writable by the OpenRawBuffer/CloseRawBuffer // entry points. Use of these should be limited wherever possible though; as there // is always a possibilility of buffer overrun. // // To mimimize heap allocations, the InlineSBuffer template will preallocate a fixed // size buffer inline with the SBuffer object itself. It will use this buffer unless // it needs a bigger one, in which case it transparently moves on to using the heap. // The StackSBuffer class instatiates the InlineSBuffer with a standard heuristic // stack preallocation size. // // SBuffer is "subclassable" to add content typeing to the buffer. See SArray and // SString for examples. // -------------------------------------------------------------------------------- #ifndef _SBUFFER_H_ #define _SBUFFER_H_ #include "clrtypes.h" #include "iterator.h" #include "check.h" #include "daccess.h" #include "memoryrange.h" // ================================================================================ // Macros for computing padding // ================================================================================ #define ALIGNMENT(size) \ (( ((size)^((size)-1)) >> 1) +1) #define ALIGN(size, align) \ (((size)+((align)-1)) & ~((align)-1)) #define PAD(size, align) \ (ALIGN((size), (align)) - (size)) // ================================================================================ // SBuffer : base class for safe buffers // ================================================================================ typedef DPTR(class SBuffer) PTR_SBuffer; class SBuffer { public: //-------------------------------------------------------------------- // Flags and constants //-------------------------------------------------------------------- enum ImmutableFlag { Immutable }; enum PreallocFlag { Prealloc }; //-------------------------------------------------------------------- // Types //-------------------------------------------------------------------- public: class CIterator; friend class CIterator; class Iterator; friend class Iterator; //-------------------------------------------------------------------- // Initializers and constructors //-------------------------------------------------------------------- public: // Constructors SBuffer(); SBuffer(COUNT_T size); SBuffer(const BYTE *buffer, COUNT_T size); explicit SBuffer(const SBuffer &buffer); // Immutable constructor should ONLY be used if buffer will // NEVER BE FREED OR MODIFIED. PERIOD. . 
SBuffer(ImmutableFlag immutable, const BYTE *buffer, COUNT_T size); // Prealloc should be allocated inline with SBuffer - it must have the same // lifetime as SBuffer's memory. SBuffer(PreallocFlag prealloc, void *buffer, COUNT_T size); ~SBuffer(); void Clear(); void Set(const SBuffer &buffer); void Set(const BYTE *buffer, COUNT_T size); void SetImmutable(const BYTE *buffer, COUNT_T size); //-------------------------------------------------------------------- // Buffer size routines. A buffer has an externally visible size, but // it also has an internal allocation size which may be larger. //-------------------------------------------------------------------- // Get and set size of buffer. Note that the actual size of the // internally allocated memory block may be bigger. COUNT_T GetSize() const; void SetSize(COUNT_T count); // Grow size of buffer to maximum amount without reallocating. void MaximizeSize(); //-------------------------------------------------------------------- // Buffer allocation routines //-------------------------------------------------------------------- // Return the current available allocation space of the buffer. COUNT_T GetAllocation() const; // Preallocate some memory you expect to use. This can prevent // multiple reallocations. Note this does not change the visible // size of the buffer. void Preallocate(COUNT_T allocation) const; // Shrink memory usage of buffer to minimal amount. Note that // this does not change the visible size of the buffer. void Trim() const; //-------------------------------------------------------------------- // Content manipulation routines //-------------------------------------------------------------------- void Zero(); void Fill(BYTE value); void Fill(const Iterator &to, BYTE value, COUNT_T size); // Internal copy. "Copy" leaves from range as is; "Move" // leaves from range in uninitialized state. // (This distinction is more important when using from a // typed wrapper than in the base SBuffer class.) // // NOTE: Copy vs Move is NOT based on whether ranges overlap // or not. Ranges may overlap in either case. // // Note that both Iterators must be on THIS buffer. void Copy(const Iterator &to, const CIterator &from, COUNT_T size); void Move(const Iterator &to, const CIterator &from, COUNT_T size); // External copy. void Copy(const Iterator &i, const SBuffer &source); void Copy(const Iterator &i, const void *source, COUNT_T size); void Copy(void *dest, const CIterator &i, COUNT_T size); // Insert bytes at the given iterator location. 
void Insert(const Iterator &i, const SBuffer &source); void Insert(const Iterator &i, COUNT_T size); // Delete bytes at the given iterator location void Delete(const Iterator &i, COUNT_T size); // Replace bytes at the given iterator location void Replace(const Iterator &i, COUNT_T deleteSize, const SBuffer &insert); void Replace(const Iterator &i, COUNT_T deleteSize, COUNT_T insertSize); // Compare entire buffer; return -1, 0, 1 int Compare(const SBuffer &compare) const; int Compare(const BYTE *match, COUNT_T size) const; // Compare entire buffer; return TRUE or FALSE BOOL Equals(const SBuffer &compare) const; BOOL Equals(const BYTE *match, COUNT_T size) const; // Match portion of this buffer to given bytes; return TRUE or FALSE BOOL Match(const CIterator &i, const SBuffer &match) const; BOOL Match(const CIterator &i, const BYTE *match, COUNT_T size) const; //-------------------------------------------------------------------- // Iterators // // Note that any iterator returned is not // valid after any operation which may resize the buffer, unless // the operation was performed on that particular iterator. //-------------------------------------------------------------------- CIterator Begin() const; CIterator End() const; Iterator Begin(); Iterator End(); BYTE & operator[] (int index); const BYTE & operator[] (int index) const; //-------------------------------------------------------------------- // Raw buffer access // // Accessing a raw buffer via pointer is inherently more dangerous than // other uses of this API, and should be avoided if at all possible. // It is primarily provided for compatibility with existing APIs. // // Note that any buffer pointer returned is not // valid after any operation which may resize the buffer. //-------------------------------------------------------------------- // Casting operators return the existing buffer as // a raw const pointer. Note that the pointer is valid only // until the buffer is modified via an API. operator const void *() const; operator const BYTE *() const; // To write directly to the SString's underlying buffer: // 1) Call OpenRawBuffer() and pass it the count of bytes // you need. // 2) That returns a pointer to the raw buffer which you can write to. // 3) When you are done writing to the pointer, call CloseBuffer() // and pass it the count of bytes you actually wrote. // The pointer from step 1 is now invalid. // example usage: // void GetInfo(SBuffer &buf) // { // BYTE *p = buf.OpenRawBuffer(3); // OSGetSomeInfo(p, 3); // buf.CloseRawBuffer(); // } // You should open the buffer, write the data, and immediately close it. // No sbuffer operations are valid while the buffer is opened. // // In a debug build, Open/Close will do lots of little checks to make sure // you don't buffer overflow while it's opened. In a retail build, this // is a very streamlined action. // Open the raw buffer for writing count bytes BYTE *OpenRawBuffer(COUNT_T maxCount); // Call after OpenRawBuffer(). // Provide the count of bytes actually used. This will make sure the // SBuffer's size is correct. void CloseRawBuffer(COUNT_T actualCount); // Close the buffer. Assumes that we completely filled the buffer // that OpenRawBuffer() gave back. void CloseRawBuffer(); //-------------------------------------------------------------------- // Check routines. These are typically used internally, but may be // called externally if desired. 
//-------------------------------------------------------------------- CHECK CheckBufferClosed() const; static CHECK CheckSize(COUNT_T size); static CHECK CheckAllocation(COUNT_T allocation); CHECK CheckIteratorRange(const CIterator &i) const; CHECK CheckIteratorRange(const CIterator &i, COUNT_T size) const; CHECK Check() const; CHECK Invariant() const; CHECK InternalInvariant() const; protected: //-------------------------------------------------------------------- // Internal helper routines //-------------------------------------------------------------------- // Preserve = preserve contents while reallocating typedef enum { DONT_PRESERVE = 0, PRESERVE = 1, } Preserve; void Resize(COUNT_T size, Preserve preserve = PRESERVE); void ResizePadded(COUNT_T size, Preserve preserve = PRESERVE); void TweakSize(COUNT_T size); void ReallocateBuffer(COUNT_T allocation, Preserve preserve); void EnsureMutable() const; //-------------------------------------------------------------------- // We define some extra flags and fields for subclasses (these are specifically // designed for SString, but use otherwise if desired.) //-------------------------------------------------------------------- BOOL IsFlag1() const; void SetFlag1(); void ClearFlag1(); BOOL IsFlag2() const; void SetFlag2(); void ClearFlag2(); BOOL IsFlag3() const; void SetFlag3(); void ClearFlag3(); INT GetRepresentationField() const; void SetRepresentationField(int value); protected: //-------------------------------------------------------------------- // Flag access //-------------------------------------------------------------------- BOOL IsAllocated() const; void SetAllocated(); void ClearAllocated(); BOOL IsImmutable() const; void SetImmutable(); void ClearImmutable(); #if _DEBUG BOOL IsOpened() const; void SetOpened(); void ClearOpened(); #endif //-------------------------------------------------------------------- // Buffer management routines //-------------------------------------------------------------------- // Allocate and free a memory buffer BYTE *NewBuffer(COUNT_T allocation); void DeleteBuffer(BYTE *buffer, COUNT_T allocation); // Use existing buffer BYTE *UseBuffer(BYTE *buffer, COUNT_T *allocation); CHECK CheckBuffer(const BYTE* buffer, COUNT_T allocation) const; // Manipulates contents of the buffer via the plugins below, but // adds some debugging checks. Should always call through here rather // than directly calling the extensibility points. void DebugMoveBuffer(_Out_writes_bytes_(size) BYTE *to, BYTE *from, COUNT_T size); void DebugCopyConstructBuffer(_Out_writes_bytes_(size) BYTE *to, const BYTE *from, COUNT_T size); void DebugConstructBuffer(BYTE *buffer, COUNT_T size); void DebugDestructBuffer(BYTE *buffer, COUNT_T size); void DebugStompUnusedBuffer(BYTE *buffer, COUNT_T size); #ifdef _DEBUG static BOOL EnsureGarbageCharOnly(const BYTE *buffer, COUNT_T size); #endif CHECK CheckUnusedBuffer(const BYTE *buffer, COUNT_T size) const; #ifdef DACCESS_COMPILE public: // Expose the raw Target address of the buffer to DAC. // This does not do any marshalling. This can be useful if the caller wants to allocate the buffer on // its own heap so that it can survive Flush calls. MemoryRange DacGetRawBuffer() const { SUPPORTS_DAC; PTR_VOID p = dac_cast<PTR_VOID>((TADDR) m_buffer); return MemoryRange(p, GetSize()); } protected: // Return a host copy of the buffer, allocated on the DAC heap (and thus invalidated at the next call to Flush). 
void* DacGetRawContent(void) const { SUPPORTS_DAC; // SBuffers are used in DAC in two ways - buffers in the host, and marshalled buffers from the target. // This is a problem - we can't reason about the address space of the buffer statically, and instead rely on // the dynamic usage (i.e. the methods are basically bifurcated into those you can use on host instances, // and those you can use on marshalled copies). // Ideally we'll have two versions of the SBuffer code - one that's marshalled (normal DACization) and one // that isn't (host-only utility). This is the "dual-mode DAC problem". // But this only affects a couple classes, and so for now we'll ignore the problem - causing a bunch of DacCop // violations. DACCOP_IGNORE(CastBetweenAddressSpaces, "SBuffer has the dual-mode DAC problem"); DACCOP_IGNORE(FieldAccess, "SBuffer has the dual-mode DAC problem"); TADDR bufAddr = (TADDR)m_buffer; return DacInstantiateTypeByAddress(bufAddr, m_size, true); } void EnumMemoryRegions(CLRDataEnumMemoryFlags flags) const { SUPPORTS_DAC; if (flags != CLRDATA_ENUM_MEM_TRIAGE) { DacEnumMemoryRegion((TADDR)m_buffer, m_size); } } #endif //---------------------------------------------------------------------------- // Iterator base class //---------------------------------------------------------------------------- friend class CheckedIteratorBase<SBuffer>; class EMPTY_BASES_DECL Index : public CheckedIteratorBase<SBuffer> { friend class SBuffer; friend class CIterator; friend class Indexer<const BYTE, CIterator>; friend class Iterator; friend class Indexer<BYTE, Iterator>; protected: BYTE* m_ptr; Index(); Index(SBuffer *container, SCOUNT_T index); BYTE &GetAt(SCOUNT_T delta) const; void Skip(SCOUNT_T delta); SCOUNT_T Subtract(const Index &i) const; CHECK DoCheck(SCOUNT_T delta) const; void Resync(const SBuffer *container, BYTE *value) const; }; public: class EMPTY_BASES_DECL CIterator : public Index, public Indexer<const BYTE, CIterator> { friend class SBuffer; public: CIterator() { } CIterator(const SBuffer *buffer, int index) : Index(const_cast<SBuffer*>(buffer), index) { } }; class EMPTY_BASES_DECL Iterator : public Index, public Indexer<BYTE, Iterator> { friend class SBuffer; public: operator const CIterator &() const { return *(const CIterator *)this; } operator CIterator &() { return *(CIterator *)this; } Iterator() { } Iterator(SBuffer *buffer, int index) : Index(buffer, index) { } }; //---------------------------------------------------------------------------- // Member and data declarations //---------------------------------------------------------------------------- private: enum { REPRESENTATION_MASK = 0x07, ALLOCATED = 0x08, IMMUTABLE = 0x10, OPENED = 0x20, FLAG1 = 0x40, FLAG2 = 0x80, FLAG3 = 0x100, }; COUNT_T m_size; // externally visible size COUNT_T m_allocation; // actual allocated size UINT32 m_flags; // @todo: steal flags from sizes protected: union { BYTE *m_buffer; WCHAR *m_asStr; // For debugging, view as a unicode string }; #if _DEBUG protected: // We will update the "revision" of the buffer every time it is potentially reallocation, // so we can tell when iterators are no longer valid. 
int m_revision; #endif }; // ================================================================================ // InlineSBuffer : Tlempate for an SBuffer with preallocated buffer space // ================================================================================ #define BUFFER_ALIGNMENT 4 template <COUNT_T size> class EMPTY_BASES_DECL InlineSBuffer : public SBuffer { private: #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable:4200) // zero sized array #pragma warning(disable:4324) // don't complain if DECLSPEC_ALIGN actually pads DECLSPEC_ALIGN(BUFFER_ALIGNMENT) BYTE m_prealloc[size]; #pragma warning(pop) #else // use UINT64 to get maximum alignment of the memory UINT64 m_prealloc[ALIGN(size,sizeof(UINT64))/sizeof(UINT64)]; #endif // _MSC_VER public: InlineSBuffer() : SBuffer(Prealloc, (BYTE*)m_prealloc, size) { WRAPPER_NO_CONTRACT; } }; // a 1K sized buffer filled with $ that we'll use in debug builds for verification #define GARBAGE_FILL_DWORD 0x24242424 // $$$$ #define GARBAGE_FILL_BUFFER_ITEMS 16 #define GARBAGE_FILL_BUFFER_SIZE GARBAGE_FILL_BUFFER_ITEMS*sizeof(DWORD) // ================================================================================ // StackSBuffer : SBuffer with relatively large preallocated buffer for stack use // ================================================================================ #define STACK_ALLOC 256 typedef InlineSBuffer<STACK_ALLOC> StackSBuffer; // ================================================================================ // Inline definitions // ================================================================================ /// a wrapper for templates and such, that use "==". /// more expensive than a typical "==", though inline BOOL operator == (const SBuffer& b1,const SBuffer& b2) { return b1.Equals(b2); }; #include <sbuffer.inl> #endif // _SBUFFER_H_
-1
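The PR description above explains the change this record belongs to: on Arm64, code should not gate SIMD-dependent work on the raw `featureSIMD` flag (which the user can clear via `COMPlus_FeatureSIMD=0`), but on a `supportSIMDTypes()`-style query that always reports true for Arm64. The following is a minimal, self-contained C++ sketch of that pattern, included only as an editorial illustration; `MockCompiler`, its members, and the printed messages are hypothetical and are not the actual RyuJIT code — only the names `featureSIMD` and `supportSIMDTypes()` come from the description.

// Sketch, assuming a hypothetical MockCompiler standing in for the JIT's compiler object.
#include <cstdio>

struct MockCompiler
{
    bool featureSIMD; // mirrors the user-configurable flag named in the PR description

    // On Arm64, SIMD types must always be available for ABI handling,
    // so the query ignores the configurable flag on that target.
    bool supportSIMDTypes() const
    {
#if defined(__aarch64__) || defined(_M_ARM64)
        return true;
#else
        return featureSIMD;
#endif
    }
};

int main()
{
    MockCompiler comp{/* featureSIMD */ false}; // simulate COMPlus_FeatureSIMD=0

    // Old pattern (problematic on Arm64): gate on the raw flag.
    if (comp.featureSIMD)
        std::printf("featureSIMD check: SIMD path taken\n");
    else
        std::printf("featureSIMD check: SIMD path skipped\n");

    // Pattern described by the PR: use the query instead.
    if (comp.supportSIMDTypes())
        std::printf("supportSIMDTypes(): SIMD path taken\n");
    else
        std::printf("supportSIMDTypes(): SIMD path skipped\n");

    return 0;
}

On an Arm64 build of this sketch, the first branch would skip the SIMD path while the second still takes it, which is the behavioral difference the PR description is pointing at.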
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore doesn't work for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore doesn't work for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/inc/readytoruninstructionset.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // DO NOT EDIT THIS FILE! IT IS AUTOGENERATED // FROM /src/coreclr/tools/Common/JitInterface/ThunkGenerator/InstructionSetDesc.txt // using /src/coreclr/tools/Common/JitInterface/ThunkGenerator/gen.bat #ifndef READYTORUNINSTRUCTIONSET_H #define READYTORUNINSTRUCTIONSET_H enum ReadyToRunInstructionSet { READYTORUN_INSTRUCTION_Sse=1, READYTORUN_INSTRUCTION_Sse2=2, READYTORUN_INSTRUCTION_Sse3=3, READYTORUN_INSTRUCTION_Ssse3=4, READYTORUN_INSTRUCTION_Sse41=5, READYTORUN_INSTRUCTION_Sse42=6, READYTORUN_INSTRUCTION_Avx=7, READYTORUN_INSTRUCTION_Avx2=8, READYTORUN_INSTRUCTION_Aes=9, READYTORUN_INSTRUCTION_Bmi1=10, READYTORUN_INSTRUCTION_Bmi2=11, READYTORUN_INSTRUCTION_Fma=12, READYTORUN_INSTRUCTION_Lzcnt=13, READYTORUN_INSTRUCTION_Pclmulqdq=14, READYTORUN_INSTRUCTION_Popcnt=15, READYTORUN_INSTRUCTION_ArmBase=16, READYTORUN_INSTRUCTION_AdvSimd=17, READYTORUN_INSTRUCTION_Crc32=18, READYTORUN_INSTRUCTION_Sha1=19, READYTORUN_INSTRUCTION_Sha256=20, READYTORUN_INSTRUCTION_Atomics=21, READYTORUN_INSTRUCTION_X86Base=22, READYTORUN_INSTRUCTION_Dp=23, READYTORUN_INSTRUCTION_Rdm=24, READYTORUN_INSTRUCTION_AvxVnni=25, }; #endif // READYTORUNINSTRUCTIONSET_H
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // DO NOT EDIT THIS FILE! IT IS AUTOGENERATED // FROM /src/coreclr/tools/Common/JitInterface/ThunkGenerator/InstructionSetDesc.txt // using /src/coreclr/tools/Common/JitInterface/ThunkGenerator/gen.bat #ifndef READYTORUNINSTRUCTIONSET_H #define READYTORUNINSTRUCTIONSET_H enum ReadyToRunInstructionSet { READYTORUN_INSTRUCTION_Sse=1, READYTORUN_INSTRUCTION_Sse2=2, READYTORUN_INSTRUCTION_Sse3=3, READYTORUN_INSTRUCTION_Ssse3=4, READYTORUN_INSTRUCTION_Sse41=5, READYTORUN_INSTRUCTION_Sse42=6, READYTORUN_INSTRUCTION_Avx=7, READYTORUN_INSTRUCTION_Avx2=8, READYTORUN_INSTRUCTION_Aes=9, READYTORUN_INSTRUCTION_Bmi1=10, READYTORUN_INSTRUCTION_Bmi2=11, READYTORUN_INSTRUCTION_Fma=12, READYTORUN_INSTRUCTION_Lzcnt=13, READYTORUN_INSTRUCTION_Pclmulqdq=14, READYTORUN_INSTRUCTION_Popcnt=15, READYTORUN_INSTRUCTION_ArmBase=16, READYTORUN_INSTRUCTION_AdvSimd=17, READYTORUN_INSTRUCTION_Crc32=18, READYTORUN_INSTRUCTION_Sha1=19, READYTORUN_INSTRUCTION_Sha256=20, READYTORUN_INSTRUCTION_Atomics=21, READYTORUN_INSTRUCTION_X86Base=22, READYTORUN_INSTRUCTION_Dp=23, READYTORUN_INSTRUCTION_Rdm=24, READYTORUN_INSTRUCTION_AvxVnni=25, }; #endif // READYTORUNINSTRUCTIONSET_H
-1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore doesn't work for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore doesn't work for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/tests/profiler/native/multiple/multiple.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once #include "../profiler.h" class MultiplyLoaded : public Profiler { public: MultiplyLoaded() : Profiler() {} static GUID GetClsid(); virtual HRESULT STDMETHODCALLTYPE Initialize(IUnknown* pICorProfilerInfoUnk); virtual HRESULT STDMETHODCALLTYPE InitializeForAttach(IUnknown* pICorProfilerInfoUnk, void* pvClientData, UINT cbClientData); virtual HRESULT STDMETHODCALLTYPE Shutdown(); virtual HRESULT STDMETHODCALLTYPE LoadAsNotificationOnly(BOOL *pbNotificationOnly); virtual HRESULT STDMETHODCALLTYPE ProfilerDetachSucceeded(); virtual HRESULT STDMETHODCALLTYPE ExceptionThrown(ObjectID thrownObjectId); private: static std::atomic<int> _exceptionThrownSeenCount; static std::atomic<int> _detachCount; static std::atomic<int> _failures; HRESULT InitializeCommon(IUnknown* pCorProfilerInfoUnk); };
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once #include "../profiler.h" class MultiplyLoaded : public Profiler { public: MultiplyLoaded() : Profiler() {} static GUID GetClsid(); virtual HRESULT STDMETHODCALLTYPE Initialize(IUnknown* pICorProfilerInfoUnk); virtual HRESULT STDMETHODCALLTYPE InitializeForAttach(IUnknown* pICorProfilerInfoUnk, void* pvClientData, UINT cbClientData); virtual HRESULT STDMETHODCALLTYPE Shutdown(); virtual HRESULT STDMETHODCALLTYPE LoadAsNotificationOnly(BOOL *pbNotificationOnly); virtual HRESULT STDMETHODCALLTYPE ProfilerDetachSucceeded(); virtual HRESULT STDMETHODCALLTYPE ExceptionThrown(ObjectID thrownObjectId); private: static std::atomic<int> _exceptionThrownSeenCount; static std::atomic<int> _detachCount; static std::atomic<int> _failures; HRESULT InitializeCommon(IUnknown* pCorProfilerInfoUnk); };
-1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore doesn't work for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore doesn't work for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/pal/tests/palsuite/miscellaneous/GlobalMemoryStatusEx/test1/test.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source : test.c ** ** Purpose: Test for GlobalMemoryStatusEx() function ** ** **=========================================================*/ #include <palsuite.h> PALTEST(miscellaneous_GlobalMemoryStatusEx_test1_paltest_globalmemorystatusex_test1, "miscellaneous/GlobalMemoryStatusEx/test1/paltest_globalmemorystatusex_test1") { MEMORYSTATUSEX memoryStatus; /* * Initialize the PAL and return FAILURE if this fails */ if(0 != (PAL_Initialize(argc, argv))) { return FAIL; } if (!GlobalMemoryStatusEx(&memoryStatus)) { Fail("ERROR: GlobalMemoryStatusEx failed."); } printf("GlobalMemoryStatusEx:\n"); printf(" ullTotalPhys: %llu\n", memoryStatus.ullTotalPhys); printf(" ullAvailPhys: %llu\n", memoryStatus.ullAvailPhys); printf(" ullTotalVirtual: %llu\n", memoryStatus.ullTotalVirtual); printf(" ullAvailVirtual: %llu\n", memoryStatus.ullAvailVirtual); printf(" ullTotalPageFile: %llu\n", memoryStatus.ullTotalPageFile); printf(" ullAvailPageFile: %llu\n", memoryStatus.ullAvailPageFile); printf(" ullAvailExtendedVirtual: %llu\n", memoryStatus.ullAvailExtendedVirtual); printf(" dwMemoryLoad: %u\n", memoryStatus.dwMemoryLoad); if (memoryStatus.ullTotalPhys == 0 || memoryStatus.ullAvailPhys == 0 || memoryStatus.ullTotalVirtual == 0 || memoryStatus.ullAvailVirtual == 0 ) { Fail("ERROR: GlobalMemoryStatusEx succeeded, but returned zero physical of virtual memory sizes."); } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source : test.c ** ** Purpose: Test for GlobalMemoryStatusEx() function ** ** **=========================================================*/ #include <palsuite.h> PALTEST(miscellaneous_GlobalMemoryStatusEx_test1_paltest_globalmemorystatusex_test1, "miscellaneous/GlobalMemoryStatusEx/test1/paltest_globalmemorystatusex_test1") { MEMORYSTATUSEX memoryStatus; /* * Initialize the PAL and return FAILURE if this fails */ if(0 != (PAL_Initialize(argc, argv))) { return FAIL; } if (!GlobalMemoryStatusEx(&memoryStatus)) { Fail("ERROR: GlobalMemoryStatusEx failed."); } printf("GlobalMemoryStatusEx:\n"); printf(" ullTotalPhys: %llu\n", memoryStatus.ullTotalPhys); printf(" ullAvailPhys: %llu\n", memoryStatus.ullAvailPhys); printf(" ullTotalVirtual: %llu\n", memoryStatus.ullTotalVirtual); printf(" ullAvailVirtual: %llu\n", memoryStatus.ullAvailVirtual); printf(" ullTotalPageFile: %llu\n", memoryStatus.ullTotalPageFile); printf(" ullAvailPageFile: %llu\n", memoryStatus.ullAvailPageFile); printf(" ullAvailExtendedVirtual: %llu\n", memoryStatus.ullAvailExtendedVirtual); printf(" dwMemoryLoad: %u\n", memoryStatus.dwMemoryLoad); if (memoryStatus.ullTotalPhys == 0 || memoryStatus.ullAvailPhys == 0 || memoryStatus.ullTotalVirtual == 0 || memoryStatus.ullAvailVirtual == 0 ) { Fail("ERROR: GlobalMemoryStatusEx succeeded, but returned zero physical of virtual memory sizes."); } PAL_Terminate(); return PASS; }
-1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore doesn't work for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore doesn't work for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/mono/mono/metadata/metadata-update.h
/** * \file */ #ifndef __MONO_METADATA_UPDATE_H__ #define __MONO_METADATA_UPDATE_H__ #include "mono/utils/mono-forward.h" #include "mono/utils/bsearch.h" #include "mono/metadata/loader-internals.h" #include "mono/metadata/metadata-internals.h" void mono_metadata_update_init (void); enum MonoModifiableAssemblies { /* modifiable assemblies are disabled */ MONO_MODIFIABLE_ASSM_NONE = 0, /* assemblies with the Debug flag are modifiable */ MONO_MODIFIABLE_ASSM_DEBUG = 1, }; typedef MonoStreamHeader* (*MetadataHeapGetterFunc) (MonoImage*); gboolean mono_metadata_update_available (void); gboolean mono_metadata_update_enabled (int *modifiable_assemblies_out); gboolean mono_metadata_update_no_inline (MonoMethod *caller, MonoMethod *callee); uint32_t mono_metadata_update_thread_expose_published (void); uint32_t mono_metadata_update_get_thread_generation (void); void mono_metadata_update_cleanup_on_close (MonoImage *base_image); void mono_metadata_update_image_close_except_pools_all (MonoImage *base_image); void mono_metadata_update_image_close_all (MonoImage *base_image); gpointer mono_metadata_update_get_updated_method_rva (MonoImage *base_image, uint32_t idx); gpointer mono_metadata_update_get_updated_method_ppdb (MonoImage *base_image, uint32_t idx); gboolean mono_metadata_update_table_bounds_check (MonoImage *base_image, int table_index, int token_index); gboolean mono_metadata_update_delta_heap_lookup (MonoImage *base_image, MetadataHeapGetterFunc get_heap, uint32_t orig_index, MonoImage **image_out, uint32_t *index_out); void* mono_metadata_update_metadata_linear_search (MonoImage *base_image, MonoTableInfo *base_table, const void *key, BinarySearchComparer comparer); MonoMethod* mono_metadata_update_find_method_by_name (MonoClass *klass, const char *name, int param_count, int flags, MonoError *error); uint32_t mono_metadata_update_get_field_idx (MonoClassField *field); MonoClassField * mono_metadata_update_get_field (MonoClass *klass, uint32_t fielddef_token); gpointer mono_metadata_update_get_static_field_addr (MonoClassField *field); #endif /*__MONO_METADATA_UPDATE_H__*/
/** * \file */ #ifndef __MONO_METADATA_UPDATE_H__ #define __MONO_METADATA_UPDATE_H__ #include "mono/utils/mono-forward.h" #include "mono/utils/bsearch.h" #include "mono/metadata/loader-internals.h" #include "mono/metadata/metadata-internals.h" void mono_metadata_update_init (void); enum MonoModifiableAssemblies { /* modifiable assemblies are disabled */ MONO_MODIFIABLE_ASSM_NONE = 0, /* assemblies with the Debug flag are modifiable */ MONO_MODIFIABLE_ASSM_DEBUG = 1, }; typedef MonoStreamHeader* (*MetadataHeapGetterFunc) (MonoImage*); gboolean mono_metadata_update_available (void); gboolean mono_metadata_update_enabled (int *modifiable_assemblies_out); gboolean mono_metadata_update_no_inline (MonoMethod *caller, MonoMethod *callee); uint32_t mono_metadata_update_thread_expose_published (void); uint32_t mono_metadata_update_get_thread_generation (void); void mono_metadata_update_cleanup_on_close (MonoImage *base_image); void mono_metadata_update_image_close_except_pools_all (MonoImage *base_image); void mono_metadata_update_image_close_all (MonoImage *base_image); gpointer mono_metadata_update_get_updated_method_rva (MonoImage *base_image, uint32_t idx); gpointer mono_metadata_update_get_updated_method_ppdb (MonoImage *base_image, uint32_t idx); gboolean mono_metadata_update_table_bounds_check (MonoImage *base_image, int table_index, int token_index); gboolean mono_metadata_update_delta_heap_lookup (MonoImage *base_image, MetadataHeapGetterFunc get_heap, uint32_t orig_index, MonoImage **image_out, uint32_t *index_out); void* mono_metadata_update_metadata_linear_search (MonoImage *base_image, MonoTableInfo *base_table, const void *key, BinarySearchComparer comparer); MonoMethod* mono_metadata_update_find_method_by_name (MonoClass *klass, const char *name, int param_count, int flags, MonoError *error); uint32_t mono_metadata_update_get_field_idx (MonoClassField *field); MonoClassField * mono_metadata_update_get_field (MonoClass *klass, uint32_t fielddef_token); gpointer mono_metadata_update_get_static_field_addr (MonoClassField *field); #endif /*__MONO_METADATA_UPDATE_H__*/
-1
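To make the change described in the record above concrete, here is a minimal, hedged C++ sketch of the pattern the PR describes: SIMD-dependent paths query a helper such as `supportSIMDTypes()` (which must always report true on Arm64, where SIMD is needed for ABI handling) rather than testing the raw `featureSIMD` flag that `COMPlus_FeatureSIMD=0` can turn off. Only `featureSIMD`, `supportSIMDTypes()`, and `COMPlus_FeatureSIMD` come from the PR description; the class shape, function names, and `TARGET_ARM64` guard below are illustrative assumptions, not the actual RyuJIT code.

// Hypothetical sketch of the featureSIMD -> supportSIMDTypes() pattern from the
// PR above. Only featureSIMD, supportSIMDTypes() and COMPlus_FeatureSIMD are
// taken from the PR description; the rest is illustrative, not RyuJIT code.
#include <cstdio>

// Stand-in for the real build configuration; define when building for Arm64.
// #define TARGET_ARM64

struct CompilerSketch
{
    // Mirrors a config-driven flag: false when COMPlus_FeatureSIMD=0 is set.
    bool featureSIMD;

    // SIMD types must always be usable on Arm64 (the ABI depends on them),
    // regardless of the configuration flag.
    bool supportSIMDTypes() const
    {
#ifdef TARGET_ARM64
        return true;
#else
        return featureSIMD;
#endif
    }

    void classifyStructReturn() const
    {
        // Before the change: if (featureSIMD) { ... } -- wrong on Arm64 when
        // the flag is forced off. After: query supportSIMDTypes() instead.
        if (supportSIMDTypes())
        {
            std::puts("SIMD-aware ABI classification");
        }
        else
        {
            std::puts("scalar-only path");
        }
    }
};

int main()
{
    CompilerSketch comp{ /*featureSIMD*/ false }; // simulate COMPlus_FeatureSIMD=0
    comp.classifyStructReturn(); // still SIMD-aware when TARGET_ARM64 is defined
    return 0;
}

In a non-Arm64 build of this sketch the scalar path is taken when the flag is off, while defining TARGET_ARM64 keeps the SIMD-aware path, which is the behavior the PR requires.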
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/native/libs/System.Globalization.Native/pal_localeNumberData.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // #pragma once #include "pal_locale.h" #include "pal_compiler.h" // Enum that corresponds to managed enum CultureData.LocaleNumberData. // The numeric values of the enum members match their Win32 counterparts. typedef enum { LocaleNumber_LanguageId = 0x01, LocaleNumber_MeasurementSystem = 0x0D, LocaleNumber_FractionalDigitsCount = 0x00000011, LocaleNumber_NegativeNumberFormat = 0x00001010, LocaleNumber_MonetaryFractionalDigitsCount = 0x00000019, LocaleNumber_PositiveMonetaryNumberFormat = 0x0000001B, LocaleNumber_NegativeMonetaryNumberFormat = 0x0000001C, LocaleNumber_FirstDayofWeek = 0x0000100C, LocaleNumber_FirstWeekOfYear = 0x0000100D, LocaleNumber_ReadingLayout = 0x00000070, LocaleNumber_NegativePercentFormat = 0x00000074, LocaleNumber_PositivePercentFormat = 0x00000075, LocaleNumber_Digit = 0x00000010, LocaleNumber_Monetary = 0x00000018 } LocaleNumberData; // Enum that corresponds to managed enum System.Globalization.CalendarWeekRule typedef enum { WeekRule_FirstDay = 0, WeekRule_FirstFullWeek = 1, WeekRule_FirstFourDayWeek = 2 } CalendarWeekRule; PALEXPORT int32_t GlobalizationNative_GetLocaleInfoInt(const UChar* localeName, LocaleNumberData localeNumberData, int32_t* value); PALEXPORT int32_t GlobalizationNative_GetLocaleInfoGroupingSizes(const UChar* localeName, LocaleNumberData localeGroupingData, int32_t* primaryGroupSize, int32_t* secondaryGroupSize);
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // #pragma once #include "pal_locale.h" #include "pal_compiler.h" // Enum that corresponds to managed enum CultureData.LocaleNumberData. // The numeric values of the enum members match their Win32 counterparts. typedef enum { LocaleNumber_LanguageId = 0x01, LocaleNumber_MeasurementSystem = 0x0D, LocaleNumber_FractionalDigitsCount = 0x00000011, LocaleNumber_NegativeNumberFormat = 0x00001010, LocaleNumber_MonetaryFractionalDigitsCount = 0x00000019, LocaleNumber_PositiveMonetaryNumberFormat = 0x0000001B, LocaleNumber_NegativeMonetaryNumberFormat = 0x0000001C, LocaleNumber_FirstDayofWeek = 0x0000100C, LocaleNumber_FirstWeekOfYear = 0x0000100D, LocaleNumber_ReadingLayout = 0x00000070, LocaleNumber_NegativePercentFormat = 0x00000074, LocaleNumber_PositivePercentFormat = 0x00000075, LocaleNumber_Digit = 0x00000010, LocaleNumber_Monetary = 0x00000018 } LocaleNumberData; // Enum that corresponds to managed enum System.Globalization.CalendarWeekRule typedef enum { WeekRule_FirstDay = 0, WeekRule_FirstFullWeek = 1, WeekRule_FirstFourDayWeek = 2 } CalendarWeekRule; PALEXPORT int32_t GlobalizationNative_GetLocaleInfoInt(const UChar* localeName, LocaleNumberData localeNumberData, int32_t* value); PALEXPORT int32_t GlobalizationNative_GetLocaleInfoGroupingSizes(const UChar* localeName, LocaleNumberData localeGroupingData, int32_t* primaryGroupSize, int32_t* secondaryGroupSize);
-1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/vm/threads.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // THREADS.CPP // // // #include "common.h" #include "frames.h" #include "threads.h" #include "stackwalk.h" #include "excep.h" #include "comsynchronizable.h" #include "log.h" #include "gcheaputilities.h" #include "mscoree.h" #include "dbginterface.h" #include "corprof.h" // profiling #include "eeprofinterfaces.h" #include "eeconfig.h" #include "corhost.h" #include "win32threadpool.h" #include "jitinterface.h" #include "eventtrace.h" #include "comutilnative.h" #include "finalizerthread.h" #include "threadsuspend.h" #include "wrappers.h" #include "nativeoverlapped.h" #include "appdomain.inl" #include "vmholder.h" #include "exceptmacros.h" #include "win32threadpool.h" #ifdef FEATURE_COMINTEROP #include "runtimecallablewrapper.h" #include "interoputil.h" #include "interoputil.inl" #endif // FEATURE_COMINTEROP #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT #include "olecontexthelpers.h" #include "roapi.h" #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT #ifdef FEATURE_SPECIAL_USER_MODE_APC #include "asmconstants.h" #endif static const PortableTailCallFrame g_sentinelTailCallFrame = { NULL, NULL }; TailCallTls::TailCallTls() // A new frame will always be allocated before the frame is modified, // so casting away const is ok here. : m_frame(const_cast<PortableTailCallFrame*>(&g_sentinelTailCallFrame)) , m_argBuffer(NULL) { } Thread* STDCALL GetThreadHelper() { return GetThreadNULLOk(); } TailCallArgBuffer* TailCallTls::AllocArgBuffer(int size, void* gcDesc) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END _ASSERTE(size >= (int)offsetof(TailCallArgBuffer, Args)); if (m_argBuffer != NULL && m_argBuffer->Size < size) { FreeArgBuffer(); } if (m_argBuffer == NULL) { m_argBuffer = (TailCallArgBuffer*)new (nothrow) BYTE[size]; if (m_argBuffer == NULL) return NULL; m_argBuffer->Size = size; } m_argBuffer->State = TAILCALLARGBUFFER_ACTIVE; m_argBuffer->GCDesc = gcDesc; if (gcDesc != NULL) { memset(m_argBuffer->Args, 0, size - offsetof(TailCallArgBuffer, Args)); } return m_argBuffer; } #if defined (_DEBUG_IMPL) || defined(_PREFAST_) thread_local int t_ForbidGCLoaderUseCount; #endif uint64_t Thread::dead_threads_non_alloc_bytes = 0; SPTR_IMPL(ThreadStore, ThreadStore, s_pThreadStore); CONTEXT* ThreadStore::s_pOSContext = NULL; BYTE* ThreadStore::s_pOSContextBuffer = NULL; CLREvent *ThreadStore::s_pWaitForStackCrawlEvent; PTR_ThreadLocalModule ThreadLocalBlock::GetTLMIfExists(ModuleIndex index) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; if (index.m_dwIndex >= m_TLMTableSize) return NULL; return m_pTLMTable[index.m_dwIndex].pTLM; } PTR_ThreadLocalModule ThreadLocalBlock::GetTLMIfExists(MethodTable* pMT) { WRAPPER_NO_CONTRACT; ModuleIndex index = pMT->GetModuleForStatics()->GetModuleIndex(); return GetTLMIfExists(index); } #ifndef DACCESS_COMPILE BOOL Thread::s_fCleanFinalizedThread = FALSE; UINT64 Thread::s_workerThreadPoolCompletionCountOverflow = 0; UINT64 Thread::s_ioThreadPoolCompletionCountOverflow = 0; UINT64 Thread::s_monitorLockContentionCountOverflow = 0; CrstStatic g_DeadlockAwareCrst; // // A transient thread value that indicates this thread is currently walking its stack // or the stack of another thread. This value is useful to help short-circuit // some problematic checks in the loader, guarantee that types & assemblies // encountered during the walk must already be loaded, and provide information to control // assembly loading behavior during stack walks. 
// // This value is set around the main portions of the stack walk (as those portions may // enter the type & assembly loaders). This is also explicitly cleared while the // walking thread calls the stackwalker callback or needs to execute managed code, as // such calls may execute arbitrary code unrelated to the actual stack walking, and // may never return, in the case of exception stackwalk callbacks. // thread_local Thread* t_pStackWalkerWalkingThread; #if defined(_DEBUG) BOOL MatchThreadHandleToOsId ( HANDLE h, DWORD osId ) { #ifndef TARGET_UNIX LIMITED_METHOD_CONTRACT; DWORD id = GetThreadId(h); // OS call GetThreadId may fail, and return 0. In this case we can not // make a decision if the two match or not. Instead, we ignore this check. return id == 0 || id == osId; #else // !TARGET_UNIX return TRUE; #endif // !TARGET_UNIX } #endif // _DEBUG #ifdef _DEBUG_IMPL template<> AutoCleanupGCAssert<TRUE>::AutoCleanupGCAssert() { SCAN_SCOPE_BEGIN; STATIC_CONTRACT_MODE_COOPERATIVE; } template<> AutoCleanupGCAssert<FALSE>::AutoCleanupGCAssert() { SCAN_SCOPE_BEGIN; STATIC_CONTRACT_MODE_PREEMPTIVE; } template<> void GCAssert<TRUE>::BeginGCAssert() { SCAN_SCOPE_BEGIN; STATIC_CONTRACT_MODE_COOPERATIVE; } template<> void GCAssert<FALSE>::BeginGCAssert() { SCAN_SCOPE_BEGIN; STATIC_CONTRACT_MODE_PREEMPTIVE; } #endif // #define NEW_TLS 1 #ifdef _DEBUG void Thread::SetFrame(Frame *pFrame) { CONTRACTL { NOTHROW; GC_NOTRIGGER; DEBUG_ONLY; MODE_COOPERATIVE; // It only makes sense for a Thread to call SetFrame on itself. PRECONDITION(this == GetThread()); PRECONDITION(CheckPointer(pFrame)); } CONTRACTL_END; if (g_pConfig->fAssertOnFailFast()) { Frame *pWalk = m_pFrame; BOOL fExist = FALSE; while (pWalk != (Frame*) -1) { if (pWalk == pFrame) { fExist = TRUE; break; } pWalk = pWalk->m_Next; } pWalk = m_pFrame; while (fExist && pWalk != pFrame && pWalk != (Frame*)-1) { pWalk = pWalk->m_Next; } } m_pFrame = pFrame; // If stack overrun corruptions are expected, then skip this check // as the Frame chain may have been corrupted. if (g_pConfig->fAssertOnFailFast() == false) return; Frame* espVal = (Frame*)GetCurrentSP(); while (pFrame != (Frame*) -1) { static Frame* stopFrame = 0; if (pFrame == stopFrame) _ASSERTE(!"SetFrame frame == stopFrame"); _ASSERTE(IsExecutingOnAltStack() || espVal < pFrame); _ASSERTE(IsExecutingOnAltStack() || pFrame < m_CacheStackBase); _ASSERTE(pFrame->GetFrameType() < Frame::TYPE_COUNT); pFrame = pFrame->m_Next; } } #endif // _DEBUG //************************************************************************ // PRIVATE GLOBALS //************************************************************************ extern unsigned __int64 getTimeStamp(); extern unsigned __int64 getTickFrequency(); unsigned __int64 tgetFrequency() { static unsigned __int64 cachedFreq = (unsigned __int64) -1; if (cachedFreq != (unsigned __int64) -1) return cachedFreq; else { cachedFreq = getTickFrequency(); return cachedFreq; } } #endif // #ifndef DACCESS_COMPILE static StackWalkAction DetectHandleILStubsForDebugger_StackWalkCallback(CrawlFrame *pCF, VOID *pData) { WRAPPER_NO_CONTRACT; // It suffices to wait for the first CrawlFrame with non-NULL function MethodDesc *pMD = pCF->GetFunction(); if (pMD != NULL) { *(bool *)pData = pMD->IsILStub(); return SWA_ABORT; } return SWA_CONTINUE; } // This is really just a heuristic to detect if we are executing in an M2U IL stub or // one of the marshaling methods it calls. It doesn't deal with U2M IL stubs. 
// We loop through the frame chain looking for an uninitialized TransitionFrame. // If there is one, then we are executing in an M2U IL stub or one of the methods it calls. // On the other hand, if there is an initialized TransitionFrame, then we are not. // Also, if there is an HMF on the stack, then we stop. This could be the case where // an IL stub calls an FCALL which ends up in a managed method, and the debugger wants to // stop in those cases. Some examples are COMException..ctor and custom marshalers. // // X86 IL stubs use InlinedCallFrame and are indistinguishable from ordinary methods with // inlined P/Invoke when judging just from the frame chain. We use stack walk to decide // this case. bool Thread::DetectHandleILStubsForDebugger() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; Frame* pFrame = GetFrame(); if (pFrame != NULL) { while (pFrame != FRAME_TOP) { // Check for HMF's. See the comment at the beginning of this function. if (pFrame->GetVTablePtr() == HelperMethodFrame::GetMethodFrameVPtr()) { break; } // If there is an entry frame (i.e. U2M managed), we should break. else if (pFrame->GetFrameType() == Frame::TYPE_ENTRY) { break; } // Check for M2U transition frames. See the comment at the beginning of this function. else if (pFrame->GetFrameType() == Frame::TYPE_EXIT) { if (pFrame->GetReturnAddress() == NULL) { // If the return address is NULL, then the frame has not been initialized yet. // We may see InlinedCallFrame in ordinary methods as well. Have to do // stack walk to find out if this is really an IL stub. bool fInILStub = false; StackWalkFrames(&DetectHandleILStubsForDebugger_StackWalkCallback, &fInILStub, QUICKUNWIND, dac_cast<PTR_Frame>(pFrame)); if (fInILStub) return true; } else { // The frame is fully initialized. return false; } } pFrame = pFrame->Next(); } } return false; } #ifndef _MSC_VER __thread ThreadLocalInfo gCurrentThreadInfo; #endif #ifndef DACCESS_COMPILE void SetThread(Thread* t) { LIMITED_METHOD_CONTRACT gCurrentThreadInfo.m_pThread = t; if (t != NULL) { EnsureTlsDestructionMonitor(); } } void SetAppDomain(AppDomain* ad) { LIMITED_METHOD_CONTRACT gCurrentThreadInfo.m_pAppDomain = ad; } BOOL Thread::Alert () { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; BOOL fRetVal = FALSE; { HANDLE handle = GetThreadHandle(); if (handle != INVALID_HANDLE_VALUE) { fRetVal = ::QueueUserAPC(UserInterruptAPC, handle, APC_Code); } } return fRetVal; } DWORD Thread::Join(DWORD timeout, BOOL alertable) { WRAPPER_NO_CONTRACT; return JoinEx(timeout,alertable?WaitMode_Alertable:WaitMode_None); } DWORD Thread::JoinEx(DWORD timeout, WaitMode mode) { CONTRACTL { THROWS; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; BOOL alertable = (mode & WaitMode_Alertable)?TRUE:FALSE; Thread *pCurThread = GetThreadNULLOk(); _ASSERTE(pCurThread || dbgOnly_IsSpecialEEThread()); { // We're not hosted, so WaitMode_InDeadlock is irrelevant. Clear it, so that this wait can be // forwarded to a SynchronizationContext if needed. 
mode = (WaitMode)(mode & ~WaitMode_InDeadlock); HANDLE handle = GetThreadHandle(); if (handle == INVALID_HANDLE_VALUE) { return WAIT_FAILED; } if (pCurThread) { return pCurThread->DoAppropriateWait(1, &handle, FALSE, timeout, mode); } else { return WaitForSingleObjectEx(handle,timeout,alertable); } } } extern INT32 MapFromNTPriority(INT32 NTPriority); BOOL Thread::SetThreadPriority( int nPriority // thread priority level ) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; BOOL fRet; { if (GetThreadHandle() == INVALID_HANDLE_VALUE) { // When the thread starts running, we will set the thread priority. fRet = TRUE; } else fRet = ::SetThreadPriority(GetThreadHandle(), nPriority); } if (fRet) { GCX_COOP(); THREADBASEREF pObject = (THREADBASEREF)ObjectFromHandle(m_ExposedObject); if (pObject != NULL) { // TODO: managed ThreadPriority only supports up to 4. pObject->SetPriority (MapFromNTPriority(nPriority)); } } return fRet; } int Thread::GetThreadPriority() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; int nRetVal = -1; if (GetThreadHandle() == INVALID_HANDLE_VALUE) { nRetVal = FALSE; } else nRetVal = ::GetThreadPriority(GetThreadHandle()); return nRetVal; } void Thread::ChooseThreadCPUGroupAffinity() { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; #ifndef TARGET_UNIX if (!CPUGroupInfo::CanEnableGCCPUGroups() || !CPUGroupInfo::CanEnableThreadUseAllCpuGroups() || !CPUGroupInfo::CanAssignCpuGroupsToThreads()) { return; } //Borrow the ThreadStore Lock here: Lock ThreadStore before distributing threads ThreadStoreLockHolder TSLockHolder(TRUE); // this thread already has CPU group affinity set if (m_pAffinityMask != 0) return; if (GetThreadHandle() == INVALID_HANDLE_VALUE) return; GROUP_AFFINITY groupAffinity; CPUGroupInfo::ChooseCPUGroupAffinity(&groupAffinity); CPUGroupInfo::SetThreadGroupAffinity(GetThreadHandle(), &groupAffinity, NULL); m_wCPUGroup = groupAffinity.Group; m_pAffinityMask = groupAffinity.Mask; #endif // !TARGET_UNIX } void Thread::ClearThreadCPUGroupAffinity() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; #ifndef TARGET_UNIX if (!CPUGroupInfo::CanEnableGCCPUGroups() || !CPUGroupInfo::CanEnableThreadUseAllCpuGroups() || !CPUGroupInfo::CanAssignCpuGroupsToThreads()) { return; } ThreadStoreLockHolder TSLockHolder(TRUE); // this thread does not have CPU group affinity set if (m_pAffinityMask == 0) return; GROUP_AFFINITY groupAffinity; groupAffinity.Group = m_wCPUGroup; groupAffinity.Mask = m_pAffinityMask; CPUGroupInfo::ClearCPUGroupAffinity(&groupAffinity); m_wCPUGroup = 0; m_pAffinityMask = 0; #endif // !TARGET_UNIX } DWORD Thread::StartThread() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; #ifdef _DEBUG _ASSERTE (m_Creator.IsCurrentThread()); m_Creator.Clear(); #endif _ASSERTE (GetThreadHandle() != INVALID_HANDLE_VALUE); DWORD dwRetVal = ::ResumeThread(GetThreadHandle()); return dwRetVal; } // Class static data: LONG Thread::m_DebugWillSyncCount = -1; LONG Thread::m_DetachCount = 0; LONG Thread::m_ActiveDetachCount = 0; static void DeleteThread(Thread* pThread) { CONTRACTL { NOTHROW; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; //_ASSERTE (pThread == GetThread()); SetThread(NULL); SetAppDomain(NULL); if (pThread->HasThreadStateNC(Thread::TSNC_ExistInThreadStore)) { pThread->DetachThread(FALSE); } else { #ifdef FEATURE_COMINTEROP pThread->RevokeApartmentSpy(); #endif // FEATURE_COMINTEROP FastInterlockOr((ULONG *)&pThread->m_State, Thread::TS_Dead); // ~Thread() calls SafeSetThrowables which has a 
conditional contract // which says that if you call it with a NULL throwable then it is // MODE_ANY, otherwise MODE_COOPERATIVE. Scan doesn't understand that // and assumes that we're violating the MODE_COOPERATIVE. CONTRACT_VIOLATION(ModeViolation); delete pThread; } } static void EnsurePreemptive() { WRAPPER_NO_CONTRACT; Thread *pThread = GetThreadNULLOk(); if (pThread && pThread->PreemptiveGCDisabled()) { pThread->EnablePreemptiveGC(); } } typedef StateHolder<DoNothing, EnsurePreemptive> EnsurePreemptiveModeIfException; Thread* SetupThread() { CONTRACTL { THROWS; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; Thread* pThread; if ((pThread = GetThreadNULLOk()) != NULL) return pThread; // For interop debugging, we must mark that we're in a can't-stop region // b.c we may take Crsts here that may block the helper thread. // We're especially fragile here b/c we don't have a Thread object yet CantStopHolder hCantStop; EnsurePreemptiveModeIfException ensurePreemptive; #ifdef _DEBUG CHECK chk; if (g_pConfig->SuppressChecks()) { // EnterAssert will suppress any checks chk.EnterAssert(); } #endif // Normally, HasStarted is called from the thread's entrypoint to introduce it to // the runtime. But sometimes that thread is used for DLL_THREAD_ATTACH notifications // that call into managed code. In that case, a call to SetupThread here must // find the correct Thread object and install it into TLS. if (ThreadStore::s_pThreadStore->GetPendingThreadCount() != 0) { DWORD ourOSThreadId = ::GetCurrentThreadId(); { ThreadStoreLockHolder TSLockHolder; _ASSERTE(pThread == NULL); while ((pThread = ThreadStore::s_pThreadStore->GetAllThreadList(pThread, Thread::TS_Unstarted | Thread::TS_FailStarted, Thread::TS_Unstarted)) != NULL) { if (pThread->GetOSThreadId() == ourOSThreadId) { break; } } if (pThread != NULL) { STRESS_LOG2(LF_SYNC, LL_INFO1000, "T::ST - recycling thread 0x%p (state: 0x%x)\n", pThread, pThread->m_State.Load()); } } // It's perfectly reasonable to not find the thread. It's just an unrelated // thread spinning up. if (pThread) { if (IsThreadPoolWorkerSpecialThread()) { FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread); pThread->SetBackground(TRUE); } else if (IsThreadPoolIOCompletionSpecialThread()) { FastInterlockOr ((ULONG *) &pThread->m_State, Thread::TS_CompletionPortThread); pThread->SetBackground(TRUE); } else if (IsTimerSpecialThread() || IsWaitSpecialThread()) { FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread); pThread->SetBackground(TRUE); } BOOL fStatus = pThread->HasStarted(); ensurePreemptive.SuppressRelease(); return fStatus ? pThread : NULL; } } // First time we've seen this thread in the runtime: pThread = new Thread(); // What state are we in here? COOP??? Holder<Thread*,DoNothing<Thread*>,DeleteThread> threadHolder(pThread); SetupTLSForThread(); pThread->InitThread(); pThread->PrepareApartmentAndContext(); // reset any unstarted bits on the thread object FastInterlockAnd((ULONG *) &pThread->m_State, ~Thread::TS_Unstarted); FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_LegalToJoin); ThreadStore::AddThread(pThread); SetThread(pThread); SetAppDomain(pThread->GetDomain()); #ifdef FEATURE_INTEROP_DEBUGGING // Ensure that debugger word slot is allocated TlsSetValue(g_debuggerWordTLSIndex, 0); #endif // We now have a Thread object visable to the RS. unmark special status. 
hCantStop.Release(); threadHolder.SuppressRelease(); FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_FullyInitialized); #ifdef DEBUGGING_SUPPORTED // // If we're debugging, let the debugger know that this // thread is up and running now. // if (CORDebuggerAttached()) { g_pDebugInterface->ThreadCreated(pThread); } else { LOG((LF_CORDB, LL_INFO10000, "ThreadCreated() not called due to CORDebuggerAttached() being FALSE for thread 0x%x\n", pThread->GetThreadId())); } #endif // DEBUGGING_SUPPORTED #ifdef PROFILING_SUPPORTED // If a profiler is present, then notify the profiler that a // thread has been created. if (!IsGCSpecialThread()) { BEGIN_PROFILER_CALLBACK(CORProfilerTrackThreads()); { GCX_PREEMP(); (&g_profControlBlock)->ThreadCreated( (ThreadID)pThread); } DWORD osThreadId = ::GetCurrentThreadId(); (&g_profControlBlock)->ThreadAssignedToOSThread( (ThreadID)pThread, osThreadId); END_PROFILER_CALLBACK(); } #endif // PROFILING_SUPPORTED _ASSERTE(!pThread->IsBackground()); // doesn't matter, but worth checking pThread->SetBackground(TRUE); ensurePreemptive.SuppressRelease(); if (IsThreadPoolWorkerSpecialThread()) { FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread); } else if (IsThreadPoolIOCompletionSpecialThread()) { FastInterlockOr ((ULONG *) &pThread->m_State, Thread::TS_CompletionPortThread); } else if (IsTimerSpecialThread() || IsWaitSpecialThread()) { FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread); } #ifdef FEATURE_EVENT_TRACE ETW::ThreadLog::FireThreadCreated(pThread); #endif // FEATURE_EVENT_TRACE return pThread; } //------------------------------------------------------------------------- // Public function: SetupThreadNoThrow() // Creates Thread for current thread if not previously created. // Returns NULL for failure (usually due to out-of-memory.) //------------------------------------------------------------------------- Thread* SetupThreadNoThrow(HRESULT *pHR) { CONTRACTL { NOTHROW; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; HRESULT hr = S_OK; Thread *pThread = GetThreadNULLOk(); if (pThread != NULL) { return pThread; } EX_TRY { pThread = SetupThread(); } EX_CATCH { // We failed SetupThread. GET_EXCEPTION() may depend on Thread object. if (__pException == NULL) { hr = E_OUTOFMEMORY; } else { hr = GET_EXCEPTION()->GetHR(); } } EX_END_CATCH(SwallowAllExceptions); if (pHR) { *pHR = hr; } return pThread; } //------------------------------------------------------------------------- // Public function: SetupUnstartedThread() // This sets up a Thread object for an exposed System.Thread that // has not been started yet. This allows us to properly enumerate all threads // in the ThreadStore, so we can report on even unstarted threads. Clearly // there is no physical thread to match, yet. 
// // When there is, complete the setup with code:Thread::HasStarted() //------------------------------------------------------------------------- Thread* SetupUnstartedThread(SetupUnstartedThreadFlags flags) { CONTRACTL { THROWS; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; Thread* pThread = new Thread(); if (flags & SUTF_ThreadStoreLockAlreadyTaken) { _ASSERTE(ThreadStore::HoldingThreadStore()); pThread->SetThreadStateNC(Thread::TSNC_TSLTakenForStartup); } FastInterlockOr((ULONG *) &pThread->m_State, (Thread::TS_Unstarted | Thread::TS_WeOwn)); ThreadStore::AddThread(pThread); return pThread; } //------------------------------------------------------------------------- // Public function: DestroyThread() // Destroys the specified Thread object, for a thread which is about to die. //------------------------------------------------------------------------- void DestroyThread(Thread *th) { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; _ASSERTE (th == GetThread()); GCX_PREEMP_NO_DTOR(); if (th->IsAbortRequested()) { // Reset trapping count. th->UnmarkThreadForAbort(); } // Clear any outstanding stale EH state that maybe still active on the thread. #ifdef FEATURE_EH_FUNCLETS ExceptionTracker::PopTrackers((void*)-1); #else // !FEATURE_EH_FUNCLETS #ifdef TARGET_X86 PTR_ThreadExceptionState pExState = th->GetExceptionState(); if (pExState->IsExceptionInProgress()) { GCX_COOP(); pExState->GetCurrentExceptionTracker()->UnwindExInfo((void *)-1); } #else // !TARGET_X86 #error Unsupported platform #endif // TARGET_X86 #endif // FEATURE_EH_FUNCLETS if (g_fEEShutDown == 0) { th->SetThreadState(Thread::TS_ReportDead); th->OnThreadTerminate(FALSE); } } //------------------------------------------------------------------------- // Public function: DetachThread() // Marks the thread as needing to be destroyed, but doesn't destroy it yet. //------------------------------------------------------------------------- HRESULT Thread::DetachThread(BOOL fDLLThreadDetach) { // !!! Can not use contract here. // !!! Contract depends on Thread object for GC_TRIGGERS. // !!! At the end of this function, we call InternalSwitchOut, // !!! and then GetThread()=NULL, and dtor of contract does not work any more. STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; // Clear any outstanding stale EH state that maybe still active on the thread. #ifdef FEATURE_EH_FUNCLETS ExceptionTracker::PopTrackers((void*)-1); #else // !FEATURE_EH_FUNCLETS #ifdef TARGET_X86 PTR_ThreadExceptionState pExState = GetExceptionState(); if (pExState->IsExceptionInProgress()) { GCX_COOP(); pExState->GetCurrentExceptionTracker()->UnwindExInfo((void *)-1); } #else // !TARGET_X86 #error Unsupported platform #endif // TARGET_X86 #endif // FEATURE_EH_FUNCLETS #ifdef FEATURE_COMINTEROP IErrorInfo *pErrorInfo; // Avoid calling GetErrorInfo() if ole32 has already executed the DLL_THREAD_DETACH, // otherwise we'll cause ole32 to re-allocate and leak its TLS data (SOleTlsData). 
if (ClrTeb::GetOleReservedPtr() != NULL && GetErrorInfo(0, &pErrorInfo) == S_OK) { // if this is our IErrorInfo, release it now - we don't want ole32 to do it later as // part of its DLL_THREAD_DETACH as we won't be able to handle the call at that point if (!ComInterfaceSlotIs(pErrorInfo, 2, Unknown_ReleaseSpecial_IErrorInfo)) { // if it's not our IErrorInfo, put it back SetErrorInfo(0, pErrorInfo); } pErrorInfo->Release(); } // Revoke our IInitializeSpy registration only if we are not in DLL_THREAD_DETACH // (COM will do it or may have already done it automatically in that case). if (!fDLLThreadDetach) { RevokeApartmentSpy(); } #endif // FEATURE_COMINTEROP _ASSERTE(!PreemptiveGCDisabled()); _ASSERTE ((m_State & Thread::TS_Detached) == 0); _ASSERTE (this == GetThread()); FastInterlockIncrement(&Thread::m_DetachCount); if (IsAbortRequested()) { // Reset trapping count. UnmarkThreadForAbort(); } if (!IsBackground()) { FastInterlockIncrement(&Thread::m_ActiveDetachCount); ThreadStore::CheckForEEShutdown(); } HANDLE hThread = GetThreadHandle(); SetThreadHandle (INVALID_HANDLE_VALUE); while (m_dwThreadHandleBeingUsed > 0) { // Another thread is using the handle now. #undef Sleep // We can not call __SwitchToThread since we can not go back to host. ::Sleep(10); #define Sleep(a) Dont_Use_Sleep(a) } if (m_WeOwnThreadHandle && m_ThreadHandleForClose == INVALID_HANDLE_VALUE) { m_ThreadHandleForClose = hThread; } // We need to make sure that TLS are touched last here. SetThread(NULL); SetAppDomain(NULL); FastInterlockOr((ULONG*)&m_State, (int) (Thread::TS_Detached | Thread::TS_ReportDead)); // Do not touch Thread object any more. It may be destroyed. // These detached threads will be cleaned up by finalizer thread. But if the process uses // little managed heap, it will be a while before GC happens, and finalizer thread starts // working on detached thread. So we wake up finalizer thread to clean up resources. // // (It's possible that this is the startup thread, and startup failed, and so the finalization // machinery isn't fully initialized. Hence this check.) if (g_fEEStarted) FinalizerThread::EnableFinalization(); return S_OK; } DWORD GetRuntimeId() { LIMITED_METHOD_CONTRACT; #ifdef HOST_WINDOWS return _tls_index; #else return 0; #endif } //--------------------------------------------------------------------------- // Creates new Thread for reverse p-invoke calls. //--------------------------------------------------------------------------- Thread* WINAPI CreateThreadBlockThrow() { WRAPPER_NO_CONTRACT; // This is a workaround to disable our check for throwing exception in SetupThread. // We want to throw an exception for reverse p-invoke, and our assertion may fire if // a unmanaged caller does not setup an exception handler. CONTRACT_VIOLATION(ThrowsViolation); // WON'T FIX - This enables catastrophic failure exception in reverse P/Invoke - the only way we can communicate an error to legacy code. Thread* pThread = NULL; BEGIN_ENTRYPOINT_THROWS; HRESULT hr = S_OK; pThread = SetupThreadNoThrow(&hr); if (pThread == NULL) { // Creating Thread failed, and we need to throw an exception to report status. // It is misleading to use our COM+ exception code, since this is not a managed exception. 
ULONG_PTR arg = hr; RaiseException(EXCEPTION_EXX, 0, 1, &arg); } END_ENTRYPOINT_THROWS; return pThread; } #ifdef _DEBUG DWORD_PTR Thread::OBJREF_HASH = OBJREF_TABSIZE; #endif extern "C" void STDCALL JIT_PatchedCodeStart(); extern "C" void STDCALL JIT_PatchedCodeLast(); static void* s_barrierCopy = NULL; BYTE* GetWriteBarrierCodeLocation(VOID* barrier) { if (IsWriteBarrierCopyEnabled()) { return (BYTE*)PINSTRToPCODE((TADDR)s_barrierCopy + ((TADDR)barrier - (TADDR)JIT_PatchedCodeStart)); } else { return (BYTE*)barrier; } } BOOL IsIPInWriteBarrierCodeCopy(PCODE controlPc) { if (IsWriteBarrierCopyEnabled()) { return (s_barrierCopy <= (void*)controlPc && (void*)controlPc < ((BYTE*)s_barrierCopy + ((BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart))); } else { return FALSE; } } PCODE AdjustWriteBarrierIP(PCODE controlPc) { _ASSERTE(IsIPInWriteBarrierCodeCopy(controlPc)); // Pretend we were executing the barrier function at its original location so that the unwinder can unwind the frame return (PCODE)JIT_PatchedCodeStart + (controlPc - (PCODE)s_barrierCopy); } #ifdef TARGET_X86 extern "C" void *JIT_WriteBarrierEAX_Loc; #else extern "C" void *JIT_WriteBarrier_Loc; #endif #ifdef TARGET_ARM64 extern "C" void (*JIT_WriteBarrier_Table)(); extern "C" void *JIT_WriteBarrier_Loc; void *JIT_WriteBarrier_Loc = 0; extern "C" void *JIT_WriteBarrier_Table_Loc; void *JIT_WriteBarrier_Table_Loc = 0; #endif // TARGET_ARM64 #ifdef TARGET_ARM extern "C" void *JIT_WriteBarrier_Loc = 0; #endif // TARGET_ARM #ifndef TARGET_UNIX // g_TlsIndex is only used by the DAC. Disable optimizations around it to prevent it from getting optimized out. #pragma optimize("", off) static void SetIlsIndex(DWORD tlsIndex) { g_TlsIndex = tlsIndex; } #pragma optimize("", on) #endif //--------------------------------------------------------------------------- // One-time initialization. Called during Dll initialization. So // be careful what you do in here! //--------------------------------------------------------------------------- void InitThreadManager() { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; // All patched helpers should fit into one page. // If you hit this assert on retail build, there is most likely problem with BBT script. _ASSERTE_ALL_BUILDS("clr/src/VM/threads.cpp", (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart > (ptrdiff_t)0); _ASSERTE_ALL_BUILDS("clr/src/VM/threads.cpp", (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart < (ptrdiff_t)GetOsPageSize()); if (IsWriteBarrierCopyEnabled()) { s_barrierCopy = ExecutableAllocator::Instance()->Reserve(g_SystemInfo.dwAllocationGranularity); ExecutableAllocator::Instance()->Commit(s_barrierCopy, g_SystemInfo.dwAllocationGranularity, true); if (s_barrierCopy == NULL) { _ASSERTE(!"Allocation of GC barrier code page failed"); COMPlusThrowWin32(); } { size_t writeBarrierSize = (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart; ExecutableWriterHolder<void> barrierWriterHolder(s_barrierCopy, writeBarrierSize); memcpy(barrierWriterHolder.GetRW(), (BYTE*)JIT_PatchedCodeStart, writeBarrierSize); } // Store the JIT_WriteBarrier copy location to a global variable so that helpers // can jump to it. 
#ifdef TARGET_X86 JIT_WriteBarrierEAX_Loc = GetWriteBarrierCodeLocation((void*)JIT_WriteBarrierEAX); #define X86_WRITE_BARRIER_REGISTER(reg) \ SetJitHelperFunction(CORINFO_HELP_ASSIGN_REF_##reg, GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier##reg)); \ ETW::MethodLog::StubInitialized((ULONGLONG)GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier##reg), W("@WriteBarrier" #reg)); ENUM_X86_WRITE_BARRIER_REGISTERS() #undef X86_WRITE_BARRIER_REGISTER #else // TARGET_X86 JIT_WriteBarrier_Loc = GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier); #endif // TARGET_X86 SetJitHelperFunction(CORINFO_HELP_ASSIGN_REF, GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier)); ETW::MethodLog::StubInitialized((ULONGLONG)GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier), W("@WriteBarrier")); #ifdef TARGET_ARM64 // Store the JIT_WriteBarrier_Table copy location to a global variable so that it can be updated. JIT_WriteBarrier_Table_Loc = GetWriteBarrierCodeLocation((void*)&JIT_WriteBarrier_Table); #endif // TARGET_ARM64 #if defined(TARGET_ARM64) || defined(TARGET_ARM) SetJitHelperFunction(CORINFO_HELP_CHECKED_ASSIGN_REF, GetWriteBarrierCodeLocation((void*)JIT_CheckedWriteBarrier)); ETW::MethodLog::StubInitialized((ULONGLONG)GetWriteBarrierCodeLocation((void*)JIT_CheckedWriteBarrier), W("@CheckedWriteBarrier")); SetJitHelperFunction(CORINFO_HELP_ASSIGN_BYREF, GetWriteBarrierCodeLocation((void*)JIT_ByRefWriteBarrier)); ETW::MethodLog::StubInitialized((ULONGLONG)GetWriteBarrierCodeLocation((void*)JIT_ByRefWriteBarrier), W("@ByRefWriteBarrier")); #endif // TARGET_ARM64 || TARGET_ARM } else { // I am using virtual protect to cover the entire range that this code falls in. // // We could reset it to non-writeable inbetween GCs and such, but then we'd have to keep on re-writing back and forth, // so instead we'll leave it writable from here forward. DWORD oldProt; if (!ClrVirtualProtect((void *)JIT_PatchedCodeStart, (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart, PAGE_EXECUTE_READWRITE, &oldProt)) { _ASSERTE(!"ClrVirtualProtect of code page failed"); COMPlusThrowWin32(); } #ifdef TARGET_X86 JIT_WriteBarrierEAX_Loc = (void*)JIT_WriteBarrierEAX; #else JIT_WriteBarrier_Loc = (void*)JIT_WriteBarrier; #endif #ifdef TARGET_ARM64 // Store the JIT_WriteBarrier_Table copy location to a global variable so that it can be updated. JIT_WriteBarrier_Table_Loc = (void*)&JIT_WriteBarrier_Table; #endif // TARGET_ARM64 } #ifndef TARGET_UNIX _ASSERTE(GetThreadNULLOk() == NULL); size_t offsetOfCurrentThreadInfo = Thread::GetOffsetOfThreadStatic(&gCurrentThreadInfo); _ASSERTE(offsetOfCurrentThreadInfo < 0x8000); _ASSERTE(_tls_index < 0x10000); // Save gCurrentThreadInfo location for debugger SetIlsIndex((DWORD)(_tls_index + (offsetOfCurrentThreadInfo << 16) + 0x80000000)); _ASSERTE(g_TrapReturningThreads == 0); #endif // !TARGET_UNIX #ifdef FEATURE_INTEROP_DEBUGGING g_debuggerWordTLSIndex = TlsAlloc(); if (g_debuggerWordTLSIndex == TLS_OUT_OF_INDEXES) COMPlusThrowWin32(); #endif IfFailThrow(Thread::CLRSetThreadStackGuarantee(Thread::STSGuarantee_Force)); ThreadStore::InitThreadStore(); // NOTE: CRST_UNSAFE_ANYMODE prevents a GC mode switch when entering this crst. // If you remove this flag, we will switch to preemptive mode when entering // g_DeadlockAwareCrst, which means all functions that enter it will become // GC_TRIGGERS. (This includes all uses of CrstHolder.) So be sure // to update the contracts if you remove this flag. 
g_DeadlockAwareCrst.Init(CrstDeadlockDetection, CRST_UNSAFE_ANYMODE); #ifdef _DEBUG // Randomize OBJREF_HASH to handle hash collision. Thread::OBJREF_HASH = OBJREF_TABSIZE - (DbgGetEXETimeStamp()%10); #endif // _DEBUG ThreadSuspend::Initialize(); } //************************************************************************ // Thread members //************************************************************************ #if defined(_DEBUG) && defined(TRACK_SYNC) // One outstanding synchronization held by this thread: struct Dbg_TrackSyncEntry { UINT_PTR m_caller; AwareLock *m_pAwareLock; BOOL Equiv (UINT_PTR caller, void *pAwareLock) { LIMITED_METHOD_CONTRACT; return (m_caller == caller) && (m_pAwareLock == pAwareLock); } BOOL Equiv (void *pAwareLock) { LIMITED_METHOD_CONTRACT; return (m_pAwareLock == pAwareLock); } }; // Each thread has a stack that tracks all enter and leave requests struct Dbg_TrackSyncStack : public Dbg_TrackSync { enum { MAX_TRACK_SYNC = 20, // adjust stack depth as necessary }; void EnterSync (UINT_PTR caller, void *pAwareLock); void LeaveSync (UINT_PTR caller, void *pAwareLock); Dbg_TrackSyncEntry m_Stack [MAX_TRACK_SYNC]; UINT_PTR m_StackPointer; BOOL m_Active; Dbg_TrackSyncStack() : m_StackPointer(0), m_Active(TRUE) { LIMITED_METHOD_CONTRACT; } }; void Dbg_TrackSyncStack::EnterSync(UINT_PTR caller, void *pAwareLock) { LIMITED_METHOD_CONTRACT; STRESS_LOG4(LF_SYNC, LL_INFO100, "Dbg_TrackSyncStack::EnterSync, IP=%p, Recursion=%u, LockState=%x, HoldingThread=%p.\n", caller, ((AwareLock*)pAwareLock)->GetRecursionLevel(), ((AwareLock*)pAwareLock)->GetLockState(), ((AwareLock*)pAwareLock)->GetHoldingThread()); if (m_Active) { if (m_StackPointer >= MAX_TRACK_SYNC) { _ASSERTE(!"Overflowed synchronization stack checking. Disabling"); m_Active = FALSE; return; } } m_Stack[m_StackPointer].m_caller = caller; m_Stack[m_StackPointer].m_pAwareLock = (AwareLock *) pAwareLock; m_StackPointer++; } void Dbg_TrackSyncStack::LeaveSync(UINT_PTR caller, void *pAwareLock) { WRAPPER_NO_CONTRACT; STRESS_LOG4(LF_SYNC, LL_INFO100, "Dbg_TrackSyncStack::LeaveSync, IP=%p, Recursion=%u, LockState=%x, HoldingThread=%p.\n", caller, ((AwareLock*)pAwareLock)->GetRecursionLevel(), ((AwareLock*)pAwareLock)->GetLockState(), ((AwareLock*)pAwareLock)->GetHoldingThread()); if (m_Active) { if (m_StackPointer == 0) _ASSERTE(!"Underflow in leaving synchronization"); else if (m_Stack[m_StackPointer - 1].Equiv(pAwareLock)) { m_StackPointer--; } else { for (int i=m_StackPointer - 2; i>=0; i--) { if (m_Stack[i].Equiv(pAwareLock)) { _ASSERTE(!"Locks are released out of order. 
This might be okay..."); memcpy(&m_Stack[i], &m_Stack[i+1], sizeof(m_Stack[0]) * (m_StackPointer - i - 1)); return; } } _ASSERTE(!"Trying to release a synchronization lock which isn't held"); } } } #endif // TRACK_SYNC static DWORD dwHashCodeSeed = 123456789; //-------------------------------------------------------------------- // Thread construction //-------------------------------------------------------------------- Thread::Thread() { CONTRACTL { THROWS; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; m_pFrame = FRAME_TOP; m_pGCFrame = NULL; m_fPreemptiveGCDisabled = 0; #ifdef _DEBUG m_ulForbidTypeLoad = 0; m_GCOnTransitionsOK = TRUE; #endif #ifdef ENABLE_CONTRACTS m_ulEnablePreemptiveGCCount = 0; #endif #ifdef _DEBUG dbg_m_cSuspendedThreads = 0; dbg_m_cSuspendedThreadsWithoutOSLock = 0; m_Creator.Clear(); m_dwUnbreakableLockCount = 0; #endif m_dwForbidSuspendThread = 0; // Initialize lock state m_pHead = &m_embeddedEntry; m_embeddedEntry.pNext = m_pHead; m_embeddedEntry.pPrev = m_pHead; m_embeddedEntry.dwLLockID = 0; m_embeddedEntry.dwULockID = 0; m_embeddedEntry.wReaderLevel = 0; m_pBlockingLock = NULL; m_alloc_context.init(); m_thAllocContextObj = 0; m_UserInterrupt = 0; m_WaitEventLink.m_Next = NULL; m_WaitEventLink.m_LinkSB.m_pNext = NULL; m_ThreadHandle = INVALID_HANDLE_VALUE; m_ThreadHandleForClose = INVALID_HANDLE_VALUE; m_ThreadHandleForResume = INVALID_HANDLE_VALUE; m_WeOwnThreadHandle = FALSE; #ifdef _DEBUG m_ThreadId = UNINITIALIZED_THREADID; #endif //_DEBUG // Initialize this variable to a very different start value for each thread // Using linear congruential generator from Knuth Vol. 2, p. 102, line 24 dwHashCodeSeed = dwHashCodeSeed * 1566083941 + 1; m_dwHashCodeSeed = dwHashCodeSeed; m_hijackLock = FALSE; m_OSThreadId = 0; m_Priority = INVALID_THREAD_PRIORITY; m_ExternalRefCount = 1; m_State = TS_Unstarted; m_StateNC = TSNC_Unknown; // It can't be a LongWeakHandle because we zero stuff out of the exposed // object as it is finalized. At that point, calls to GetCurrentThread() // had better get a new one,! 
m_ExposedObject = CreateGlobalShortWeakHandle(NULL); GlobalShortWeakHandleHolder exposedObjectHolder(m_ExposedObject); m_StrongHndToExposedObject = CreateGlobalStrongHandle(NULL); GlobalStrongHandleHolder strongHndToExposedObjectHolder(m_StrongHndToExposedObject); m_LastThrownObjectHandle = NULL; m_ltoIsUnhandled = FALSE; m_debuggerFilterContext = NULL; m_fInteropDebuggingHijacked = FALSE; m_profilerCallbackState = 0; for (int i = 0; i < MAX_NOTIFICATION_PROFILERS + 1; ++i) { m_dwProfilerEvacuationCounters[i] = 0; } m_pProfilerFilterContext = NULL; m_CacheStackBase = 0; m_CacheStackLimit = 0; m_CacheStackSufficientExecutionLimit = 0; m_CacheStackStackAllocNonRiskyExecutionLimit = 0; #ifdef _DEBUG m_pCleanedStackBase = NULL; #endif #ifdef STACK_GUARDS_DEBUG m_pCurrentStackGuard = NULL; #endif #ifdef FEATURE_HIJACK m_ppvHJRetAddrPtr = (VOID**) 0xCCCCCCCCCCCCCCCC; m_pvHJRetAddr = (VOID*) 0xCCCCCCCCCCCCCCCC; #ifndef TARGET_UNIX X86_ONLY(m_LastRedirectIP = 0); X86_ONLY(m_SpinCount = 0); #endif // TARGET_UNIX #endif // FEATURE_HIJACK #if defined(_DEBUG) && defined(TRACK_SYNC) m_pTrackSync = new Dbg_TrackSyncStack; NewHolder<Dbg_TrackSyncStack> trackSyncHolder(static_cast<Dbg_TrackSyncStack*>(m_pTrackSync)); #endif // TRACK_SYNC m_PreventAsync = 0; #ifdef FEATURE_COMINTEROP m_fDisableComObjectEagerCleanup = false; #endif //FEATURE_COMINTEROP m_fHasDeadThreadBeenConsideredForGCTrigger = false; m_TraceCallCount = 0; m_ThrewControlForThread = 0; m_ThreadTasks = (ThreadTasks)0; m_pLoadLimiter= NULL; // The state and the tasks must be 32-bit aligned for atomicity to be guaranteed. _ASSERTE((((size_t) &m_State) & 3) == 0); _ASSERTE((((size_t) &m_ThreadTasks) & 3) == 0); // On all callbacks, call the trap code, which we now have // wired to cause a GC. Thus we will do a GC on all Transition Frame Transitions (and more). 
if (GCStress<cfg_transition>::IsEnabled()) { m_State = (ThreadState) (m_State | TS_GCOnTransitions); } m_AbortType = EEPolicy::TA_None; m_AbortEndTime = MAXULONGLONG; m_RudeAbortEndTime = MAXULONGLONG; m_AbortController = 0; m_AbortRequestLock = 0; m_fRudeAbortInitiated = FALSE; m_pIOCompletionContext = NULL; #ifdef _DEBUG m_fRudeAborted = FALSE; m_dwAbortPoint = 0; #endif m_OSContext = new CONTEXT(); NewHolder<CONTEXT> contextHolder(m_OSContext); m_pSavedRedirectContext = NULL; m_pOSContextBuffer = NULL; #ifdef _DEBUG m_RedirectContextInUse = false; #endif #ifdef FEATURE_COMINTEROP m_pRCWStack = new RCWStackHeader(); #endif #ifdef _DEBUG m_bGCStressing = FALSE; m_bUniqueStacking = FALSE; #endif m_pPendingTypeLoad = NULL; m_pIBCInfo = NULL; m_dwAVInRuntimeImplOkayCount = 0; #if defined(HAVE_GCCOVER) && defined(USE_REDIRECT_FOR_GCSTRESS) && !defined(TARGET_UNIX) // GCCOVER m_fPreemptiveGCDisabledForGCStress = false; #endif #ifdef _DEBUG m_pHelperMethodFrameCallerList = (HelperMethodFrameCallerList*)-1; #endif m_pExceptionDuringStartup = NULL; #ifdef HAVE_GCCOVER m_pbDestCode = NULL; m_pbSrcCode = NULL; #if defined(GCCOVER_TOLERATE_SPURIOUS_AV) m_pLastAVAddress = NULL; #endif // defined(GCCOVER_TOLERATE_SPURIOUS_AV) #endif // HAVE_GCCOVER m_debuggerActivePatchSkipper = NULL; m_dwThreadHandleBeingUsed = 0; SetProfilerCallbacksAllowed(TRUE); m_pCreatingThrowableForException = NULL; #ifdef FEATURE_EH_FUNCLETS m_dwIndexClauseForCatch = 0; m_sfEstablisherOfActualHandlerFrame.Clear(); #endif // FEATURE_EH_FUNCLETS m_workerThreadPoolCompletionCount = 0; m_ioThreadPoolCompletionCount = 0; m_monitorLockContentionCount = 0; m_pDomain = SystemDomain::System()->DefaultDomain(); // Do not expose thread until it is fully constructed g_pThinLockThreadIdDispenser->NewId(this, this->m_ThreadId); // // DO NOT ADD ADDITIONAL CONSTRUCTION AFTER THIS POINT. // NewId() allows this Thread instance to be accessed via a Thread Id. Do not // add additional construction after this point to prevent the race condition // of accessing a partially constructed Thread via Thread Id lookup. // exposedObjectHolder.SuppressRelease(); strongHndToExposedObjectHolder.SuppressRelease(); #if defined(_DEBUG) && defined(TRACK_SYNC) trackSyncHolder.SuppressRelease(); #endif contextHolder.SuppressRelease(); #ifdef FEATURE_COMINTEROP m_uliInitializeSpyCookie.QuadPart = 0ul; m_fInitializeSpyRegistered = false; m_pLastSTACtxCookie = NULL; #endif // FEATURE_COMINTEROP m_fGCSpecial = FALSE; #ifndef TARGET_UNIX m_wCPUGroup = 0; m_pAffinityMask = 0; #endif // !TARGET_UNIX m_pAllLoggedTypes = NULL; #ifdef FEATURE_PERFTRACING memset(&m_activityId, 0, sizeof(m_activityId)); #endif // FEATURE_PERFTRACING m_HijackReturnKind = RT_Illegal; m_currentPrepareCodeConfig = nullptr; m_isInForbidSuspendForDebuggerRegion = false; m_hasPendingActivation = false; #ifdef _DEBUG memset(dangerousObjRefs, 0, sizeof(dangerousObjRefs)); #endif // _DEBUG } //-------------------------------------------------------------------- // Failable initialization occurs here. 
//-------------------------------------------------------------------- void Thread::InitThread() { CONTRACTL { THROWS; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; HANDLE hDup = INVALID_HANDLE_VALUE; BOOL ret = TRUE; // This message actually serves a purpose (which is why it is always run) // The Stress log is run during hijacking, when other threads can be suspended // at arbitrary locations (including when holding a lock that NT uses to serialize // all memory allocations). By sending a message now, we insure that the stress // log will not allocate memory at these critical times an avoid deadlock. STRESS_LOG2(LF_ALWAYS, LL_ALWAYS, "SetupThread managed Thread %p Thread Id = %x\n", this, GetThreadId()); #ifndef TARGET_UNIX // workaround: Remove this when we flow impersonation token to host. BOOL reverted = FALSE; HANDLE threadToken = INVALID_HANDLE_VALUE; #endif // !TARGET_UNIX if (m_ThreadHandle == INVALID_HANDLE_VALUE) { // For WinCE, all clients have the same handle for a thread. Duplication is // not possible. We make sure we never close this handle unless we created // the thread (TS_WeOwn). // // For Win32, each client has its own handle. This is achieved by duplicating // the pseudo-handle from ::GetCurrentThread(). Unlike WinCE, this service // returns a pseudo-handle which is only useful for duplication. In this case // each client is responsible for closing its own (duplicated) handle. // // We don't bother duplicating if WeOwn, because we created the handle in the // first place. // Thread is created when or after the physical thread started running HANDLE curProcess = ::GetCurrentProcess(); #ifndef TARGET_UNIX // If we're impersonating on NT, then DuplicateHandle(GetCurrentThread()) is going to give us a handle with only // THREAD_TERMINATE, THREAD_QUERY_INFORMATION, and THREAD_SET_INFORMATION. This doesn't include // THREAD_SUSPEND_RESUME nor THREAD_GET_CONTEXT. We need to be able to suspend the thread, and we need to be // able to get its context. Therefore, if we're impersonating, we revert to self, dup the handle, then // re-impersonate before we leave this routine. if (!RevertIfImpersonated(&reverted, &threadToken)) { COMPlusThrowWin32(); } class EnsureResetThreadToken { private: BOOL m_NeedReset; HANDLE m_threadToken; public: EnsureResetThreadToken(HANDLE threadToken, BOOL reverted) { m_threadToken = threadToken; m_NeedReset = reverted; } ~EnsureResetThreadToken() { UndoRevert(m_NeedReset, m_threadToken); if (m_threadToken != INVALID_HANDLE_VALUE) { CloseHandle(m_threadToken); } } }; EnsureResetThreadToken resetToken(threadToken, reverted); #endif // !TARGET_UNIX if (::DuplicateHandle(curProcess, ::GetCurrentThread(), curProcess, &hDup, 0 /*ignored*/, FALSE /*inherit*/, DUPLICATE_SAME_ACCESS)) { _ASSERTE(hDup != INVALID_HANDLE_VALUE); SetThreadHandle(hDup); m_WeOwnThreadHandle = TRUE; } else { COMPlusThrowWin32(); } } if ((m_State & TS_WeOwn) == 0) { if (!AllocHandles()) { ThrowOutOfMemory(); } } _ASSERTE(HasValidThreadHandle()); m_random.Init(); // Set floating point mode to round to nearest #ifndef TARGET_UNIX (void) _controlfp_s( NULL, _RC_NEAR, _RC_CHOP|_RC_UP|_RC_DOWN|_RC_NEAR ); m_pTEB = (struct _NT_TIB*)NtCurrentTeb(); #endif // !TARGET_UNIX if (m_CacheStackBase == 0) { _ASSERTE(m_CacheStackLimit == 0); ret = SetStackLimits(fAll); if (ret == FALSE) { ThrowOutOfMemory(); } } ret = Thread::AllocateIOCompletionContext(); if (!ret) { ThrowOutOfMemory(); } } // Allocate all the handles. 
When we are kicking of a new thread, we can call // here before the thread starts running. BOOL Thread::AllocHandles() { WRAPPER_NO_CONTRACT; _ASSERTE(!m_DebugSuspendEvent.IsValid()); _ASSERTE(!m_EventWait.IsValid()); BOOL fOK = TRUE; EX_TRY { // create a manual reset event for getting the thread to a safe point m_DebugSuspendEvent.CreateManualEvent(FALSE); m_EventWait.CreateManualEvent(TRUE); } EX_CATCH { fOK = FALSE; if (!m_DebugSuspendEvent.IsValid()) { m_DebugSuspendEvent.CloseEvent(); } if (!m_EventWait.IsValid()) { m_EventWait.CloseEvent(); } } EX_END_CATCH(RethrowTerminalExceptions); return fOK; } //-------------------------------------------------------------------- // This is the alternate path to SetupThread/InitThread. If we created // an unstarted thread, we have SetupUnstartedThread/HasStarted. //-------------------------------------------------------------------- BOOL Thread::HasStarted() { CONTRACTL { NOTHROW; DISABLED(GC_NOTRIGGER); } CONTRACTL_END; _ASSERTE(!m_fPreemptiveGCDisabled); // can't use PreemptiveGCDisabled() here // This is cheating a little. There is a pathway here from SetupThread, but only // via IJW SystemDomain::RunDllMain. Normally SetupThread returns a thread in // preemptive mode, ready for a transition. But in the IJW case, it can return a // cooperative mode thread. RunDllMain handles this "surprise" correctly. m_fPreemptiveGCDisabled = TRUE; // Normally, HasStarted is called from the thread's entrypoint to introduce it to // the runtime. But sometimes that thread is used for DLL_THREAD_ATTACH notifications // that call into managed code. In that case, the second HasStarted call is // redundant and should be ignored. if (GetThreadNULLOk() == this) return TRUE; _ASSERTE(GetThreadNULLOk() == 0); _ASSERTE(HasValidThreadHandle()); BOOL fCanCleanupCOMState = FALSE; BOOL res = TRUE; res = SetStackLimits(fAll); if (res == FALSE) { m_pExceptionDuringStartup = Exception::GetOOMException(); goto FAILURE; } // If any exception happens during HasStarted, we will cache the exception in Thread::m_pExceptionDuringStartup // which will be thrown in Thread.Start as an internal exception EX_TRY { SetupTLSForThread(); InitThread(); fCanCleanupCOMState = TRUE; // Preparing the COM apartment and context may attempt // to transition to Preemptive mode. At this point in // the thread's lifetime this can be a bad thing if a GC // is triggered (e.g. GCStress). Do the preparation prior // to the thread being set so the Preemptive mode transition // is a no-op. PrepareApartmentAndContext(); SetThread(this); SetAppDomain(m_pDomain); ThreadStore::TransferStartedThread(this); #ifdef FEATURE_EVENT_TRACE ETW::ThreadLog::FireThreadCreated(this); #endif // FEATURE_EVENT_TRACE } EX_CATCH { if (__pException != NULL) { __pException.SuppressRelease(); m_pExceptionDuringStartup = __pException; } res = FALSE; } EX_END_CATCH(SwallowAllExceptions); if (res == FALSE) goto FAILURE; FastInterlockOr((ULONG *) &m_State, TS_FullyInitialized); #ifdef DEBUGGING_SUPPORTED // // If we're debugging, let the debugger know that this // thread is up and running now. // if (CORDebuggerAttached()) { g_pDebugInterface->ThreadCreated(this); } else { LOG((LF_CORDB, LL_INFO10000, "ThreadCreated() not called due to CORDebuggerAttached() being FALSE for thread 0x%x\n", GetThreadId())); } #endif // DEBUGGING_SUPPORTED #ifdef PROFILING_SUPPORTED // If a profiler is running, let them know about the new thread. // // The call to IsGCSpecial is crucial to avoid a deadlock. 
See code:Thread::m_fGCSpecial for more // information if (!IsGCSpecial()) { BEGIN_PROFILER_CALLBACK(CORProfilerTrackThreads()); BOOL gcOnTransition = GC_ON_TRANSITIONS(FALSE); // disable GCStress 2 to avoid the profiler receiving a RuntimeThreadSuspended notification even before the ThreadCreated notification { GCX_PREEMP(); (&g_profControlBlock)->ThreadCreated((ThreadID) this); } GC_ON_TRANSITIONS(gcOnTransition); DWORD osThreadId = ::GetCurrentThreadId(); (&g_profControlBlock)->ThreadAssignedToOSThread( (ThreadID) this, osThreadId); END_PROFILER_CALLBACK(); } #endif // PROFILING_SUPPORTED // Reset the ThreadStoreLock state flag since the thread // has now been started. ResetThreadStateNC(Thread::TSNC_TSLTakenForStartup); return TRUE; FAILURE: if (m_fPreemptiveGCDisabled) { m_fPreemptiveGCDisabled = FALSE; } _ASSERTE (HasThreadState(TS_Unstarted)); SetThreadState(TS_FailStarted); if (GetThreadNULLOk() != NULL && IsAbortRequested()) UnmarkThreadForAbort(); #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT // // Undo the platform context initialization, so we don't leak a CoInitialize. // if (fCanCleanupCOMState) { // The thread pointer in TLS may not be set yet, if we had a failure before we set it. // So we'll set it up here (we'll unset it a few lines down). SetThread(this); CleanupCOMState(); } #endif FastInterlockDecrement(&ThreadStore::s_pThreadStore->m_PendingThreadCount); // One of the components of OtherThreadsComplete() has changed, so check whether // we should now exit the EE. ThreadStore::CheckForEEShutdown(); DecExternalCount(/*holdingLock*/ HasThreadStateNC(Thread::TSNC_TSLTakenForStartup)); SetThread(NULL); SetAppDomain(NULL); return FALSE; } BOOL Thread::AllocateIOCompletionContext() { WRAPPER_NO_CONTRACT; PIOCompletionContext pIOC = new (nothrow) IOCompletionContext; if(pIOC != NULL) { pIOC->lpOverlapped = NULL; m_pIOCompletionContext = pIOC; return TRUE; } else { return FALSE; } } VOID Thread::FreeIOCompletionContext() { WRAPPER_NO_CONTRACT; if (m_pIOCompletionContext != NULL) { PIOCompletionContext pIOC = (PIOCompletionContext) m_pIOCompletionContext; delete pIOC; m_pIOCompletionContext = NULL; } } void Thread::HandleThreadStartupFailure() { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; _ASSERTE(GetThreadNULLOk() != NULL); struct ProtectArgs { OBJECTREF pThrowable; OBJECTREF pReason; } args; memset(&args, 0, sizeof(ProtectArgs)); GCPROTECT_BEGIN(args); MethodTable *pMT = CoreLibBinder::GetException(kThreadStartException); args.pThrowable = AllocateObject(pMT); MethodDescCallSite exceptionCtor(METHOD__THREAD_START_EXCEPTION__EX_CTOR); if (m_pExceptionDuringStartup) { args.pReason = CLRException::GetThrowableFromException(m_pExceptionDuringStartup); Exception::Delete(m_pExceptionDuringStartup); m_pExceptionDuringStartup = NULL; } ARG_SLOT args1[] = { ObjToArgSlot(args.pThrowable), ObjToArgSlot(args.pReason), }; exceptionCtor.Call(args1); GCPROTECT_END(); //Prot RaiseTheExceptionInternalOnly(args.pThrowable, FALSE); } #ifndef TARGET_UNIX BOOL RevertIfImpersonated(BOOL *bReverted, HANDLE *phToken) { WRAPPER_NO_CONTRACT; BOOL bImpersonated = OpenThreadToken(GetCurrentThread(), // we are assuming that if this call fails, TOKEN_IMPERSONATE, // we are not impersonating. There is no win32 TRUE, // api to figure this out. The only alternative phToken); // is to use NtCurrentTeb->IsImpersonating(). 
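    // If a token was opened, this thread is impersonating. Revert to the process
    // identity so that callers such as InitThread can duplicate the thread handle
    // with full access rights before re-impersonating.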
if (bImpersonated) { *bReverted = RevertToSelf(); return *bReverted; } return TRUE; } void UndoRevert(BOOL bReverted, HANDLE hToken) { if (bReverted) { if (!SetThreadToken(NULL, hToken)) { _ASSERT("Undo Revert -> SetThreadToken failed"); STRESS_LOG1(LF_EH, LL_INFO100, "UndoRevert/SetThreadToken failed for hToken = %d\n",hToken); EEPOLICY_HANDLE_FATAL_ERROR(COR_E_SECURITY); } } return; } #endif // !TARGET_UNIX // We don't want ::CreateThread() calls scattered throughout the source. So gather // them all here. BOOL Thread::CreateNewThread(SIZE_T stackSize, LPTHREAD_START_ROUTINE start, void *args, LPCWSTR pName) { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; BOOL bRet; //This assert is here to prevent a bug in the future // CreateTask currently takes a DWORD and we will downcast // if that interface changes to take a SIZE_T this Assert needs to be removed. // _ASSERTE(stackSize <= 0xFFFFFFFF); #ifndef TARGET_UNIX HandleHolder token; BOOL bReverted = FALSE; bRet = RevertIfImpersonated(&bReverted, &token); if (bRet != TRUE) return bRet; #endif // !TARGET_UNIX m_StateNC = (ThreadStateNoConcurrency)((ULONG)m_StateNC | TSNC_CLRCreatedThread); bRet = CreateNewOSThread(stackSize, start, args); #ifndef TARGET_UNIX UndoRevert(bReverted, token); #endif // !TARGET_UNIX if (pName != NULL) SetThreadName(m_ThreadHandle, pName); return bRet; } void Thread::InitializationForManagedThreadInNative(_In_ Thread* pThread) { CONTRACTL { NOTHROW; MODE_ANY; GC_TRIGGERS; PRECONDITION(pThread != NULL); } CONTRACTL_END; #ifdef FEATURE_OBJCMARSHAL { GCX_COOP_THREAD_EXISTS(pThread); PREPARE_NONVIRTUAL_CALLSITE(METHOD__AUTORELEASEPOOL__CREATEAUTORELEASEPOOL); DECLARE_ARGHOLDER_ARRAY(args, 0); CALL_MANAGED_METHOD_NORET(args); } #endif // FEATURE_OBJCMARSHAL } void Thread::CleanUpForManagedThreadInNative(_In_ Thread* pThread) { CONTRACTL { NOTHROW; MODE_ANY; GC_TRIGGERS; PRECONDITION(pThread != NULL); } CONTRACTL_END; #ifdef FEATURE_OBJCMARSHAL { GCX_COOP_THREAD_EXISTS(pThread); PREPARE_NONVIRTUAL_CALLSITE(METHOD__AUTORELEASEPOOL__DRAINAUTORELEASEPOOL); DECLARE_ARGHOLDER_ARRAY(args, 0); CALL_MANAGED_METHOD_NORET(args); } #endif // FEATURE_OBJCMARSHAL } HANDLE Thread::CreateUtilityThread(Thread::StackSizeBucket stackSizeBucket, LPTHREAD_START_ROUTINE start, void *args, LPCWSTR pName, DWORD flags, DWORD* pThreadId) { LIMITED_METHOD_CONTRACT; // TODO: we should always use small stacks for most of these threads. For CLR 4, we're being conservative // here because this is a last-minute fix. 
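    // Map the requested bucket onto a concrete reservation size; the switch below
    // uses 256KB, 512KB and 1MB for Small, Medium and Large respectively.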
SIZE_T stackSize; switch (stackSizeBucket) { case StackSize_Small: stackSize = 256 * 1024; break; case StackSize_Medium: stackSize = 512 * 1024; break; default: _ASSERTE(!"Bad stack size bucket"); break; case StackSize_Large: stackSize = 1024 * 1024; break; } flags |= STACK_SIZE_PARAM_IS_A_RESERVATION; DWORD threadId; HANDLE hThread = CreateThread(NULL, stackSize, start, args, flags, &threadId); SetThreadName(hThread, pName); if (pThreadId) *pThreadId = threadId; return hThread; } // Represent the value of DEFAULT_STACK_SIZE as passed in the property bag to the host during construction static unsigned long s_defaultStackSizeProperty = 0; void ParseDefaultStackSize(LPCWSTR valueStr) { if (valueStr) { LPWSTR end; errno = 0; unsigned long value = wcstoul(valueStr, &end, 16); // Base 16 without a prefix if ((errno == ERANGE) // Parsed value doesn't fit in an unsigned long || (valueStr == end) // No characters parsed || (end == nullptr) // Unexpected condition (should never happen) || (end[0] != 0)) // Unprocessed terminal characters { ThrowHR(E_INVALIDARG); } else { s_defaultStackSizeProperty = value; } } } SIZE_T GetDefaultStackSizeSetting() { static DWORD s_defaultStackSizeEnv = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DefaultStackSize); uint64_t value = s_defaultStackSizeEnv ? s_defaultStackSizeEnv : s_defaultStackSizeProperty; SIZE_T minStack = 0x10000; // 64K - Somewhat arbitrary minimum thread stack size SIZE_T maxStack = 0x80000000; // 2G - Somewhat arbitrary maximum thread stack size if ((value >= maxStack) || ((value != 0) && (value < minStack))) { ThrowHR(E_INVALIDARG); } return (SIZE_T) value; } BOOL Thread::GetProcessDefaultStackSize(SIZE_T* reserveSize, SIZE_T* commitSize) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; // // Let's get the stack sizes from the PE file that started process. // static SIZE_T ExeSizeOfStackReserve = 0; static SIZE_T ExeSizeOfStackCommit = 0; static BOOL fSizesGot = FALSE; if (!fSizesGot) { SIZE_T defaultStackSizeSetting = GetDefaultStackSizeSetting(); if (defaultStackSizeSetting != 0) { ExeSizeOfStackReserve = defaultStackSizeSetting; ExeSizeOfStackCommit = defaultStackSizeSetting; fSizesGot = TRUE; } } #ifndef TARGET_UNIX if (!fSizesGot) { HINSTANCE hInst = WszGetModuleHandle(NULL); _ASSERTE(hInst); // WszGetModuleHandle should never fail on the module that started the process. 
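        // No explicit default was configured, so fall back to the stack sizes
        // recorded in the executable's PE header (read via PEDecoder below).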
EX_TRY { PEDecoder pe(hInst); pe.GetEXEStackSizes(&ExeSizeOfStackReserve, &ExeSizeOfStackCommit); fSizesGot = TRUE; } EX_CATCH { fSizesGot = FALSE; } EX_END_CATCH(SwallowAllExceptions); } #endif // !TARGET_UNIX if (!fSizesGot) { //return some somewhat-reasonable numbers if (NULL != reserveSize) *reserveSize = 256*1024; if (NULL != commitSize) *commitSize = 256*1024; return FALSE; } if (NULL != reserveSize) *reserveSize = ExeSizeOfStackReserve; if (NULL != commitSize) *commitSize = ExeSizeOfStackCommit; return TRUE; } BOOL Thread::CreateNewOSThread(SIZE_T sizeToCommitOrReserve, LPTHREAD_START_ROUTINE start, void *args) { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; #ifdef TARGET_UNIX SIZE_T ourId = 0; #else DWORD ourId = 0; #endif HANDLE h = NULL; DWORD dwCreationFlags = CREATE_SUSPENDED; dwCreationFlags |= STACK_SIZE_PARAM_IS_A_RESERVATION; if (sizeToCommitOrReserve == 0) { sizeToCommitOrReserve = GetDefaultStackSizeSetting(); } #ifndef TARGET_UNIX // the PAL does its own adjustments as necessary if (sizeToCommitOrReserve != 0 && sizeToCommitOrReserve <= GetOsPageSize()) { // On Windows, passing a value that is <= one page size bizarrely causes the OS to use the default stack size instead of // a minimum, which is undesirable. This adjustment fixes that issue to use a minimum stack size (typically 64 KB). sizeToCommitOrReserve = GetOsPageSize() + 1; } #endif // !TARGET_UNIX // Make sure we have all our handles, in case someone tries to suspend us // as we are starting up. if (!AllocHandles()) { // OS is out of handles/memory? return FALSE; } #ifdef TARGET_UNIX h = ::PAL_CreateThread64(NULL /*=SECURITY_ATTRIBUTES*/, #else h = ::CreateThread( NULL /*=SECURITY_ATTRIBUTES*/, #endif sizeToCommitOrReserve, start, args, dwCreationFlags, &ourId); if (h == NULL) return FALSE; _ASSERTE(!m_fPreemptiveGCDisabled); // leave in preemptive until HasStarted. SetThreadHandle(h); m_WeOwnThreadHandle = TRUE; // Before we do the resume, we need to take note of the new ThreadId. This // is necessary because -- before the thread starts executing at KickofThread -- // it may perform some DllMain DLL_THREAD_ATTACH notifications. These could // call into managed code. During the consequent SetupThread, we need to // perform the Thread::HasStarted call instead of going through the normal // 'new thread' pathway. _ASSERTE(GetOSThreadId() == 0); _ASSERTE(ourId != 0); m_OSThreadId = ourId; FastInterlockIncrement(&ThreadStore::s_pThreadStore->m_PendingThreadCount); #ifdef _DEBUG m_Creator.SetToCurrentThread(); #endif return TRUE; } // // #threadDestruction // // General comments on thread destruction. // // The C++ Thread object can survive beyond the time when the Win32 thread has died. // This is important if an exposed object has been created for this thread. The // exposed object will survive until it is GC'ed. // // A client like an exposed object can place an external reference count on that // object. We also place a reference count on it when we construct it, and we lose // that count when the thread finishes doing useful work (OnThreadTerminate). // // One way OnThreadTerminate() is called is when the thread finishes doing useful // work. This case always happens on the correct thread. // // The other way OnThreadTerminate() is called is during product shutdown. We do // a "best effort" to eliminate all threads except the Main thread before shutdown // happens. But there may be some background threads or external threads still // running. // // When the final reference count disappears, we destruct. 
Until then, the thread // remains in the ThreadStore, but is marked as "Dead". //<TODO> // @TODO cwb: for a typical shutdown, only background threads are still around. // Should we interrupt them? What about the non-typical shutdown?</TODO> int Thread::IncExternalCount() { CONTRACTL { NOTHROW; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; Thread *pCurThread = GetThreadNULLOk(); _ASSERTE(m_ExternalRefCount > 0); int retVal = FastInterlockIncrement((LONG*)&m_ExternalRefCount); // If we have an exposed object and the refcount is greater than one // we must make sure to keep a strong handle to the exposed object // so that we keep it alive even if nobody has a reference to it. if (pCurThread && ((*((void**)m_ExposedObject)) != NULL)) { // The exposed object exists and needs a strong handle so check // to see if it has one. // Only a managed thread can setup StrongHnd. if ((*((void**)m_StrongHndToExposedObject)) == NULL) { GCX_COOP(); // Store the object in the strong handle. StoreObjectInHandle(m_StrongHndToExposedObject, ObjectFromHandle(m_ExposedObject)); } } return retVal; } int Thread::DecExternalCount(BOOL holdingLock) { CONTRACTL { NOTHROW; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; // Note that it's possible to get here with a NULL current thread (during // shutdown of the thread manager). Thread *pCurThread = GetThreadNULLOk(); _ASSERTE (pCurThread == NULL || IsAtProcessExit() || (!holdingLock && !ThreadStore::HoldingThreadStore(pCurThread)) || (holdingLock && ThreadStore::HoldingThreadStore(pCurThread))); BOOL ToggleGC = FALSE; BOOL SelfDelete = FALSE; int retVal; // Must synchronize count and exposed object handle manipulation. We use the // thread lock for this, which implies that we must be in pre-emptive mode // to begin with and avoid any activity that would invoke a GC (this // acquires the thread store lock). if (pCurThread) { // TODO: we would prefer to use a GC Holder here, however it is hard // to get the case where we're deleting this thread correct given // the current macros. We want to supress the release of the holder // here which puts us in Preemptive mode, and also the switch to // Cooperative mode below, but since both holders will be named // the same thing (due to the generic nature of the macro) we can // not use GCX_*_SUPRESS_RELEASE() for 2 holders in the same scope // b/c they will both apply simply to the most narrowly scoped // holder. ToggleGC = pCurThread->PreemptiveGCDisabled(); if (ToggleGC) pCurThread->EnablePreemptiveGC(); } GCX_ASSERT_PREEMP(); ThreadStoreLockHolder tsLock(!holdingLock); _ASSERTE(m_ExternalRefCount >= 1); _ASSERTE(!holdingLock || ThreadStore::s_pThreadStore->m_Crst.GetEnterCount() > 0 || IsAtProcessExit()); retVal = FastInterlockDecrement((LONG*)&m_ExternalRefCount); if (retVal == 0) { HANDLE h = GetThreadHandle(); if (h == INVALID_HANDLE_VALUE) { h = m_ThreadHandleForClose; m_ThreadHandleForClose = INVALID_HANDLE_VALUE; } // Can not assert like this. We have already removed the Unstarted bit. //_ASSERTE (IsUnstarted() || h != INVALID_HANDLE_VALUE); if (h != INVALID_HANDLE_VALUE && m_WeOwnThreadHandle) { ::CloseHandle(h); SetThreadHandle(INVALID_HANDLE_VALUE); } // Switch back to cooperative mode to manipulate the thread. if (pCurThread) { // TODO: we would prefer to use GCX_COOP here, see comment above. 
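            // Manually switch back to cooperative mode (a GCX_COOP holder cannot be
            // used here, as noted above); the GCX_ASSERT_COOP below checks this.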
pCurThread->DisablePreemptiveGC(); } GCX_ASSERT_COOP(); // during process detach the thread might still be in the thread list // if it hasn't seen its DLL_THREAD_DETACH yet. Use the following // tweak to decide if the thread has terminated yet. if (!HasValidThreadHandle()) { SelfDelete = this == pCurThread; m_ExceptionState.FreeAllStackTraces(); if (SelfDelete) { SetThread(NULL); } delete this; } tsLock.Release(); // It only makes sense to restore the GC mode if we didn't just destroy // our own thread object. if (pCurThread && !SelfDelete && !ToggleGC) { pCurThread->EnablePreemptiveGC(); } // Cannot use this here b/c it creates a holder named the same as GCX_ASSERT_COOP // in the same scope above... // // GCX_ASSERT_PREEMP() return retVal; } else if (pCurThread == NULL) { // We're in shutdown, too late to be worrying about having a strong // handle to the exposed thread object, we've already performed our // final GC. tsLock.Release(); return retVal; } else { // Check to see if the external ref count reaches exactly one. If this // is the case and we have an exposed object then it is that exposed object // that is holding a reference to us. To make sure that we are not the // ones keeping the exposed object alive we need to remove the strong // reference we have to it. if ((retVal == 1) && ((*((void**)m_StrongHndToExposedObject)) != NULL)) { // Switch back to cooperative mode to manipulate the object. // Don't want to switch back to COOP until we let go of the lock // however we are allowed to call StoreObjectInHandle here in preemptive // mode because we are setting the value to NULL. CONTRACT_VIOLATION(ModeViolation); // Clear the handle and leave the lock. // We do not have to to DisablePreemptiveGC here, because // we just want to put NULL into a handle. StoreObjectInHandle(m_StrongHndToExposedObject, NULL); tsLock.Release(); // Switch back to the initial GC mode. if (ToggleGC) { pCurThread->DisablePreemptiveGC(); } GCX_ASSERT_COOP(); return retVal; } } tsLock.Release(); // Switch back to the initial GC mode. if (ToggleGC) { pCurThread->DisablePreemptiveGC(); } return retVal; } //-------------------------------------------------------------------- // Destruction. This occurs after the associated native thread // has died. //-------------------------------------------------------------------- Thread::~Thread() { CONTRACTL { NOTHROW; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; // TODO: enable this //_ASSERTE(GetThread() != this); _ASSERTE(m_ThrewControlForThread == 0); // AbortRequest is coupled with TrapReturningThread. // We should have unmarked the thread for abort. // !!! Can not assert here. If a thread has no managed code on stack // !!! we leave the g_TrapReturningThread set so that the thread will be // !!! aborted if it enters managed code. //_ASSERTE(!IsAbortRequested()); // We should not have the Thread marked for abort. But if we have // we need to unmark it so that g_TrapReturningThreads is decremented. 
if (IsAbortRequested()) { UnmarkThreadForAbort(); } #if defined(_DEBUG) && defined(TRACK_SYNC) _ASSERTE(IsAtProcessExit() || ((Dbg_TrackSyncStack *) m_pTrackSync)->m_StackPointer == 0); delete m_pTrackSync; #endif // TRACK_SYNC _ASSERTE(IsDead() || IsUnstarted() || IsAtProcessExit()); if (m_WaitEventLink.m_Next != NULL && !IsAtProcessExit()) { WaitEventLink *walk = &m_WaitEventLink; while (walk->m_Next) { ThreadQueue::RemoveThread(this, (SyncBlock*)((DWORD_PTR)walk->m_Next->m_WaitSB & ~1)); StoreEventToEventStore (walk->m_Next->m_EventWait); } m_WaitEventLink.m_Next = NULL; } if (m_StateNC & TSNC_ExistInThreadStore) { BOOL ret; ret = ThreadStore::RemoveThread(this); _ASSERTE(ret); } #ifdef _DEBUG m_pFrame = (Frame *)POISONC; #endif // Normally we shouldn't get here with a valid thread handle; however if SetupThread // failed (due to an OOM for example) then we need to CloseHandle the thread // handle if we own it. if (m_WeOwnThreadHandle && (GetThreadHandle() != INVALID_HANDLE_VALUE)) { CloseHandle(GetThreadHandle()); } if (m_DebugSuspendEvent.IsValid()) { m_DebugSuspendEvent.CloseEvent(); } if (m_EventWait.IsValid()) { m_EventWait.CloseEvent(); } FreeIOCompletionContext(); if (m_OSContext) delete m_OSContext; if (m_pOSContextBuffer) { delete[] m_pOSContextBuffer; m_pOSContextBuffer = NULL; } else if (m_pSavedRedirectContext) { delete m_pSavedRedirectContext; } MarkRedirectContextInUse(m_pSavedRedirectContext); m_pSavedRedirectContext = NULL; #ifdef FEATURE_COMINTEROP if (m_pRCWStack) delete m_pRCWStack; #endif if (m_pExceptionDuringStartup) { Exception::Delete (m_pExceptionDuringStartup); } ClearContext(); if (!IsAtProcessExit()) { // Destroy any handles that we're using to hold onto exception objects SafeSetThrowables(NULL); DestroyShortWeakHandle(m_ExposedObject); DestroyStrongHandle(m_StrongHndToExposedObject); } g_pThinLockThreadIdDispenser->DisposeId(GetThreadId()); if (m_pIBCInfo) { delete m_pIBCInfo; } m_tailCallTls.FreeArgBuffer(); #ifdef FEATURE_EVENT_TRACE // Destruct the thread local type cache for allocation sampling if(m_pAllLoggedTypes) { ETW::TypeSystemLog::DeleteTypeHashNoLock(&m_pAllLoggedTypes); } #endif // FEATURE_EVENT_TRACE // Wait for another thread to leave its loop in DeadlockAwareLock::TryBeginEnterLock CrstHolder lock(&g_DeadlockAwareCrst); } #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT void Thread::BaseCoUninitialize() { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_MODE_PREEMPTIVE; _ASSERTE(GetThread() == this); ::CoUninitialize(); }// BaseCoUninitialize #ifdef FEATURE_COMINTEROP void Thread::BaseWinRTUninitialize() { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_MODE_PREEMPTIVE; _ASSERTE(WinRTSupported()); _ASSERTE(GetThread() == this); _ASSERTE(IsWinRTInitialized()); RoUninitialize(); } #endif // FEATURE_COMINTEROP void Thread::CoUninitialize() { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; // Running threads might have performed a CoInitialize which must // now be balanced. 
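    // (Only the thread that called CoInitialize can issue the balancing
    // CoUninitialize; see also CleanupCOMState below.)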
BOOL needsUninitialize = IsCoInitialized() #ifdef FEATURE_COMINTEROP || IsWinRTInitialized() #endif // FEATURE_COMINTEROP ; if (!IsAtProcessExit() && needsUninitialize) { GCX_PREEMP(); CONTRACT_VIOLATION(ThrowsViolation); if (IsCoInitialized()) { BaseCoUninitialize(); FastInterlockAnd((ULONG *)&m_State, ~TS_CoInitialized); } #ifdef FEATURE_COMINTEROP if (IsWinRTInitialized()) { _ASSERTE(WinRTSupported()); BaseWinRTUninitialize(); ResetWinRTInitialized(); } #endif // FEATURE_COMNITEROP } } #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT void Thread::CleanupDetachedThreads() { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; _ASSERTE(!ThreadStore::HoldingThreadStore()); ThreadStoreLockHolder threadStoreLockHolder; Thread *thread = ThreadStore::GetAllThreadList(NULL, 0, 0); STRESS_LOG0(LF_SYNC, LL_INFO1000, "T::CDT called\n"); while (thread != NULL) { Thread *next = ThreadStore::GetAllThreadList(thread, 0, 0); if (thread->IsDetached()) { STRESS_LOG1(LF_SYNC, LL_INFO1000, "T::CDT - detaching thread 0x%p\n", thread); // Unmark that the thread is detached while we have the // thread store lock. This will ensure that no other // thread will race in here and try to delete it, too. FastInterlockAnd((ULONG*)&(thread->m_State), ~TS_Detached); FastInterlockDecrement(&m_DetachCount); if (!thread->IsBackground()) FastInterlockDecrement(&m_ActiveDetachCount); // If the debugger is attached, then we need to unlock the // thread store before calling OnThreadTerminate. That // way, we won't be holding the thread store lock if we // need to block sending a detach thread event. BOOL debuggerAttached = #ifdef DEBUGGING_SUPPORTED CORDebuggerAttached(); #else // !DEBUGGING_SUPPORTED FALSE; #endif // !DEBUGGING_SUPPORTED if (debuggerAttached) ThreadStore::UnlockThreadStore(); thread->OnThreadTerminate(debuggerAttached ? FALSE : TRUE); #ifdef DEBUGGING_SUPPORTED if (debuggerAttached) { ThreadSuspend::LockThreadStore(ThreadSuspend::SUSPEND_OTHER); // We remember the next Thread in the thread store // list before deleting the current one. But we can't // use that Thread pointer now that we release the // thread store lock in the middle of the loop. We // have to start from the beginning of the list every // time. If two threads T1 and T2 race into // CleanupDetachedThreads, then T1 will grab the first // Thread on the list marked for deletion and release // the lock. T2 will grab the second one on the // list. T2 may complete destruction of its Thread, // then T1 might re-acquire the thread store lock and // try to use the next Thread in the thread store. But // T2 just deleted that next Thread. thread = ThreadStore::GetAllThreadList(NULL, 0, 0); } else #endif // DEBUGGING_SUPPORTED { thread = next; } } else if (thread->HasThreadState(TS_Finalized)) { STRESS_LOG1(LF_SYNC, LL_INFO1000, "T::CDT - finalized thread 0x%p\n", thread); thread->ResetThreadState(TS_Finalized); // We have finalized the managed Thread object. Now it is time to clean up the unmanaged part thread->DecExternalCount(TRUE); thread = next; } else { thread = next; } } s_fCleanFinalizedThread = FALSE; } #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT void Thread::CleanupCOMState() { CONTRACTL { NOTHROW; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; #ifdef FEATURE_COMINTEROP if (GetFinalApartment() == Thread::AS_InSTA) ReleaseRCWsInCachesNoThrow(GetCurrentCtxCookie()); #endif // FEATURE_COMINTEROP // Running threads might have performed a CoInitialize which must // now be balanced. 
However only the thread that called COInitialize can // call CoUninitialize. BOOL needsUninitialize = IsCoInitialized() #ifdef FEATURE_COMINTEROP || IsWinRTInitialized() #endif // FEATURE_COMINTEROP ; if (needsUninitialize) { GCX_PREEMP(); CONTRACT_VIOLATION(ThrowsViolation); if (IsCoInitialized()) { BaseCoUninitialize(); ResetCoInitialized(); } #ifdef FEATURE_COMINTEROP if (IsWinRTInitialized()) { _ASSERTE(WinRTSupported()); BaseWinRTUninitialize(); ResetWinRTInitialized(); } #endif // FEATURE_COMINTEROP } } #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT // See general comments on thread destruction (code:#threadDestruction) above. void Thread::OnThreadTerminate(BOOL holdingLock) { CONTRACTL { NOTHROW; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; // #ReportDeadOnThreadTerminate // Caller should have put the TS_ReportDead bit on by now. // We don't want any windows after the exit event but before the thread is marked dead. // If a debugger attached during such a window (or even took a dump at the exit event), // then it may not realize the thread is dead. // So ensure we mark the thread as dead before we send the tool notifications. // The TS_ReportDead bit will cause the debugger to view this as TS_Dead. _ASSERTE(HasThreadState(TS_ReportDead)); // Should not use OSThreadId: // OSThreadId may change for the current thread is the thread is blocked and rescheduled // by host. Thread *pCurrentThread = GetThreadNULLOk(); DWORD CurrentThreadID = pCurrentThread?pCurrentThread->GetThreadId():0; DWORD ThisThreadID = GetThreadId(); #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT // If the currently running thread is the thread that died and it is an STA thread, then we // need to release all the RCW's in the current context. However, we cannot do this if we // are in the middle of process detach. if (!IsAtProcessExit() && this == GetThreadNULLOk()) { CleanupCOMState(); } #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT if (g_fEEShutDown != 0) { // We have started shutdown. Not safe to touch CLR state. return; } // We took a count during construction, and we rely on the count being // non-zero as we terminate the thread here. _ASSERTE(m_ExternalRefCount > 0); // The thread is no longer running. It's important that we zero any general OBJECTHANDLE's // on this Thread object. That's because we need the managed Thread object to be subject to // GC and yet any HANDLE is opaque to the GC when it comes to collecting cycles. When the // thread is executing, nothing can be collected anyway. But now that we stop running the // cycle concerns us. // // It's important that we only use OBJECTHANDLE's that are retrievable while the thread is // still running. That's what allows us to zero them here with impunity: { // No handles to clean up in the m_ExceptionState _ASSERTE(!m_ExceptionState.IsExceptionInProgress()); GCX_COOP(); // Destroy the LastThrown handle (and anything that violates the above assert). SafeSetThrowables(NULL); // Free all structures related to thread statics for this thread DeleteThreadStaticData(); } if (GCHeapUtilities::IsGCHeapInitialized()) { // Guaranteed to NOT be a shutdown case, because we tear down the heap before // we tear down any threads during shutdown. if (ThisThreadID == CurrentThreadID) { GCX_COOP(); // GetTotalAllocatedBytes reads dead_threads_non_alloc_bytes, but will suspend EE, being in COOP mode we cannot race with that // however, there could be other threads terminating and doing the same Add. 
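            // Hence the interlocked add below rather than a plain addition.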
FastInterlockExchangeAddLong((LONG64*)&dead_threads_non_alloc_bytes, m_alloc_context.alloc_limit - m_alloc_context.alloc_ptr); GCHeapUtilities::GetGCHeap()->FixAllocContext(&m_alloc_context, NULL, NULL); m_alloc_context.init(); } } // We switch a thread to dead when it has finished doing useful work. But it // remains in the thread store so long as someone keeps it alive. An exposed // object will do this (it releases the refcount in its finalizer). If the // thread is never released, we have another look during product shutdown and // account for the unreleased refcount of the uncollected exposed object: if (IsDead()) { GCX_COOP(); _ASSERTE(IsAtProcessExit()); ClearContext(); if (m_ExposedObject != NULL) DecExternalCount(holdingLock); // may destruct now } else { #ifdef DEBUGGING_SUPPORTED // // If we're debugging, let the debugger know that this thread is // gone. // // There is a race here where the debugger could have attached after // we checked (and thus didn't release the lock). In this case, // we can't call out to the debugger or we risk a deadlock. // if (!holdingLock && CORDebuggerAttached()) { g_pDebugInterface->DetachThread(this); } #endif // DEBUGGING_SUPPORTED #ifdef PROFILING_SUPPORTED // If a profiler is present, then notify the profiler of thread destroy { BEGIN_PROFILER_CALLBACK(CORProfilerTrackThreads()); GCX_PREEMP(); (&g_profControlBlock)->ThreadDestroyed((ThreadID) this); END_PROFILER_CALLBACK(); } #endif // PROFILING_SUPPORTED if (!holdingLock) { LOG((LF_SYNC, INFO3, "OnThreadTerminate obtain lock\n")); ThreadSuspend::LockThreadStore(ThreadSuspend::SUSPEND_OTHER); } if (GCHeapUtilities::IsGCHeapInitialized() && ThisThreadID != CurrentThreadID) { // We must be holding the ThreadStore lock in order to clean up alloc context. // We should never call FixAllocContext during GC. dead_threads_non_alloc_bytes += m_alloc_context.alloc_limit - m_alloc_context.alloc_ptr; GCHeapUtilities::GetGCHeap()->FixAllocContext(&m_alloc_context, NULL, NULL); m_alloc_context.init(); } FastInterlockOr((ULONG *) &m_State, TS_Dead); ThreadStore::s_pThreadStore->m_DeadThreadCount++; ThreadStore::s_pThreadStore->IncrementDeadThreadCountForGCTrigger(); if (IsUnstarted()) ThreadStore::s_pThreadStore->m_UnstartedThreadCount--; else { if (IsBackground()) ThreadStore::s_pThreadStore->m_BackgroundThreadCount--; } FastInterlockAnd((ULONG *) &m_State, ~(TS_Unstarted | TS_Background)); // // If this thread was told to trip for debugging between the // sending of the detach event above and the locking of the // thread store lock, then remove the flag and decrement the // global trap returning threads count. // if (!IsAtProcessExit()) { // A thread can't die during a GCPending, because the thread store's // lock is held by the GC thread. if (m_State & TS_DebugSuspendPending) UnmarkForSuspension(~TS_DebugSuspendPending); if (CurrentThreadID == ThisThreadID && IsAbortRequested()) { UnmarkThreadForAbort(); } } if (GetThreadHandle() != INVALID_HANDLE_VALUE) { if (m_ThreadHandleForClose == INVALID_HANDLE_VALUE) { m_ThreadHandleForClose = GetThreadHandle(); } SetThreadHandle (INVALID_HANDLE_VALUE); } m_OSThreadId = 0; // If nobody else is holding onto the thread, we may destruct it here: ULONG oldCount = DecExternalCount(TRUE); // If we are shutting down the process, we only have one thread active in the // system. So we can disregard all the reasons that hold this thread alive -- // TLS is about to be reclaimed anyway. 
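        // Drain any remaining external references so the Thread object is
        // destructed right away during process exit.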
        if (IsAtProcessExit())
            while (oldCount > 0)
            {
                oldCount = DecExternalCount(TRUE);
            }

        // ASSUME THAT THE THREAD IS DELETED, FROM HERE ON

        _ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >= 0);
        _ASSERTE(ThreadStore::s_pThreadStore->m_BackgroundThreadCount >= 0);
        _ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >= ThreadStore::s_pThreadStore->m_BackgroundThreadCount);
        _ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >= ThreadStore::s_pThreadStore->m_UnstartedThreadCount);
        _ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >= ThreadStore::s_pThreadStore->m_DeadThreadCount);

        // One of the components of OtherThreadsComplete() has changed, so check whether
        // we should now exit the EE.
        ThreadStore::CheckForEEShutdown();

        if (ThisThreadID == CurrentThreadID)
        {
            // NULL out the thread block in the tls. We can't do this if we aren't on the
            // right thread. But this will only happen during a shutdown. And we've made
            // a "best effort" to reduce to a single thread before we begin the shutdown.
            SetThread(NULL);
            SetAppDomain(NULL);
        }

        if (!holdingLock)
        {
            LOG((LF_SYNC, INFO3, "OnThreadTerminate releasing lock\n"));
            ThreadSuspend::UnlockThreadStore(ThisThreadID == CurrentThreadID);
        }
    }
}

// Helper functions to check for duplicate handles. We only do this check if
// a wait on multiple handles fails.
int __cdecl compareHandles( const void *arg1, const void *arg2 )
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    HANDLE h1 = *(HANDLE*)arg1;
    HANDLE h2 = *(HANDLE*)arg2;
    return (h1 == h2) ? 0 : ((h1 < h2) ? -1 : 1);
}

BOOL CheckForDuplicateHandles(int countHandles, HANDLE *handles)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    qsort(handles,countHandles,sizeof(HANDLE),compareHandles);
    for (int i=1; i < countHandles; i++)
    {
        if (handles[i-1] == handles[i])
            return TRUE;
    }
    return FALSE;
}

//--------------------------------------------------------------------
// Based on whether this thread has a message pump, do the appropriate
// style of Wait.
//-------------------------------------------------------------------- DWORD Thread::DoAppropriateWait(int countHandles, HANDLE *handles, BOOL waitAll, DWORD millis, WaitMode mode, PendingSync *syncState) { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; INDEBUG(BOOL alertable = (mode & WaitMode_Alertable) != 0;); _ASSERTE(alertable || syncState == 0); struct Param { Thread *pThis; int countHandles; HANDLE *handles; BOOL waitAll; DWORD millis; WaitMode mode; DWORD dwRet; } param; param.pThis = this; param.countHandles = countHandles; param.handles = handles; param.waitAll = waitAll; param.millis = millis; param.mode = mode; param.dwRet = (DWORD) -1; EE_TRY_FOR_FINALLY(Param *, pParam, &param) { pParam->dwRet = pParam->pThis->DoAppropriateWaitWorker(pParam->countHandles, pParam->handles, pParam->waitAll, pParam->millis, pParam->mode); } EE_FINALLY { if (syncState) { if (!GOT_EXCEPTION() && param.dwRet >= WAIT_OBJECT_0 && param.dwRet < (DWORD)(WAIT_OBJECT_0 + countHandles)) { // This thread has been removed from syncblk waiting list by the signalling thread syncState->Restore(FALSE); } else syncState->Restore(TRUE); } _ASSERTE (param.dwRet != WAIT_IO_COMPLETION); } EE_END_FINALLY; return(param.dwRet); } DWORD Thread::DoAppropriateWait(AppropriateWaitFunc func, void *args, DWORD millis, WaitMode mode, PendingSync *syncState) { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; INDEBUG(BOOL alertable = (mode & WaitMode_Alertable) != 0;); _ASSERTE(alertable || syncState == 0); struct Param { Thread *pThis; AppropriateWaitFunc func; void *args; DWORD millis; WaitMode mode; DWORD dwRet; } param; param.pThis = this; param.func = func; param.args = args; param.millis = millis; param.mode = mode; param.dwRet = (DWORD) -1; EE_TRY_FOR_FINALLY(Param *, pParam, &param) { pParam->dwRet = pParam->pThis->DoAppropriateWaitWorker(pParam->func, pParam->args, pParam->millis, pParam->mode); } EE_FINALLY { if (syncState) { if (!GOT_EXCEPTION() && WAIT_OBJECT_0 == param.dwRet) { // This thread has been removed from syncblk waiting list by the signalling thread syncState->Restore(FALSE); } else syncState->Restore(TRUE); } _ASSERTE (WAIT_IO_COMPLETION != param.dwRet); } EE_END_FINALLY; return(param.dwRet); } #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT //-------------------------------------------------------------------- // helper to do message wait //-------------------------------------------------------------------- DWORD MsgWaitHelper(int numWaiters, HANDLE* phEvent, BOOL bWaitAll, DWORD millis, BOOL bAlertable) { STANDARD_VM_CONTRACT; DWORD flags = 0; DWORD dwReturn=WAIT_ABANDONED; // If we're going to pump, we cannot use WAIT_ALL. That's because the wait would // only be satisfied if a message arrives while the handles are signalled. If we // want true WAIT_ALL, we need to fire up a different thread in the MTA and wait // on its result. This isn't implemented yet. // // A change was added to WaitHandleNative::CorWaitMultipleNative to disable WaitAll // in an STA with more than one handle. if (bWaitAll) { if (numWaiters == 1) bWaitAll = FALSE; // The check that's supposed to prevent this condition from occuring, in WaitHandleNative::CorWaitMultipleNative, // is unfortunately behind FEATURE_COMINTEROP instead of FEATURE_COMINTEROP_APARTMENT_SUPPORT. // So on CoreCLR (where FEATURE_COMINTEROP is not currently defined) we can actually reach this point. // We can't fix this, because it's a breaking change, so we just won't assert here. 
// The result is that WaitAll on an STA thread in CoreCLR will behave stragely, as described above. } if (bWaitAll) flags |= COWAIT_WAITALL; if (bAlertable) flags |= COWAIT_ALERTABLE; // CoWaitForMultipleHandles does not support more than 63 handles. It returns RPC_S_CALLPENDING for more than 63 handles // that is impossible to differentiate from timeout. if (numWaiters > 63) COMPlusThrow(kNotSupportedException, W("NotSupported_MaxWaitHandles_STA")); HRESULT hr = CoWaitForMultipleHandles(flags, millis, numWaiters, phEvent, &dwReturn); if (hr == RPC_S_CALLPENDING) { dwReturn = WAIT_TIMEOUT; } else if (FAILED(hr)) { // The service behaves differently on an STA vs. MTA in how much // error information it propagates back, and in which form. We currently // only get here in the STA case, so bias this logic that way. dwReturn = WAIT_FAILED; } else { dwReturn += WAIT_OBJECT_0; // success -- bias back } return dwReturn; } #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT //-------------------------------------------------------------------- // Do appropriate wait based on apartment state (STA or MTA) DWORD Thread::DoAppropriateAptStateWait(int numWaiters, HANDLE* pHandles, BOOL bWaitAll, DWORD timeout, WaitMode mode) { STANDARD_VM_CONTRACT; BOOL alertable = (mode & WaitMode_Alertable) != 0; #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT if (alertable && !GetDomain()->MustForceTrivialWaitOperations()) { ApartmentState as = GetFinalApartment(); if (AS_InMTA != as) { return MsgWaitHelper(numWaiters, pHandles, bWaitAll, timeout, alertable); } } #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT return WaitForMultipleObjectsEx(numWaiters, pHandles, bWaitAll, timeout, alertable); } // A helper called by our two flavors of DoAppropriateWaitWorker void Thread::DoAppropriateWaitWorkerAlertableHelper(WaitMode mode) { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; // A word about ordering for Interrupt. If someone tries to interrupt a thread // that's in the interruptible state, we queue an APC. But if they try to interrupt // a thread that's not in the interruptible state, we just record that fact. So // we have to set TS_Interruptible before we test to see whether someone wants to // interrupt us or else we have a race condition that causes us to skip the APC. FastInterlockOr((ULONG *) &m_State, TS_Interruptible); if (HasThreadStateNC(TSNC_InRestoringSyncBlock)) { // The thread is restoring SyncBlock for Object.Wait. ResetThreadStateNC(TSNC_InRestoringSyncBlock); } else { HandleThreadInterrupt(); // Safe to clear the interrupted state, no APC could have fired since we // reset m_UserInterrupt (which inhibits our APC callback from doing // anything). FastInterlockAnd((ULONG *) &m_State, ~TS_Interrupted); } } void MarkOSAlertableWait() { LIMITED_METHOD_CONTRACT; GetThread()->SetThreadStateNC (Thread::TSNC_OSAlertableWait); } void UnMarkOSAlertableWait() { LIMITED_METHOD_CONTRACT; GetThread()->ResetThreadStateNC (Thread::TSNC_OSAlertableWait); } //-------------------------------------------------------------------- // Based on whether this thread has a message pump, do the appropriate // style of Wait. //-------------------------------------------------------------------- DWORD Thread::DoAppropriateWaitWorker(int countHandles, HANDLE *handles, BOOL waitAll, DWORD millis, WaitMode mode) { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; DWORD ret = 0; BOOL alertable = (mode & WaitMode_Alertable) != 0; // Waits from SynchronizationContext.WaitHelper are always just WaitMode_IgnoreSyncCtx. 
// So if we defer to a sync ctx, we will lose any extra bits. We must therefore not // defer to a sync ctx if doing any non-default wait. // If you're doing a default wait, but want to ignore sync ctx, specify WaitMode_IgnoreSyncCtx // which will make mode != WaitMode_Alertable. BOOL ignoreSyncCtx = (mode != WaitMode_Alertable); if (GetDomain()->MustForceTrivialWaitOperations()) ignoreSyncCtx = TRUE; // Unless the ignoreSyncCtx flag is set, first check to see if there is a synchronization // context on the current thread and if there is, dispatch to it to do the wait. // If the wait is non alertable we cannot forward the call to the sync context // since fundamental parts of the system (such as the GC) rely on non alertable // waits not running any managed code. Also if we are past the point in shutdown were we // are allowed to run managed code then we can't forward the call to the sync context. if (!ignoreSyncCtx && alertable && !HasThreadStateNC(Thread::TSNC_BlockedForShutdown)) { GCX_COOP(); BOOL fSyncCtxPresent = FALSE; OBJECTREF SyncCtxObj = NULL; GCPROTECT_BEGIN(SyncCtxObj) { GetSynchronizationContext(&SyncCtxObj); if (SyncCtxObj != NULL) { SYNCHRONIZATIONCONTEXTREF syncRef = (SYNCHRONIZATIONCONTEXTREF)SyncCtxObj; if (syncRef->IsWaitNotificationRequired()) { fSyncCtxPresent = TRUE; ret = DoSyncContextWait(&SyncCtxObj, countHandles, handles, waitAll, millis); } } } GCPROTECT_END(); if (fSyncCtxPresent) return ret; } // Before going to pre-emptive mode the thread needs to be flagged as waiting for // the debugger. This used to be accomplished by the TS_Interruptible flag but that // doesn't work reliably, see DevDiv Bugs 699245. Some methods call in here already in // COOP mode so we set the bit before the transition. For the calls that are already // in pre-emptive mode those are still buggy. This is only a partial fix. BOOL isCoop = PreemptiveGCDisabled(); ThreadStateNCStackHolder tsNC(isCoop && alertable, TSNC_DebuggerSleepWaitJoin); GCX_PREEMP(); if (alertable) { DoAppropriateWaitWorkerAlertableHelper(mode); } StateHolder<MarkOSAlertableWait,UnMarkOSAlertableWait> OSAlertableWait(alertable); ThreadStateHolder tsh(alertable, TS_Interruptible | TS_Interrupted); ULONGLONG dwStart = 0, dwEnd; retry: if (millis != INFINITE) { dwStart = CLRGetTickCount64(); } ret = DoAppropriateAptStateWait(countHandles, handles, waitAll, millis, mode); if (ret == WAIT_IO_COMPLETION) { _ASSERTE (alertable); if (m_State & TS_Interrupted) { HandleThreadInterrupt(); } // We could be woken by some spurious APC or an EE APC queued to // interrupt us. In the latter case the TS_Interrupted bit will be set // in the thread state bits. Otherwise we just go back to sleep again. if (millis != INFINITE) { dwEnd = CLRGetTickCount64(); if (dwEnd >= dwStart + millis) { ret = WAIT_TIMEOUT; goto WaitCompleted; } else { millis -= (DWORD)(dwEnd - dwStart); } } goto retry; } _ASSERTE((ret >= WAIT_OBJECT_0 && ret < (WAIT_OBJECT_0 + (DWORD)countHandles)) || (ret >= WAIT_ABANDONED && ret < (WAIT_ABANDONED + (DWORD)countHandles)) || (ret == WAIT_TIMEOUT) || (ret == WAIT_FAILED)); // countHandles is used as an unsigned -- it should never be negative. _ASSERTE(countHandles >= 0); // We support precisely one WAIT_FAILED case, where we attempt to wait on a // thread handle and the thread is in the process of dying we might get a // invalid handle substatus. Turn this into a successful wait. // There are three cases to consider: // 1) Only waiting on one handle: return success right away. 
// 2) Waiting for all handles to be signalled: retry the wait without the // affected handle. // 3) Waiting for one of multiple handles to be signalled: return with the // first handle that is either signalled or has become invalid. if (ret == WAIT_FAILED) { DWORD errorCode = ::GetLastError(); if (errorCode == ERROR_INVALID_PARAMETER) { if (CheckForDuplicateHandles(countHandles, handles)) COMPlusThrow(kDuplicateWaitObjectException); else COMPlusThrowHR(HRESULT_FROM_WIN32(errorCode)); } else if (errorCode == ERROR_ACCESS_DENIED) { // A Win32 ACL could prevent us from waiting on the handle. COMPlusThrow(kUnauthorizedAccessException); } else if (errorCode == ERROR_NOT_ENOUGH_MEMORY) { ThrowOutOfMemory(); } #ifdef TARGET_UNIX else if (errorCode == ERROR_NOT_SUPPORTED) { // "Wait for any" and "wait for all" operations on multiple wait handles are not supported when a cross-process sync // object is included in the array COMPlusThrow(kPlatformNotSupportedException, W("PlatformNotSupported_NamedSyncObjectWaitAnyWaitAll")); } #endif else if (errorCode != ERROR_INVALID_HANDLE) { ThrowWin32(errorCode); } if (countHandles == 1) ret = WAIT_OBJECT_0; else if (waitAll) { // Probe all handles with a timeout of zero. When we find one that's // invalid, move it out of the list and retry the wait. for (int i = 0; i < countHandles; i++) { // WaitForSingleObject won't pump memssage; we already probe enough space // before calling this function and we don't want to fail here, so we don't // do a transition to tolerant code here DWORD subRet = WaitForSingleObject (handles[i], 0); if (subRet != WAIT_FAILED) continue; _ASSERTE(::GetLastError() == ERROR_INVALID_HANDLE); if ((countHandles - i - 1) > 0) memmove(&handles[i], &handles[i+1], (countHandles - i - 1) * sizeof(HANDLE)); countHandles--; break; } // Compute the new timeout value by assume that the timeout // is not large enough for more than one wrap dwEnd = CLRGetTickCount64(); if (millis != INFINITE) { if (dwEnd >= dwStart + millis) { ret = WAIT_TIMEOUT; goto WaitCompleted; } else { millis -= (DWORD)(dwEnd - dwStart); } } goto retry; } else { // Probe all handles with a timeout as zero, succeed with the first // handle that doesn't timeout. ret = WAIT_OBJECT_0; int i; for (i = 0; i < countHandles; i++) { TryAgain: // WaitForSingleObject won't pump memssage; we already probe enough space // before calling this function and we don't want to fail here, so we don't // do a transition to tolerant code here DWORD subRet = WaitForSingleObject (handles[i], 0); if ((subRet == WAIT_OBJECT_0) || (subRet == WAIT_FAILED)) break; if (subRet == WAIT_ABANDONED) { ret = (ret - WAIT_OBJECT_0) + WAIT_ABANDONED; break; } // If we get alerted it just masks the real state of the current // handle, so retry the wait. if (subRet == WAIT_IO_COMPLETION) goto TryAgain; _ASSERTE(subRet == WAIT_TIMEOUT); ret++; } } } WaitCompleted: _ASSERTE((ret != WAIT_TIMEOUT) || (millis != INFINITE)); return ret; } DWORD Thread::DoAppropriateWaitWorker(AppropriateWaitFunc func, void *args, DWORD millis, WaitMode mode) { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; BOOL alertable = (mode & WaitMode_Alertable)!=0; // Before going to pre-emptive mode the thread needs to be flagged as waiting for // the debugger. This used to be accomplished by the TS_Interruptible flag but that // doesn't work reliably, see DevDiv Bugs 699245. Some methods call in here already in // COOP mode so we set the bit before the transition. For the calls that are already // in pre-emptive mode those are still buggy. 
This is only a partial fix. BOOL isCoop = PreemptiveGCDisabled(); ThreadStateNCStackHolder tsNC(isCoop && alertable, TSNC_DebuggerSleepWaitJoin); GCX_PREEMP(); // <TODO> // @TODO cwb: we don't know whether a thread has a message pump or // how to pump its messages, currently. // @TODO cwb: WinCE isn't going to support Thread.Interrupt() correctly until // we get alertable waits on that platform.</TODO> DWORD ret; if(alertable) { DoAppropriateWaitWorkerAlertableHelper(mode); } DWORD option; if (alertable) { option = WAIT_ALERTABLE; #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT ApartmentState as = GetFinalApartment(); if ((AS_InMTA != as) && !GetDomain()->MustForceTrivialWaitOperations()) { option |= WAIT_MSGPUMP; } #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT } else { option = 0; } ThreadStateHolder tsh(alertable, TS_Interruptible | TS_Interrupted); ULONGLONG dwStart = 0; ULONGLONG dwEnd; retry: if (millis != INFINITE) { dwStart = CLRGetTickCount64(); } ret = func(args, millis, option); if (ret == WAIT_IO_COMPLETION) { _ASSERTE (alertable); if ((m_State & TS_Interrupted)) { HandleThreadInterrupt(); } if (millis != INFINITE) { dwEnd = CLRGetTickCount64(); if (dwEnd >= dwStart + millis) { ret = WAIT_TIMEOUT; goto WaitCompleted; } else { millis -= (DWORD)(dwEnd - dwStart); } } goto retry; } WaitCompleted: _ASSERTE(ret == WAIT_OBJECT_0 || ret == WAIT_ABANDONED || ret == WAIT_TIMEOUT || ret == WAIT_FAILED); _ASSERTE((ret != WAIT_TIMEOUT) || (millis != INFINITE)); return ret; } //-------------------------------------------------------------------- // Only one style of wait for DoSignalAndWait since we don't support this on STA Threads //-------------------------------------------------------------------- DWORD Thread::DoSignalAndWait(HANDLE *handles, DWORD millis, BOOL alertable, PendingSync *syncState) { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; _ASSERTE(alertable || syncState == 0); struct Param { Thread *pThis; HANDLE *handles; DWORD millis; BOOL alertable; DWORD dwRet; } param; param.pThis = this; param.handles = handles; param.millis = millis; param.alertable = alertable; param.dwRet = (DWORD) -1; EE_TRY_FOR_FINALLY(Param *, pParam, &param) { pParam->dwRet = pParam->pThis->DoSignalAndWaitWorker(pParam->handles, pParam->millis, pParam->alertable); } EE_FINALLY { if (syncState) { if (!GOT_EXCEPTION() && WAIT_OBJECT_0 == param.dwRet) { // This thread has been removed from syncblk waiting list by the signalling thread syncState->Restore(FALSE); } else syncState->Restore(TRUE); } _ASSERTE (WAIT_IO_COMPLETION != param.dwRet); } EE_END_FINALLY; return(param.dwRet); } DWORD Thread::DoSignalAndWaitWorker(HANDLE* pHandles, DWORD millis,BOOL alertable) { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; DWORD ret = 0; GCX_PREEMP(); if(alertable) { DoAppropriateWaitWorkerAlertableHelper(WaitMode_None); } StateHolder<MarkOSAlertableWait,UnMarkOSAlertableWait> OSAlertableWait(alertable); ThreadStateHolder tsh(alertable, TS_Interruptible | TS_Interrupted); ULONGLONG dwStart = 0, dwEnd; if (INFINITE != millis) { dwStart = CLRGetTickCount64(); } ret = SignalObjectAndWait(pHandles[0], pHandles[1], millis, alertable); retry: if (WAIT_IO_COMPLETION == ret) { _ASSERTE (alertable); // We could be woken by some spurious APC or an EE APC queued to // interrupt us. In the latter case the TS_Interrupted bit will be set // in the thread state bits. Otherwise we just go back to sleep again. 
if ((m_State & TS_Interrupted)) { HandleThreadInterrupt(); } if (INFINITE != millis) { dwEnd = CLRGetTickCount64(); if (dwStart + millis <= dwEnd) { ret = WAIT_TIMEOUT; goto WaitCompleted; } else { millis -= (DWORD)(dwEnd - dwStart); } dwStart = CLRGetTickCount64(); } //Retry case we don't want to signal again so only do the wait... ret = WaitForSingleObjectEx(pHandles[1],millis,TRUE); goto retry; } if (WAIT_FAILED == ret) { DWORD errorCode = ::GetLastError(); //If the handle to signal is a mutex and // the calling thread is not the owner, errorCode is ERROR_NOT_OWNER switch(errorCode) { case ERROR_INVALID_HANDLE: case ERROR_NOT_OWNER: case ERROR_ACCESS_DENIED: COMPlusThrowWin32(); break; case ERROR_TOO_MANY_POSTS: ret = ERROR_TOO_MANY_POSTS; break; default: CONSISTENCY_CHECK_MSGF(0, ("This errorCode is not understood '(%d)''\n", errorCode)); COMPlusThrowWin32(); break; } } WaitCompleted: //Check that the return state is valid _ASSERTE(WAIT_OBJECT_0 == ret || WAIT_ABANDONED == ret || WAIT_TIMEOUT == ret || WAIT_FAILED == ret || ERROR_TOO_MANY_POSTS == ret); //Wrong to time out if the wait was infinite _ASSERTE((WAIT_TIMEOUT != ret) || (INFINITE != millis)); return ret; } DWORD Thread::DoSyncContextWait(OBJECTREF *pSyncCtxObj, int countHandles, HANDLE *handles, BOOL waitAll, DWORD millis) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; PRECONDITION(CheckPointer(handles)); PRECONDITION(IsProtectedByGCFrame (pSyncCtxObj)); } CONTRACTL_END; MethodDescCallSite invokeWaitMethodHelper(METHOD__SYNCHRONIZATION_CONTEXT__INVOKE_WAIT_METHOD_HELPER); BASEARRAYREF handleArrayObj = (BASEARRAYREF)AllocatePrimitiveArray(ELEMENT_TYPE_I, countHandles); memcpyNoGCRefs(handleArrayObj->GetDataPtr(), handles, countHandles * sizeof(HANDLE)); ARG_SLOT args[6] = { ObjToArgSlot(*pSyncCtxObj), ObjToArgSlot(handleArrayObj), BoolToArgSlot(waitAll), (ARG_SLOT)millis, }; // Needed by TriggerGCForMDAInternal to avoid infinite recursion ThreadStateNCStackHolder holder(TRUE, TSNC_InsideSyncContextWait); return invokeWaitMethodHelper.Call_RetI4(args); } // Called out of SyncBlock::Wait() to block this thread until the Notify occurs. BOOL Thread::Block(INT32 timeOut, PendingSync *syncState) { WRAPPER_NO_CONTRACT; _ASSERTE(this == GetThread()); // Before calling Block, the SyncBlock queued us onto it's list of waiting threads. // However, before calling Block the SyncBlock temporarily left the synchronized // region. This allowed threads to enter the region and call Notify, in which // case we may have been signalled before we entered the Wait. So we aren't in the // m_WaitSB list any longer. Not a problem: the following Wait will return // immediately. But it means we cannot enforce the following assertion: // _ASSERTE(m_WaitSB != NULL); return (Wait(syncState->m_WaitEventLink->m_Next->m_EventWait, timeOut, syncState) != WAIT_OBJECT_0); } // Return whether or not a timeout occurred. TRUE=>we waited successfully DWORD Thread::Wait(HANDLE *objs, int cntObjs, INT32 timeOut, PendingSync *syncInfo) { WRAPPER_NO_CONTRACT; DWORD dwResult; DWORD dwTimeOut32; _ASSERTE(timeOut >= 0 || timeOut == INFINITE_TIMEOUT); dwTimeOut32 = (timeOut == INFINITE_TIMEOUT ? 
                                INFINITE : (DWORD) timeOut);

    dwResult = DoAppropriateWait(cntObjs, objs, FALSE /*=waitAll*/, dwTimeOut32,
                                 WaitMode_Alertable /*alertable*/, syncInfo);

    // Either we succeeded in the wait, or we timed out
    _ASSERTE((dwResult >= WAIT_OBJECT_0 && dwResult < (DWORD)(WAIT_OBJECT_0 + cntObjs)) ||
             (dwResult == WAIT_TIMEOUT));

    return dwResult;
}

// Return whether or not a timeout occurred.  TRUE=>we waited successfully
DWORD Thread::Wait(CLREvent *pEvent, INT32 timeOut, PendingSync *syncInfo)
{
    WRAPPER_NO_CONTRACT;

    DWORD   dwResult;
    DWORD   dwTimeOut32;

    _ASSERTE(timeOut >= 0 || timeOut == INFINITE_TIMEOUT);

    dwTimeOut32 = (timeOut == INFINITE_TIMEOUT
                   ? INFINITE
                   : (DWORD) timeOut);

    dwResult = pEvent->Wait(dwTimeOut32, TRUE /*alertable*/, syncInfo);

    // Either we succeeded in the wait, or we timed out
    _ASSERTE((dwResult == WAIT_OBJECT_0) || (dwResult == WAIT_TIMEOUT));

    return dwResult;
}

void Thread::Wake(SyncBlock *psb)
{
    WRAPPER_NO_CONTRACT;

    CLREvent* hEvent = NULL;
    WaitEventLink *walk = &m_WaitEventLink;
    while (walk->m_Next) {
        if (walk->m_Next->m_WaitSB == psb) {
            hEvent = walk->m_Next->m_EventWait;
            // We are guaranteed that only one thread can change walk->m_Next->m_WaitSB
            // since the thread is holding the syncblock.
            walk->m_Next->m_WaitSB = (SyncBlock*)((DWORD_PTR)walk->m_Next->m_WaitSB | 1);
            break;
        }
#ifdef _DEBUG
        else if ((SyncBlock*)((DWORD_PTR)walk->m_Next & ~1) == psb) {
            _ASSERTE (!"Can not wake a thread on the same SyncBlock more than once");
        }
#endif
    }
    PREFIX_ASSUME (hEvent != NULL);
    hEvent->Set();
}

#define WAIT_INTERRUPT_THREADABORT 0x1
#define WAIT_INTERRUPT_INTERRUPT 0x2
#define WAIT_INTERRUPT_OTHEREXCEPTION 0x4

// When we restore
DWORD EnterMonitorForRestore(SyncBlock *pSB)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    DWORD state = 0;
    EX_TRY
    {
        pSB->EnterMonitor();
    }
    EX_CATCH
    {
        // Assume it is a normal exception unless proven.
        state = WAIT_INTERRUPT_OTHEREXCEPTION;
        Thread *pThread = GetThread();
        if (pThread->IsAbortInitiated())
        {
            state = WAIT_INTERRUPT_THREADABORT;
        }
        else if (__pException != NULL)
        {
            if (__pException->GetHR() == COR_E_THREADINTERRUPTED)
            {
                state = WAIT_INTERRUPT_INTERRUPT;
            }
        }
    }
    EX_END_CATCH(SwallowAllExceptions);

    return state;
}

// This is the service that backs us out of a wait that we interrupted.  We must
// re-enter the monitor to the same extent the SyncBlock would, if we returned
// through it (instead of throwing through it).  And we need to cancel the wait,
// if it didn't get notified away while we are processing the interrupt.
void PendingSync::Restore(BOOL bRemoveFromSB)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    _ASSERTE(m_EnterCount);

    Thread *pCurThread = GetThread();

    _ASSERTE (pCurThread == m_OwnerThread);

    WaitEventLink *pRealWaitEventLink = m_WaitEventLink->m_Next;

    pRealWaitEventLink->m_RefCount --;
    if (pRealWaitEventLink->m_RefCount == 0)
    {
        if (bRemoveFromSB) {
            ThreadQueue::RemoveThread(pCurThread, pRealWaitEventLink->m_WaitSB);
        }
        if (pRealWaitEventLink->m_EventWait != &pCurThread->m_EventWait) {
            // Put the event back to the pool.
            StoreEventToEventStore(pRealWaitEventLink->m_EventWait);
        }
        // Remove from the link.
        m_WaitEventLink->m_Next = m_WaitEventLink->m_Next->m_Next;
    }

    // Someone up the stack is responsible for keeping the syncblock alive by protecting
    // the object that owns it.  But this relies on assertions that EnterMonitor is only
    // called in cooperative mode.  Even though we are safe in preemptive, do the
    // switch.
    GCX_COOP_THREAD_EXISTS(pCurThread);

    // We need to make sure that EnterMonitor succeeds.  We may have code like
    //     lock (a)
    //     {
    //         a.Wait
    //     }
    // We need to make sure that the finally from lock is executed with the lock owned.
    DWORD state = 0;
    SyncBlock *psb = (SyncBlock*)((DWORD_PTR)pRealWaitEventLink->m_WaitSB & ~1);
    for (LONG i=0; i < m_EnterCount;)
    {
        if ((state & (WAIT_INTERRUPT_THREADABORT | WAIT_INTERRUPT_INTERRUPT)) != 0)
        {
            // If the thread has been interrupted by Thread.Interrupt or Thread.Abort,
            // disable the check at the beginning of DoAppropriateWait
            pCurThread->SetThreadStateNC(Thread::TSNC_InRestoringSyncBlock);
        }
        DWORD result = EnterMonitorForRestore(psb);
        if (result == 0)
        {
            i++;
        }
        else
        {
            // We block the thread until the thread acquires the lock.
            // This is to make sure that when catch/finally is executed, the thread has the lock.
            // We do not want the thread to run its catch/finally if the lock is not taken.
            state |= result;

            // If the thread is being rudely aborted, and the thread has
            // no Cer on stack, we will not run managed code to release the
            // lock, so we can terminate the loop.
            if (pCurThread->IsRudeAbortInitiated() &&
                !pCurThread->IsExecutingWithinCer())
            {
                break;
            }
        }
    }

    pCurThread->ResetThreadStateNC(Thread::TSNC_InRestoringSyncBlock);

    if ((state & WAIT_INTERRUPT_THREADABORT) != 0)
    {
        pCurThread->HandleThreadAbort();
    }
    else if ((state & WAIT_INTERRUPT_INTERRUPT) != 0)
    {
        COMPlusThrow(kThreadInterruptedException);
    }
}

// This is the callback from the OS, when we queue an APC to interrupt a waiting thread.
// The callback occurs on the thread we wish to interrupt.  It is a STATIC method.
void WINAPI Thread::UserInterruptAPC(ULONG_PTR data)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    _ASSERTE(data == APC_Code);

    Thread *pCurThread = GetThreadNULLOk();
    if (pCurThread)
    {
        // We should only take action if an interrupt is currently being
        // requested (our synchronization does not guarantee that we won't fire
        // spuriously). It's safe to check the m_UserInterrupt field and then
        // set TS_Interrupted in a non-atomic fashion because m_UserInterrupt is
        // only cleared in this thread's context (though it may be set from any
        // context).
        if (pCurThread->IsUserInterrupted())
        {
            // Set bit to indicate this routine was called (as opposed to other
            // generic APCs).
            FastInterlockOr((ULONG *) &pCurThread->m_State, TS_Interrupted);
        }
    }
}

// This is the workhorse for Thread.Interrupt().
void Thread::UserInterrupt(ThreadInterruptMode mode)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    FastInterlockOr((DWORD*)&m_UserInterrupt, mode);

    if (HasValidThreadHandle() &&
        HasThreadState (TS_Interruptible))
    {
        Alert();
    }
}

// Implementation of Thread.Sleep().
void Thread::UserSleep(INT32 time)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    INCONTRACT(_ASSERTE(!GetThread()->GCNoTrigger()));

    DWORD res;

    // Before going to pre-emptive mode the thread needs to be flagged as waiting for
    // the debugger. This used to be accomplished by the TS_Interruptible flag but that
    // doesn't work reliably, see DevDiv Bugs 699245.
    ThreadStateNCStackHolder tsNC(TRUE, TSNC_DebuggerSleepWaitJoin);
    GCX_PREEMP();

    // A word about ordering for Interrupt.  If someone tries to interrupt a thread
    // that's in the interruptible state, we queue an APC.  But if they try to interrupt
    // a thread that's not in the interruptible state, we just record that fact.  So
    // we have to set TS_Interruptible before we test to see whether someone wants to
    // interrupt us or else we have a race condition that causes us to skip the APC.
FastInterlockOr((ULONG *) &m_State, TS_Interruptible); // If someone has interrupted us, we should not enter the wait. if (IsUserInterrupted()) { HandleThreadInterrupt(); } ThreadStateHolder tsh(TRUE, TS_Interruptible | TS_Interrupted); FastInterlockAnd((ULONG *) &m_State, ~TS_Interrupted); DWORD dwTime = (DWORD)time; retry: ULONGLONG start = CLRGetTickCount64(); res = ClrSleepEx (dwTime, TRUE); if (res == WAIT_IO_COMPLETION) { // We could be woken by some spurious APC or an EE APC queued to // interrupt us. In the latter case the TS_Interrupted bit will be set // in the thread state bits. Otherwise we just go back to sleep again. if ((m_State & TS_Interrupted)) { HandleThreadInterrupt(); } if (dwTime == INFINITE) { goto retry; } else { ULONGLONG actDuration = CLRGetTickCount64() - start; if (dwTime > actDuration) { dwTime -= (DWORD)actDuration; goto retry; } else { res = WAIT_TIMEOUT; } } } _ASSERTE(res == WAIT_TIMEOUT || res == WAIT_OBJECT_0); } // Correspondence between an EE Thread and an exposed System.Thread: OBJECTREF Thread::GetExposedObject() { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; TRIGGERSGC(); Thread *pCurThread = GetThreadNULLOk(); _ASSERTE (!(pCurThread == NULL || IsAtProcessExit())); _ASSERTE(pCurThread->PreemptiveGCDisabled()); if (ObjectFromHandle(m_ExposedObject) == NULL) { // Allocate the exposed thread object. THREADBASEREF attempt = (THREADBASEREF) AllocateObject(g_pThreadClass); GCPROTECT_BEGIN(attempt); // The exposed object keeps us alive until it is GC'ed. This // doesn't mean the physical thread continues to run, of course. // We have to set this outside of the ThreadStore lock, because this might trigger a GC. attempt->SetInternal(this); BOOL fNeedThreadStore = (! ThreadStore::HoldingThreadStore(pCurThread)); // Take a lock to make sure that only one thread creates the object. ThreadStoreLockHolder tsHolder(fNeedThreadStore); // Check to see if another thread has not already created the exposed object. if (ObjectFromHandle(m_ExposedObject) == NULL) { // Keep a weak reference to the exposed object. StoreObjectInHandle(m_ExposedObject, (OBJECTREF) attempt); ObjectInHandleHolder exposedHolder(m_ExposedObject); // Increase the external ref count. We can't call IncExternalCount because we // already hold the thread lock and IncExternalCount won't be able to take it. ULONG retVal = FastInterlockIncrement ((LONG*)&m_ExternalRefCount); // Check to see if we need to store a strong pointer to the object. if (retVal > 1) StoreObjectInHandle(m_StrongHndToExposedObject, (OBJECTREF) attempt); ObjectInHandleHolder strongHolder(m_StrongHndToExposedObject); attempt->SetManagedThreadId(GetThreadId()); // Note that we are NOT calling the constructor on the Thread. That's // because this is an internal create where we don't want a Start // address. And we don't want to expose such a constructor for our // customers to accidentally call. The following is in lieu of a true // constructor: attempt->InitExisting(); exposedHolder.SuppressRelease(); strongHolder.SuppressRelease(); } else { attempt->ClearInternal(); } GCPROTECT_END(); } return ObjectFromHandle(m_ExposedObject); } // We only set non NULL exposed objects for unstarted threads that haven't exited // their constructor yet. So there are no race conditions. 
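// Note: m_ExposedObject is a weak handle to the managed Thread object, while m_StrongHndToExposedObject
// is the strong handle that keeps it alive while the external ref count is nonzero (see GetExposedObject
// above).  Passing NULL below clears both handles.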
void Thread::SetExposedObject(OBJECTREF exposed)
{
    CONTRACTL {
        NOTHROW;
        if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
    }
    CONTRACTL_END;

    if (exposed != NULL)
    {
        _ASSERTE (GetThreadNULLOk() != this);
        _ASSERTE(IsUnstarted());
        _ASSERTE(ObjectFromHandle(m_ExposedObject) == NULL);
        // The exposed object keeps us alive until it is GC'ed.  This doesn't mean the
        // physical thread continues to run, of course.
        StoreObjectInHandle(m_ExposedObject, exposed);
        // This makes sure the contexts on the backing thread
        // and the managed thread start off in sync with each other.
        // BEWARE: the IncExternalCount call below may cause GC to happen.

        // IncExternalCount will store exposed in m_StrongHndToExposedObject which is in default domain.
        // If the creating thread is killed before the target thread is killed in Thread.Start, Thread object
        // will be kept alive forever.
        // Instead, IncExternalCount should be called after the target thread has been started in Thread.Start.
        // IncExternalCount();
    }
    else
    {
        // Simply set both of the handles to NULL. The GC of the old exposed thread
        // object will take care of decrementing the external ref count.
        StoreObjectInHandle(m_ExposedObject, NULL);
        StoreObjectInHandle(m_StrongHndToExposedObject, NULL);
    }
}

void Thread::SetLastThrownObject(OBJECTREF throwable, BOOL isUnhandled)
{
    CONTRACTL
    {
        if ((throwable == NULL) || CLRException::IsPreallocatedExceptionObject(throwable)) NOTHROW;
        else THROWS; // From CreateHandle
        GC_NOTRIGGER;
        if (throwable == NULL) MODE_ANY;
        else MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    STRESS_LOG_COND1(LF_EH, LL_INFO100, OBJECTREFToObject(throwable) != NULL, "in Thread::SetLastThrownObject: obj = %p\n", OBJECTREFToObject(throwable));

    // you can't have a NULL unhandled exception
    _ASSERTE(!(throwable == NULL && isUnhandled));

    if (m_LastThrownObjectHandle != NULL)
    {
        // We'll sometimes use a handle for a preallocated exception object. We should never, ever destroy one of
        // these handles... they'll be destroyed when the Runtime shuts down.
        if (!CLRException::IsPreallocatedExceptionHandle(m_LastThrownObjectHandle))
        {
            DestroyHandle(m_LastThrownObjectHandle);
        }

        m_LastThrownObjectHandle = NULL; // Make sure to set this to NULL here just in case we throw trying to make
                                         // a new handle below.
    }

    if (throwable != NULL)
    {
        _ASSERTE(this == GetThread());

        // Non-compliant exceptions are always wrapped.
        // The use of the ExceptionNative:: helper here (rather than the global ::IsException helper)
        // is hokey, but we need a GC_NOTRIGGER version and it's only for an ASSERT.
        _ASSERTE(IsException(throwable->GetMethodTable()));

        // If we're tracking one of the preallocated exception objects, then just use the global handle that
        // matches it rather than creating a new one.
        if (CLRException::IsPreallocatedExceptionObject(throwable))
        {
            m_LastThrownObjectHandle = CLRException::GetPreallocatedHandleForObject(throwable);
        }
        else
        {
            m_LastThrownObjectHandle = GetDomain()->CreateHandle(throwable);
        }

        _ASSERTE(m_LastThrownObjectHandle != NULL);
        m_ltoIsUnhandled = isUnhandled;
    }
    else
    {
        m_ltoIsUnhandled = FALSE;
    }
}

void Thread::SetSOForLastThrownObject()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_COOPERATIVE;
        CANNOT_TAKE_LOCK;
    }
    CONTRACTL_END;

    // If we are saving stack overflow exception, we can just null out the current handle.
    // The current domain is going to be unloaded or the process is going to be killed, so
    // we will not leak a handle.
m_LastThrownObjectHandle = CLRException::GetPreallocatedStackOverflowExceptionHandle(); } // // This is a nice wrapper for SetLastThrownObject which catches any exceptions caused by not being able to create // the handle for the throwable, and setting the last thrown object to the preallocated out of memory exception // instead. // OBJECTREF Thread::SafeSetLastThrownObject(OBJECTREF throwable) { CONTRACTL { NOTHROW; GC_NOTRIGGER; if (throwable == NULL) MODE_ANY; else MODE_COOPERATIVE; } CONTRACTL_END; // We return the original throwable if nothing goes wrong. OBJECTREF ret = throwable; EX_TRY { // Try to set the throwable. SetLastThrownObject(throwable); } EX_CATCH { // If it didn't work, then set the last thrown object to the preallocated OOM exception, and return that // object instead of the original throwable. ret = CLRException::GetPreallocatedOutOfMemoryException(); SetLastThrownObject(ret); } EX_END_CATCH(SwallowAllExceptions); return ret; } // // This is a nice wrapper for SetThrowable and SetLastThrownObject, which catches any exceptions caused by not // being able to create the handle for the throwable, and sets the throwable to the preallocated out of memory // exception instead. It also updates the last thrown object, which is always updated when the throwable is // updated. // OBJECTREF Thread::SafeSetThrowables(OBJECTREF throwable DEBUG_ARG(ThreadExceptionState::SetThrowableErrorChecking stecFlags), BOOL isUnhandled) { CONTRACTL { NOTHROW; GC_NOTRIGGER; if (throwable == NULL) MODE_ANY; else MODE_COOPERATIVE; } CONTRACTL_END; // We return the original throwable if nothing goes wrong. OBJECTREF ret = throwable; EX_TRY { // Try to set the throwable. SetThrowable(throwable DEBUG_ARG(stecFlags)); // Now, if the last thrown object is different, go ahead and update it. This makes sure that we re-throw // the right object when we rethrow. if (LastThrownObject() != throwable) { SetLastThrownObject(throwable); } if (isUnhandled) { MarkLastThrownObjectUnhandled(); } } EX_CATCH { // If either set didn't work, then set both throwables to the preallocated OOM exception, and return that // object instead of the original throwable. ret = CLRException::GetPreallocatedOutOfMemoryException(); // Neither of these will throw because we're setting with a preallocated exception. SetThrowable(ret DEBUG_ARG(stecFlags)); SetLastThrownObject(ret, isUnhandled); } EX_END_CATCH(SwallowAllExceptions); return ret; } // This method will sync the managed exception state to be in sync with the topmost active exception // for a given thread void Thread::SyncManagedExceptionState(bool fIsDebuggerThread) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; { GCX_COOP(); // Syncup the LastThrownObject on the managed thread SafeUpdateLastThrownObject(); } } void Thread::SetLastThrownObjectHandle(OBJECTHANDLE h) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_COOPERATIVE; } CONTRACTL_END; if (m_LastThrownObjectHandle != NULL && !CLRException::IsPreallocatedExceptionHandle(m_LastThrownObjectHandle)) { DestroyHandle(m_LastThrownObjectHandle); } m_LastThrownObjectHandle = h; } // // Create a duplicate handle of the current throwable and set the last thrown object to that. This ensures that the // last thrown object and the current throwable have handles that are in the same app domain. 
// void Thread::SafeUpdateLastThrownObject(void) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_COOPERATIVE; } CONTRACTL_END; OBJECTHANDLE hThrowable = GetThrowableAsHandle(); if (hThrowable != NULL) { EX_TRY { IGCHandleManager *pHandleTable = GCHandleUtilities::GetGCHandleManager(); // Creating a duplicate handle here ensures that the AD of the last thrown object // matches the domain of the current throwable. OBJECTHANDLE duplicateHandle = pHandleTable->CreateDuplicateHandle(hThrowable); SetLastThrownObjectHandle(duplicateHandle); } EX_CATCH { // If we can't create a duplicate handle, we set both throwables to the preallocated OOM exception. SafeSetThrowables(CLRException::GetPreallocatedOutOfMemoryException()); } EX_END_CATCH(SwallowAllExceptions); } } // Background threads must be counted, because the EE should shut down when the // last non-background thread terminates. But we only count running ones. void Thread::SetBackground(BOOL isBack) { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; // booleanize IsBackground() which just returns bits if (isBack == !!IsBackground()) return; BOOL lockHeld = HasThreadStateNC(Thread::TSNC_TSLTakenForStartup); _ASSERTE(!lockHeld || (lockHeld && ThreadStore::HoldingThreadStore())); LOG((LF_SYNC, INFO3, "SetBackground obtain lock\n")); ThreadStoreLockHolder TSLockHolder(!lockHeld); if (IsDead()) { // This can only happen in a race condition, where the correct thing to do // is ignore it. If it happens without the race condition, we throw an // exception. } else if (isBack) { if (!IsBackground()) { FastInterlockOr((ULONG *) &m_State, TS_Background); // unstarted threads don't contribute to the background count if (!IsUnstarted()) ThreadStore::s_pThreadStore->m_BackgroundThreadCount++; // If we put the main thread into a wait, until only background threads exist, // then we make that // main thread a background thread. This cleanly handles the case where it // may or may not be one as it enters the wait. // One of the components of OtherThreadsComplete() has changed, so check whether // we should now exit the EE. ThreadStore::CheckForEEShutdown(); } } else { if (IsBackground()) { FastInterlockAnd((ULONG *) &m_State, ~TS_Background); // unstarted threads don't contribute to the background count if (!IsUnstarted()) ThreadStore::s_pThreadStore->m_BackgroundThreadCount--; _ASSERTE(ThreadStore::s_pThreadStore->m_BackgroundThreadCount >= 0); _ASSERTE(ThreadStore::s_pThreadStore->m_BackgroundThreadCount <= ThreadStore::s_pThreadStore->m_ThreadCount); } } } #ifdef FEATURE_COMINTEROP class ApartmentSpyImpl : public IUnknownCommon<IInitializeSpy, IID_IInitializeSpy> { public: HRESULT STDMETHODCALLTYPE PreInitialize(DWORD dwCoInit, DWORD dwCurThreadAptRefs) { LIMITED_METHOD_CONTRACT; return S_OK; } HRESULT STDMETHODCALLTYPE PostInitialize(HRESULT hrCoInit, DWORD dwCoInit, DWORD dwNewThreadAptRefs) { LIMITED_METHOD_CONTRACT; return hrCoInit; // this HRESULT will be returned from CoInitialize(Ex) } HRESULT STDMETHODCALLTYPE PreUninitialize(DWORD dwCurThreadAptRefs) { // Don't assume that Thread exists and do not create it. STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_MODE_PREEMPTIVE; HRESULT hr = S_OK; if (dwCurThreadAptRefs == 1 && !g_fEEShutDown) { // This is the last CoUninitialize on this thread and the CLR is still running. If it's an STA // we take the opportunity to perform COM/WinRT cleanup now, when the apartment is still alive. 
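        // Only threads the runtime knows about participate in this cleanup; GetThreadNULLOk
        // returns NULL for foreign threads, which are simply skipped below.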
Thread *pThread = GetThreadNULLOk(); if (pThread != NULL) { BEGIN_EXTERNAL_ENTRYPOINT(&hr) { if (pThread->GetFinalApartment() == Thread::AS_InSTA) { // This will release RCWs and purge the WinRT factory cache on all AppDomains. It // will also synchronize with the finalizer thread which ensures that the RCWs // that were already in the global RCW cleanup list will be cleaned up as well. // ReleaseRCWsInCachesNoThrow(GetCurrentCtxCookie()); } } END_EXTERNAL_ENTRYPOINT; } } return hr; } HRESULT STDMETHODCALLTYPE PostUninitialize(DWORD dwNewThreadAptRefs) { LIMITED_METHOD_CONTRACT; return S_OK; } }; #endif // FEATURE_COMINTEROP void Thread::PrepareApartmentAndContext() { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; #ifdef TARGET_UNIX m_OSThreadId = ::PAL_GetCurrentOSThreadId(); #else m_OSThreadId = ::GetCurrentThreadId(); #endif #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT // Be very careful in here because we haven't set up e.g. TLS yet. if (m_State & (TS_InSTA | TS_InMTA)) { // Make sure TS_InSTA and TS_InMTA aren't both set. _ASSERTE(!((m_State & TS_InSTA) && (m_State & TS_InMTA))); // Determine the apartment state to set based on the requested state. ApartmentState aState = m_State & TS_InSTA ? AS_InSTA : AS_InMTA; // Clear the requested apartment state from the thread. This is requested since // the thread might actually be a fiber that has already been initialized to // a different apartment state than the requested one. If we didn't clear // the requested apartment state, then we could end up with both TS_InSTA and // TS_InMTA set at the same time. FastInterlockAnd ((ULONG *) &m_State, ~TS_InSTA & ~TS_InMTA); // Attempt to set the requested apartment state. SetApartment(aState); } // In the case where we own the thread and we have switched it to a different // starting context, it is the responsibility of the caller (KickOffThread()) // to notice that the context changed, and to adjust the delegate that it will // dispatch on, as appropriate. #endif //FEATURE_COMINTEROP_APARTMENT_SUPPORT #ifdef FEATURE_COMINTEROP // Our IInitializeSpy will be registered in classic processes // only if the internal config switch is on. if (g_pConfig->EnableRCWCleanupOnSTAShutdown()) { NewHolder<ApartmentSpyImpl> pSpyImpl = new ApartmentSpyImpl(); IfFailThrow(CoRegisterInitializeSpy(pSpyImpl, &m_uliInitializeSpyCookie)); pSpyImpl.SuppressRelease(); m_fInitializeSpyRegistered = true; } #endif // FEATURE_COMINTEROP } #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT // TS_InSTA (0x00004000) -> AS_InSTA (0) // TS_InMTA (0x00008000) -> AS_InMTA (1) #define TS_TO_AS(ts) \ (Thread::ApartmentState)((((DWORD)ts) >> 14) - 1) \ // Retrieve the apartment state of the current thread. There are three possible // states: thread hosts an STA, thread is part of the MTA or thread state is // undecided. The last state may indicate that the apartment has not been set at // all (nobody has called CoInitializeEx) or that the EE does not know the // current state (EE has not called CoInitializeEx). 
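// The fast path below decodes the cached TS_InSTA/TS_InMTA bits via TS_TO_AS; when neither bit is
// set it falls back to GetApartmentRare, which may query the OS thread type to classify the apartment.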
Thread::ApartmentState Thread::GetApartment() { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; ApartmentState as = AS_Unknown; ThreadState maskedTs = (ThreadState)(((DWORD)m_State) & (TS_InSTA|TS_InMTA)); if (maskedTs) { _ASSERTE((maskedTs == TS_InSTA) || (maskedTs == TS_InMTA)); static_assert_no_msg(TS_TO_AS(TS_InSTA) == AS_InSTA); static_assert_no_msg(TS_TO_AS(TS_InMTA) == AS_InMTA); as = TS_TO_AS(maskedTs); } if (as != AS_Unknown) { return as; } return GetApartmentRare(as); } Thread::ApartmentState Thread::GetApartmentRare(Thread::ApartmentState as) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; if (this == GetThreadNULLOk()) { THDTYPE type; HRESULT hr = S_OK; if (as == AS_Unknown) { hr = GetCurrentThreadTypeNT5(&type); if (hr == S_OK) { as = (type == THDTYPE_PROCESSMESSAGES) ? AS_InSTA : AS_InMTA; // If we get back THDTYPE_PROCESSMESSAGES, we are guaranteed to // be an STA thread. If not, we are an MTA thread, however // we can't know if the thread has been explicitly set to MTA // (via a call to CoInitializeEx) or if it has been implicitly // made MTA (if it hasn't been CoInitializeEx'd but CoInitialize // has already been called on some other thread in the process. if (as == AS_InSTA) FastInterlockOr((ULONG *) &m_State, AS_InSTA); } } } return as; } // Retrieve the explicit apartment state of the current thread. There are three possible // states: thread hosts an STA, thread is part of the MTA or thread state is // undecided. The last state may indicate that the apartment has not been set at // all (nobody has called CoInitializeEx), the EE does not know the // current state (EE has not called CoInitializeEx), or the thread is implicitly in // the MTA. Thread::ApartmentState Thread::GetExplicitApartment() { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; _ASSERTE(!((m_State & TS_InSTA) && (m_State & TS_InMTA))); // Initialize m_State by calling GetApartment. GetApartment(); ApartmentState as = (m_State & TS_InSTA) ? AS_InSTA : (m_State & TS_InMTA) ? AS_InMTA : AS_Unknown; return as; } Thread::ApartmentState Thread::GetFinalApartment() { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; _ASSERTE(this == GetThread()); ApartmentState as = AS_Unknown; if (g_fEEShutDown) { // On shutdown, do not use cached value. Someone might have called // CoUninitialize. FastInterlockAnd ((ULONG *) &m_State, ~TS_InSTA & ~TS_InMTA); } as = GetApartment(); if (as == AS_Unknown) { // On Win2k and above, GetApartment will only return AS_Unknown if CoInitialize // hasn't been called in the process. In that case we can simply assume MTA. However we // cannot cache this value in the Thread because if a CoInitialize does occur, then the // thread state might change. as = AS_InMTA; } return as; } // when we get apartment tear-down notification, // we want reset the apartment state we cache on the thread VOID Thread::ResetApartment() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; // reset the TS_InSTA bit and TS_InMTA bit ThreadState t_State = (ThreadState)(~(TS_InSTA | TS_InMTA)); FastInterlockAnd((ULONG *) &m_State, t_State); } // Attempt to set current thread's apartment state. The actual apartment state // achieved is returned and may differ from the input state if someone managed // to call CoInitializeEx on this thread first (note that calls to SetApartment // made before the thread has started are guaranteed to succeed). 
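// CoInitializeEx outcomes handled below: S_OK (we now own the initialization and mark TS_CoInitialized),
// S_FALSE (the thread was already initialized to this mode, so the extra reference is released with
// CoUninitialize), RPC_E_CHANGED_MODE (another component already chose the other mode), and
// E_OUTOFMEMORY / E_NOTIMPL (surfaced as managed exceptions).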
Thread::ApartmentState Thread::SetApartment(ApartmentState state) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(COMPlusThrowOM();); } CONTRACTL_END; // Reset any bits that request for CoInitialize ResetRequiresCoInitialize(); // Setting the state to AS_Unknown indicates we should CoUninitialize // the thread. if (state == AS_Unknown) { BOOL needUninitialize = (m_State & TS_CoInitialized) #ifdef FEATURE_COMINTEROP || IsWinRTInitialized() #endif // FEATURE_COMINTEROP ; if (needUninitialize) { GCX_PREEMP(); // If we haven't CoInitialized the thread, then we don't have anything to do. if (m_State & TS_CoInitialized) { // We should never be attempting to CoUninitialize another thread than // the currently running thread. #ifdef TARGET_UNIX _ASSERTE(m_OSThreadId == ::PAL_GetCurrentOSThreadId()); #else _ASSERTE(m_OSThreadId == ::GetCurrentThreadId()); #endif // CoUninitialize the thread and reset the STA/MTA/CoInitialized state bits. ::CoUninitialize(); ThreadState uninitialized = static_cast<ThreadState>(TS_InSTA | TS_InMTA | TS_CoInitialized); FastInterlockAnd((ULONG *) &m_State, ~uninitialized); } #ifdef FEATURE_COMINTEROP if (IsWinRTInitialized()) { _ASSERTE(WinRTSupported()); BaseWinRTUninitialize(); ResetWinRTInitialized(); } #endif // FEATURE_COMINTEROP } return GetApartment(); } // Call GetApartment to initialize the current apartment state. // // Important note: For Win2k and above this can return AS_InMTA even if the current // thread has never been CoInitialized. Because of this we MUST NOT look at the // return value of GetApartment here. We can however look at the m_State flags // since these will only be set to TS_InMTA if we know for a fact the the // current thread has explicitly been made MTA (via a call to CoInitializeEx). GetApartment(); // If the current thread is STA, then it is impossible to change it to // MTA. if (m_State & TS_InSTA) { return AS_InSTA; } // If the current thread is EXPLICITLY MTA, then it is impossible to change it to // STA. if (m_State & TS_InMTA) { return AS_InMTA; } // If the thread isn't even started yet, we mark the state bits without // calling CoInitializeEx (since we're obviously not in the correct thread // context yet). We'll retry this call when the thread is started. // Don't use the TS_Unstarted state bit to check for this, it's cleared far // too late in the day for us. Instead check whether we're in the correct // thread context. #ifdef TARGET_UNIX if (m_OSThreadId != ::PAL_GetCurrentOSThreadId()) #else if (m_OSThreadId != ::GetCurrentThreadId()) #endif { FastInterlockOr((ULONG *) &m_State, (state == AS_InSTA) ? TS_InSTA : TS_InMTA); return state; } HRESULT hr; { GCX_PREEMP(); // Attempt to set apartment by calling CoInitializeEx. This may fail if // another caller (outside EE) beat us to it. // // Important note: When calling CoInitializeEx(COINIT_MULTITHREADED) on a // thread that has never been CoInitialized, the return value will always // be S_OK, even if another thread in the process has already been // CoInitialized to MTA. However if the current thread has already been // CoInitialized to MTA, then S_FALSE will be returned. hr = ::CoInitializeEx(NULL, (state == AS_InSTA) ? COINIT_APARTMENTTHREADED : COINIT_MULTITHREADED); } if (SUCCEEDED(hr)) { ThreadState t_State = (state == AS_InSTA) ? TS_InSTA : TS_InMTA; if (hr == S_OK) { // The thread has never been CoInitialized. 
t_State = (ThreadState)(t_State | TS_CoInitialized); } else { _ASSERTE(hr == S_FALSE); // If the thread has already been CoInitialized to the proper mode, then // we don't want to leave an outstanding CoInit so we CoUninit. { GCX_PREEMP(); ::CoUninitialize(); } } // We succeeded in setting the apartment state to the requested state. FastInterlockOr((ULONG *) &m_State, t_State); } else if (hr == RPC_E_CHANGED_MODE) { // We didn't manage to enforce the requested apartment state, but at least // we can work out what the state is now. No need to actually do the CoInit -- // obviously someone else already took care of that. FastInterlockOr((ULONG *) &m_State, ((state == AS_InSTA) ? TS_InMTA : TS_InSTA)); } else if (hr == E_OUTOFMEMORY) { COMPlusThrowOM(); } else if (hr == E_NOTIMPL) { COMPlusThrow(kPlatformNotSupportedException, IDS_EE_THREAD_APARTMENT_NOT_SUPPORTED, (state == AS_InSTA) ? W("STA") : W("MTA")); } else { _ASSERTE(!"Unexpected HRESULT returned from CoInitializeEx!"); } // If WinRT is supported on this OS, also initialize it at the same time. Since WinRT sits on top of COM // we need to make sure that it is initialized in the same threading mode as we just started COM itself // with (or that we detected COM had already been started with). if (WinRTSupported() && !IsWinRTInitialized()) { GCX_PREEMP(); BOOL isSTA = m_State & TS_InSTA; _ASSERTE(isSTA || (m_State & TS_InMTA)); HRESULT hrWinRT = RoInitialize(isSTA ? RO_INIT_SINGLETHREADED : RO_INIT_MULTITHREADED); if (SUCCEEDED(hrWinRT)) { if (hrWinRT == S_OK) { SetThreadStateNC(TSNC_WinRTInitialized); } else { _ASSERTE(hrWinRT == S_FALSE); // If the thread has already been initialized, back it out. We may not // always be able to call RoUninitialize on shutdown so if there's // a way to avoid having to, we should take advantage of that. RoUninitialize(); } } else if (hrWinRT == E_OUTOFMEMORY) { COMPlusThrowOM(); } else { // We don't check for RPC_E_CHANGEDMODE, since we're using the mode that was read in by // initializing COM above. COM and WinRT need to always be in the same mode, so we should never // see that return code at this point. _ASSERTE(!"Unexpected HRESULT From RoInitialize"); } } // Since we've just called CoInitialize, COM has effectively been started up. // To ensure the CLR is aware of this, we need to call EnsureComStarted. 
EnsureComStarted(FALSE); return GetApartment(); } #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT //---------------------------------------------------------------------------- // // ThreadStore Implementation // //---------------------------------------------------------------------------- ThreadStore::ThreadStore() : m_Crst(CrstThreadStore, (CrstFlags) (CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD)), m_ThreadCount(0), m_MaxThreadCount(0), m_UnstartedThreadCount(0), m_BackgroundThreadCount(0), m_PendingThreadCount(0), m_DeadThreadCount(0), m_DeadThreadCountForGCTrigger(0), m_TriggerGCForDeadThreads(false), m_HoldingThread(0) { CONTRACTL { THROWS; GC_NOTRIGGER; } CONTRACTL_END; m_TerminationEvent.CreateManualEvent(FALSE); _ASSERTE(m_TerminationEvent.IsValid()); } void ThreadStore::InitThreadStore() { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; s_pThreadStore = new ThreadStore; g_pThinLockThreadIdDispenser = new IdDispenser(); ThreadSuspend::g_pGCSuspendEvent = new CLREvent(); ThreadSuspend::g_pGCSuspendEvent->CreateManualEvent(FALSE); s_pWaitForStackCrawlEvent = new CLREvent(); s_pWaitForStackCrawlEvent->CreateManualEvent(FALSE); s_DeadThreadCountThresholdForGCTrigger = static_cast<LONG>(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_Thread_DeadThreadCountThresholdForGCTrigger)); if (s_DeadThreadCountThresholdForGCTrigger < 0) { s_DeadThreadCountThresholdForGCTrigger = 0; } s_DeadThreadGCTriggerPeriodMilliseconds = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_Thread_DeadThreadGCTriggerPeriodMilliseconds); s_DeadThreadGenerationCounts = nullptr; } // Enter and leave the critical section around the thread store. Clients should // use LockThreadStore and UnlockThreadStore because ThreadStore lock has // additional semantics well beyond a normal lock. 
DEBUG_NOINLINE void ThreadStore::Enter()
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        // we must be in preemptive mode while taking this lock
        // if suspension is in progress, the lock is taken, and there is no way to suspend us once we block
        MODE_PREEMPTIVE;
    }
    CONTRACTL_END;

    ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
    CHECK_ONE_STORE();
    m_Crst.Enter();
}

DEBUG_NOINLINE void ThreadStore::Leave()
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
    CHECK_ONE_STORE();
    m_Crst.Leave();
}

void ThreadStore::LockThreadStore()
{
    WRAPPER_NO_CONTRACT;

    // The actual implementation is in ThreadSuspend class since it is coupled
    // with thread suspension logic
    ThreadSuspend::LockThreadStore(ThreadSuspend::SUSPEND_OTHER);
}

void ThreadStore::UnlockThreadStore()
{
    WRAPPER_NO_CONTRACT;

    // The actual implementation is in ThreadSuspend class since it is coupled
    // with thread suspension logic
    ThreadSuspend::UnlockThreadStore(FALSE, ThreadSuspend::SUSPEND_OTHER);
}

// AddThread adds 'newThread' to m_ThreadList
void ThreadStore::AddThread(Thread *newThread)
{
    CONTRACTL {
        NOTHROW;
        if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
    }
    CONTRACTL_END;

    LOG((LF_SYNC, INFO3, "AddThread obtain lock\n"));

    BOOL lockHeld = newThread->HasThreadStateNC(Thread::TSNC_TSLTakenForStartup);
    _ASSERTE(!lockHeld || (lockHeld && ThreadStore::HoldingThreadStore()));
    ThreadStoreLockHolder TSLockHolder(!lockHeld);

    s_pThreadStore->m_ThreadList.InsertTail(newThread);
    s_pThreadStore->m_ThreadCount++;
    if (s_pThreadStore->m_MaxThreadCount < s_pThreadStore->m_ThreadCount)
        s_pThreadStore->m_MaxThreadCount = s_pThreadStore->m_ThreadCount;

    if (newThread->IsUnstarted())
        s_pThreadStore->m_UnstartedThreadCount++;

    newThread->SetThreadStateNC(Thread::TSNC_ExistInThreadStore);

    _ASSERTE(!newThread->IsBackground());
    _ASSERTE(!newThread->IsDead());
}

// This function is just designed to avoid deadlocks during abnormal process termination, and should not be used for any other purpose
BOOL ThreadStore::CanAcquireLock()
{
    WRAPPER_NO_CONTRACT;
    {
        return (s_pThreadStore->m_Crst.m_criticalsection.LockCount == -1 || (size_t)s_pThreadStore->m_Crst.m_criticalsection.OwningThread == (size_t)GetCurrentThreadId());
    }
}

// Whenever one of the components of OtherThreadsComplete() has changed in the
// correct direction, see whether we can now shutdown the EE because only background
// threads are running.
void ThreadStore::CheckForEEShutdown()
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    if (g_fWeControlLifetime &&
        s_pThreadStore->OtherThreadsComplete())
    {
        BOOL bRet;
        bRet = s_pThreadStore->m_TerminationEvent.Set();
        _ASSERTE(bRet);
    }
}

BOOL ThreadStore::RemoveThread(Thread *target)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    BOOL    found;
    Thread *ret;

#if 0 // This assert is not valid when failing to create background GC thread.
      // Main GC thread holds the TS lock.
_ASSERTE (ThreadStore::HoldingThreadStore()); #endif _ASSERTE(s_pThreadStore->m_Crst.GetEnterCount() > 0 || IsAtProcessExit()); _ASSERTE(s_pThreadStore->DbgFindThread(target)); ret = s_pThreadStore->m_ThreadList.FindAndRemove(target); _ASSERTE(ret && ret == target); found = (ret != NULL); if (found) { target->ResetThreadStateNC(Thread::TSNC_ExistInThreadStore); s_pThreadStore->m_ThreadCount--; if (target->IsDead()) { s_pThreadStore->m_DeadThreadCount--; s_pThreadStore->DecrementDeadThreadCountForGCTrigger(); } // Unstarted threads are not in the Background count: if (target->IsUnstarted()) s_pThreadStore->m_UnstartedThreadCount--; else if (target->IsBackground()) s_pThreadStore->m_BackgroundThreadCount--; FastInterlockExchangeAddLong( (LONGLONG *)&Thread::s_workerThreadPoolCompletionCountOverflow, target->m_workerThreadPoolCompletionCount); FastInterlockExchangeAddLong( (LONGLONG *)&Thread::s_ioThreadPoolCompletionCountOverflow, target->m_ioThreadPoolCompletionCount); FastInterlockExchangeAddLong( (LONGLONG *)&Thread::s_monitorLockContentionCountOverflow, target->m_monitorLockContentionCount); _ASSERTE(s_pThreadStore->m_ThreadCount >= 0); _ASSERTE(s_pThreadStore->m_BackgroundThreadCount >= 0); _ASSERTE(s_pThreadStore->m_ThreadCount >= s_pThreadStore->m_BackgroundThreadCount); _ASSERTE(s_pThreadStore->m_ThreadCount >= s_pThreadStore->m_UnstartedThreadCount); _ASSERTE(s_pThreadStore->m_ThreadCount >= s_pThreadStore->m_DeadThreadCount); // One of the components of OtherThreadsComplete() has changed, so check whether // we should now exit the EE. CheckForEEShutdown(); } return found; } // When a thread is created as unstarted. Later it may get started, in which case // someone calls Thread::HasStarted() on that physical thread. This completes // the Setup and calls here. void ThreadStore::TransferStartedThread(Thread *thread) { CONTRACTL { NOTHROW; GC_TRIGGERS; PRECONDITION(thread != NULL); } CONTRACTL_END; _ASSERTE(GetThreadNULLOk() == thread); BOOL lockHeld = thread->HasThreadStateNC(Thread::TSNC_TSLTakenForStartup); // This ASSERT is correct for one of the following reasons. // - The lock is not currently held which means it will be taken below. // - The thread was created in an Unstarted state and the lock is // being held by the creator thread. The only thing we know for sure // is that the lock is held and not by this thread. _ASSERTE(!lockHeld || (lockHeld && !s_pThreadStore->m_holderthreadid.IsUnknown() && ((s_pThreadStore->m_HoldingThread != NULL) || IsGCSpecialThread()) && !ThreadStore::HoldingThreadStore())); LOG((LF_SYNC, INFO3, "TransferStartedThread obtain lock\n")); ThreadStoreLockHolder TSLockHolder(!lockHeld); _ASSERTE(s_pThreadStore->DbgFindThread(thread)); _ASSERTE(thread->HasValidThreadHandle()); _ASSERTE(thread->m_State & Thread::TS_WeOwn); _ASSERTE(thread->IsUnstarted()); _ASSERTE(!thread->IsDead()); // Of course, m_ThreadCount is already correct since it includes started and // unstarted threads. s_pThreadStore->m_UnstartedThreadCount--; // We only count background threads that have been started if (thread->IsBackground()) s_pThreadStore->m_BackgroundThreadCount++; _ASSERTE(s_pThreadStore->m_PendingThreadCount > 0); FastInterlockDecrement(&s_pThreadStore->m_PendingThreadCount); // As soon as we erase this bit, the thread becomes eligible for suspension, // stopping, interruption, etc. 
FastInterlockAnd((ULONG *) &thread->m_State, ~Thread::TS_Unstarted); FastInterlockOr((ULONG *) &thread->m_State, Thread::TS_LegalToJoin); // One of the components of OtherThreadsComplete() has changed, so check whether // we should now exit the EE. CheckForEEShutdown(); } LONG ThreadStore::s_DeadThreadCountThresholdForGCTrigger = 0; DWORD ThreadStore::s_DeadThreadGCTriggerPeriodMilliseconds = 0; SIZE_T *ThreadStore::s_DeadThreadGenerationCounts = nullptr; void ThreadStore::IncrementDeadThreadCountForGCTrigger() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; // Although all increments and decrements are usually done inside a lock, that is not sufficient to synchronize with a // background GC thread resetting this value, hence the interlocked operation. Ignore overflow; overflow would likely never // occur, the count is treated as unsigned, and nothing bad would happen if it were to overflow. SIZE_T count = static_cast<SIZE_T>(FastInterlockIncrement(&m_DeadThreadCountForGCTrigger)); SIZE_T countThreshold = static_cast<SIZE_T>(s_DeadThreadCountThresholdForGCTrigger); if (count < countThreshold || countThreshold == 0) { return; } IGCHeap *gcHeap = GCHeapUtilities::GetGCHeap(); if (gcHeap == nullptr) { return; } SIZE_T gcLastMilliseconds = gcHeap->GetLastGCStartTime(gcHeap->GetMaxGeneration()); SIZE_T gcNowMilliseconds = gcHeap->GetNow(); if (gcNowMilliseconds - gcLastMilliseconds < s_DeadThreadGCTriggerPeriodMilliseconds) { return; } if (!g_fEEStarted) // required for FinalizerThread::EnableFinalization() below { return; } // The GC is triggered on the finalizer thread since it's not safe to trigger it on DLL_THREAD_DETACH. // TriggerGCForDeadThreadsIfNecessary() will determine which generation of GC to trigger, and may not actually trigger a GC. // If a GC is triggered, since there would be a delay before the dead thread count is updated, clear the count and wait for // it to reach the threshold again. If a GC would not be triggered, the count is still cleared here to prevent waking up the // finalizer thread to do the work in TriggerGCForDeadThreadsIfNecessary() for every dead thread. m_DeadThreadCountForGCTrigger = 0; m_TriggerGCForDeadThreads = true; FinalizerThread::EnableFinalization(); } void ThreadStore::DecrementDeadThreadCountForGCTrigger() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; // Although all increments and decrements are usually done inside a lock, that is not sufficient to synchronize with a // background GC thread resetting this value, hence the interlocked operation. if (FastInterlockDecrement(&m_DeadThreadCountForGCTrigger) < 0) { m_DeadThreadCountForGCTrigger = 0; } } void ThreadStore::OnMaxGenerationGCStarted() { LIMITED_METHOD_CONTRACT; // A dead thread may contribute to triggering a GC at most once. After a max-generation GC occurs, if some dead thread // objects are still reachable due to references to the thread objects, they will not contribute to triggering a GC again. // Synchronize the store with increment/decrement operations occurring on different threads, and make the change visible to // other threads in order to prevent unnecessary GC triggers. 
FastInterlockExchange(&m_DeadThreadCountForGCTrigger, 0); } bool ThreadStore::ShouldTriggerGCForDeadThreads() { LIMITED_METHOD_CONTRACT; return m_TriggerGCForDeadThreads; } void ThreadStore::TriggerGCForDeadThreadsIfNecessary() { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; if (!m_TriggerGCForDeadThreads) { return; } m_TriggerGCForDeadThreads = false; if (g_fEEShutDown) { // Not safe to touch CLR state return; } unsigned gcGenerationToTrigger = 0; IGCHeap *gcHeap = GCHeapUtilities::GetGCHeap(); _ASSERTE(gcHeap != nullptr); SIZE_T generationCountThreshold = static_cast<SIZE_T>(s_DeadThreadCountThresholdForGCTrigger) / 2; unsigned maxGeneration = gcHeap->GetMaxGeneration(); if (!s_DeadThreadGenerationCounts) { // initialize this field on first use with an entry for every table. s_DeadThreadGenerationCounts = new (nothrow) SIZE_T[maxGeneration + 1]; if (!s_DeadThreadGenerationCounts) { return; } } memset(s_DeadThreadGenerationCounts, 0, sizeof(SIZE_T) * (maxGeneration + 1)); { ThreadStoreLockHolder threadStoreLockHolder; GCX_COOP(); // Determine the generation for which to trigger a GC. Iterate over all dead threads that have not yet been considered // for triggering a GC and see how many are in which generations. for (Thread *thread = ThreadStore::GetAllThreadList(NULL, Thread::TS_Dead, Thread::TS_Dead); thread != nullptr; thread = ThreadStore::GetAllThreadList(thread, Thread::TS_Dead, Thread::TS_Dead)) { if (thread->HasDeadThreadBeenConsideredForGCTrigger()) { continue; } Object *exposedObject = OBJECTREFToObject(thread->GetExposedObjectRaw()); if (exposedObject == nullptr) { continue; } unsigned exposedObjectGeneration = gcHeap->WhichGeneration(exposedObject); SIZE_T newDeadThreadGenerationCount = ++s_DeadThreadGenerationCounts[exposedObjectGeneration]; if (exposedObjectGeneration > gcGenerationToTrigger && newDeadThreadGenerationCount >= generationCountThreshold) { gcGenerationToTrigger = exposedObjectGeneration; if (gcGenerationToTrigger >= maxGeneration) { break; } } } // Make sure that enough time has elapsed since the last GC of the desired generation. We don't want to trigger GCs // based on this heuristic too often. Give it some time to let the memory pressure trigger GCs automatically, and only // if it doesn't in the given time, this heuristic may kick in to trigger a GC. SIZE_T gcLastMilliseconds = gcHeap->GetLastGCStartTime(gcGenerationToTrigger); SIZE_T gcNowMilliseconds = gcHeap->GetNow(); if (gcNowMilliseconds - gcLastMilliseconds < s_DeadThreadGCTriggerPeriodMilliseconds) { return; } // For threads whose exposed objects are in the generation of GC that will be triggered or in a lower GC generation, // mark them as having contributed to a GC trigger to prevent redundant GC triggers for (Thread *thread = ThreadStore::GetAllThreadList(NULL, Thread::TS_Dead, Thread::TS_Dead); thread != nullptr; thread = ThreadStore::GetAllThreadList(thread, Thread::TS_Dead, Thread::TS_Dead)) { if (thread->HasDeadThreadBeenConsideredForGCTrigger()) { continue; } Object *exposedObject = OBJECTREFToObject(thread->GetExposedObjectRaw()); if (exposedObject == nullptr) { continue; } if (gcGenerationToTrigger < maxGeneration && gcHeap->WhichGeneration(exposedObject) > gcGenerationToTrigger) { continue; } thread->SetHasDeadThreadBeenConsideredForGCTrigger(); } } // ThreadStoreLockHolder, GCX_COOP() GCHeapUtilities::GetGCHeap()->GarbageCollect(gcGenerationToTrigger, FALSE, collection_non_blocking); } #endif // #ifndef DACCESS_COMPILE // Access the list of threads. 
You must be inside a critical section, otherwise // the "cursor" thread might disappear underneath you. Pass in NULL for the // cursor to begin at the start of the list. Thread *ThreadStore::GetAllThreadList(Thread *cursor, ULONG mask, ULONG bits) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; SUPPORTS_DAC; #ifndef DACCESS_COMPILE _ASSERTE((s_pThreadStore->m_Crst.GetEnterCount() > 0) || IsAtProcessExit()); #endif while (TRUE) { cursor = (cursor ? s_pThreadStore->m_ThreadList.GetNext(cursor) : s_pThreadStore->m_ThreadList.GetHead()); if (cursor == NULL) break; if ((cursor->m_State & mask) == bits) return cursor; } return NULL; } // Iterate over the threads that have been started Thread *ThreadStore::GetThreadList(Thread *cursor) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; SUPPORTS_DAC; return GetAllThreadList(cursor, (Thread::TS_Unstarted | Thread::TS_Dead), 0); } //--------------------------------------------------------------------------------------- // // Grab a consistent snapshot of the thread's state, for reporting purposes only. // // Return Value: // the current state of the thread // Thread::ThreadState Thread::GetSnapshotState() { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; ThreadState res = m_State; if (res & TS_ReportDead) { res = (ThreadState) (res | TS_Dead); } return res; } #ifndef DACCESS_COMPILE BOOL CLREventWaitWithTry(CLREventBase *pEvent, DWORD timeout, BOOL fAlertable, DWORD *pStatus) { CONTRACTL { NOTHROW; WRAPPER(GC_TRIGGERS); } CONTRACTL_END; BOOL fLoop = TRUE; EX_TRY { *pStatus = pEvent->Wait(timeout, fAlertable); fLoop = FALSE; } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions); return fLoop; } // We shut down the EE only when all the non-background threads have terminated // (unless this is an exceptional termination). So the main thread calls here to // wait before tearing down the EE. void ThreadStore::WaitForOtherThreads() { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; CHECK_ONE_STORE(); Thread *pCurThread = GetThread(); // Regardless of whether the main thread is a background thread or not, force // it to be one. This simplifies our rules for counting non-background threads. pCurThread->SetBackground(TRUE); LOG((LF_SYNC, INFO3, "WaitForOtherThreads obtain lock\n")); ThreadStoreLockHolder TSLockHolder(TRUE); if (!OtherThreadsComplete()) { TSLockHolder.Release(); FastInterlockOr((ULONG *) &pCurThread->m_State, Thread::TS_ReportDead); DWORD ret = WAIT_OBJECT_0; while (CLREventWaitWithTry(&m_TerminationEvent, INFINITE, TRUE, &ret)) { } _ASSERTE(ret == WAIT_OBJECT_0); } } #ifdef _DEBUG BOOL ThreadStore::DbgFindThread(Thread *target) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; CHECK_ONE_STORE(); // Cache the current change stamp for g_TrapReturningThreads LONG chgStamp = g_trtChgStamp; STRESS_LOG3(LF_STORE, LL_INFO100, "ThreadStore::DbgFindThread - [thread=%p]. trt=%d. chgStamp=%d\n", GetThreadNULLOk(), g_TrapReturningThreads.Load(), chgStamp); #if 0 // g_TrapReturningThreads debug code. int iRetry = 0; Retry: #endif // g_TrapReturningThreads debug code. 
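    // Walk the entire thread list, recounting each category so the totals can be
    // cross-checked against the cached counters below.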
BOOL found = FALSE; Thread *cur = NULL; LONG cnt = 0; LONG cntBack = 0; LONG cntUnstart = 0; LONG cntDead = 0; LONG cntReturn = 0; while ((cur = GetAllThreadList(cur, 0, 0)) != NULL) { cnt++; if (cur->IsDead()) cntDead++; // Unstarted threads do not contribute to the count of background threads if (cur->IsUnstarted()) cntUnstart++; else if (cur->IsBackground()) cntBack++; if (cur == target) found = TRUE; // Note that (DebugSuspendPending | SuspendPending) implies a count of 2. // We don't count GCPending because a single trap is held for the entire // GC, instead of counting each interesting thread. if (cur->m_State & Thread::TS_DebugSuspendPending) cntReturn++; if (cur->m_TraceCallCount > 0) cntReturn++; if (cur->IsAbortRequested()) cntReturn++; } _ASSERTE(cnt == m_ThreadCount); _ASSERTE(cntUnstart == m_UnstartedThreadCount); _ASSERTE(cntBack == m_BackgroundThreadCount); _ASSERTE(cntDead == m_DeadThreadCount); _ASSERTE(0 <= m_PendingThreadCount); #if 0 // g_TrapReturningThreads debug code. if (cntReturn != g_TrapReturningThreads /*&& !g_fEEShutDown*/) { // If count is off, try again, to account for multiple threads. if (iRetry < 4) { // printf("Retry %d. cntReturn:%d, gReturn:%d\n", iRetry, cntReturn, g_TrapReturningThreads); ++iRetry; goto Retry; } printf("cnt:%d, Un:%d, Back:%d, Dead:%d, cntReturn:%d, TrapReturn:%d, eeShutdown:%d, threadShutdown:%d\n", cnt,cntUnstart,cntBack,cntDead,cntReturn,g_TrapReturningThreads, g_fEEShutDown, Thread::IsAtProcessExit()); LOG((LF_CORDB, LL_INFO1000, "SUSPEND: cnt:%d, Un:%d, Back:%d, Dead:%d, cntReturn:%d, TrapReturn:%d, eeShutdown:%d, threadShutdown:%d\n", cnt,cntUnstart,cntBack,cntDead,cntReturn,g_TrapReturningThreads, g_fEEShutDown, Thread::IsAtProcessExit()) ); //_ASSERTE(cntReturn + 2 >= g_TrapReturningThreads); } if (iRetry > 0 && iRetry < 4) { printf("%d retries to re-sync counted TrapReturn with global TrapReturn.\n", iRetry); } #endif // g_TrapReturningThreads debug code. STRESS_LOG4(LF_STORE, LL_INFO100, "ThreadStore::DbgFindThread - [thread=%p]. trt=%d. chg=%d. cnt=%d\n", GetThreadNULLOk(), g_TrapReturningThreads.Load(), g_trtChgStamp.Load(), cntReturn); // Because of race conditions and the fact that the GC places its // own count, I can't assert this precisely. But I do want to be // sure that this count isn't wandering ever higher -- with a // nasty impact on the performance of GC mode changes and method // call chaining! // // We don't bother asserting this during process exit, because // during a shutdown we will quietly terminate threads that are // being waited on. (If we aren't shutting down, we carefully // decrement our counts and alert anyone waiting for us to // return). // // Note: we don't actually assert this if // ThreadStore::TrapReturningThreads() updated g_TrapReturningThreads // between the beginning of this function and the moment of the assert. 
// *** The order of evaluation in the if condition is important *** _ASSERTE( (g_trtChgInFlight != 0 || (cntReturn + 2 >= g_TrapReturningThreads) || chgStamp != g_trtChgStamp) || g_fEEShutDown); return found; } #endif // _DEBUG void Thread::HandleThreadInterrupt () { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; // If we're waiting for shutdown, we don't want to abort/interrupt this thread if (HasThreadStateNC(Thread::TSNC_BlockedForShutdown)) return; if ((m_UserInterrupt & TI_Abort) != 0) { HandleThreadAbort(); } if ((m_UserInterrupt & TI_Interrupt) != 0) { ResetThreadState ((ThreadState)(TS_Interrupted | TS_Interruptible)); FastInterlockAnd ((DWORD*)&m_UserInterrupt, ~TI_Interrupt); COMPlusThrow(kThreadInterruptedException); } } #ifdef _DEBUG #define MAXSTACKBYTES (2 * GetOsPageSize()) void CleanStackForFastGCStress () { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; PVOID StackLimit = ClrTeb::GetStackLimit(); size_t nBytes = (size_t)&nBytes - (size_t)StackLimit; nBytes &= ~sizeof (size_t); if (nBytes > MAXSTACKBYTES) { nBytes = MAXSTACKBYTES; } size_t* buffer = (size_t*) _alloca (nBytes); memset(buffer, 0, nBytes); GetThread()->m_pCleanedStackBase = &nBytes; } void Thread::ObjectRefFlush(Thread* thread) { // this is debug only code, so no need to validate STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_ENTRY_POINT; _ASSERTE(thread->PreemptiveGCDisabled()); // Should have been in managed code memset(thread->dangerousObjRefs, 0, sizeof(thread->dangerousObjRefs)); thread->m_allObjRefEntriesBad = FALSE; CLEANSTACKFORFASTGCSTRESS (); } #endif #if defined(STRESS_HEAP) PtrHashMap *g_pUniqueStackMap = NULL; Crst *g_pUniqueStackCrst = NULL; #define UniqueStackDepth 8 BOOL StackCompare (UPTR val1, UPTR val2) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; size_t *p1 = (size_t *)(val1 << 1); size_t *p2 = (size_t *)val2; if (p1[0] != p2[0]) { return FALSE; } size_t nElem = p1[0]; if (nElem >= UniqueStackDepth) { nElem = UniqueStackDepth; } p1 ++; p2 ++; for (size_t n = 0; n < nElem; n ++) { if (p1[n] != p2[n]) { return FALSE; } } return TRUE; } void UniqueStackSetupMap() { WRAPPER_NO_CONTRACT; if (g_pUniqueStackCrst == NULL) { Crst *Attempt = new Crst ( CrstUniqueStack, CrstFlags(CRST_REENTRANCY | CRST_UNSAFE_ANYMODE)); if (FastInterlockCompareExchangePointer(&g_pUniqueStackCrst, Attempt, NULL) != NULL) { // We lost the race delete Attempt; } } // Now we have a Crst we can use to synchronize the remainder of the init. 
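    // Double-checked locking: re-test g_pUniqueStackMap under the Crst so that only one
    // thread allocates and publishes the hash map.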
if (g_pUniqueStackMap == NULL) { CrstHolder ch(g_pUniqueStackCrst); if (g_pUniqueStackMap == NULL) { PtrHashMap *map = new (SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()) PtrHashMap (); LockOwner lock = {g_pUniqueStackCrst, IsOwnerOfCrst}; map->Init (256, StackCompare, TRUE, &lock); g_pUniqueStackMap = map; } } } BOOL StartUniqueStackMapHelper() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; BOOL fOK = TRUE; EX_TRY { if (g_pUniqueStackMap == NULL) { UniqueStackSetupMap(); } } EX_CATCH { fOK = FALSE; } EX_END_CATCH(SwallowAllExceptions); return fOK; } BOOL StartUniqueStackMap () { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; return StartUniqueStackMapHelper(); } #ifndef TARGET_UNIX size_t UpdateStackHash(size_t hash, size_t retAddr) { return ((hash << 3) + hash) ^ retAddr; } /***********************************************************************/ size_t getStackHash(size_t* stackTrace, size_t* stackTop, size_t* stackStop, size_t stackBase, size_t stackLimit) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; // return a hash of every return address found between 'stackTop' (the lowest address) // and 'stackStop' (the highest address) size_t hash = 0; int idx = 0; #ifdef TARGET_X86 static size_t moduleBase = (size_t) -1; static size_t moduleTop = (size_t) -1; if (moduleTop == (size_t) -1) { MEMORY_BASIC_INFORMATION mbi; if (ClrVirtualQuery(getStackHash, &mbi, sizeof(mbi))) { moduleBase = (size_t)mbi.AllocationBase; moduleTop = (size_t)mbi.BaseAddress + mbi.RegionSize; } else { // way bad error, probably just assert and exit _ASSERTE (!"ClrVirtualQuery failed"); moduleBase = 0; moduleTop = 0; } } while (stackTop < stackStop) { // Clean out things that point to stack, as those can't be return addresses if (*stackTop > moduleBase && *stackTop < moduleTop) { TADDR dummy; if (isRetAddr((TADDR)*stackTop, &dummy)) { hash = UpdateStackHash(hash, *stackTop); // If there is no jitted code on the stack, then just use the // top 16 frames as the context. idx++; if (idx <= UniqueStackDepth) { stackTrace [idx] = *stackTop; } } } stackTop++; } #else // TARGET_X86 CONTEXT ctx; ClrCaptureContext(&ctx); UINT_PTR uControlPc = (UINT_PTR)GetIP(&ctx); UINT_PTR uImageBase; UINT_PTR uPrevControlPc = uControlPc; for (;;) { RtlLookupFunctionEntry(uControlPc, ARM_ONLY((DWORD*))(&uImageBase), NULL ); if (((UINT_PTR)GetClrModuleBase()) != uImageBase) { break; } uControlPc = Thread::VirtualUnwindCallFrame(&ctx); UINT_PTR uRetAddrForHash = uControlPc; if (uPrevControlPc == uControlPc) { // This is a special case when we fail to acquire the loader lock // in RtlLookupFunctionEntry(), which then returns false. The end // result is that we cannot go any further on the stack and // we will loop infinitely (because the owner of the loader lock // is blocked on us). hash = 0; break; } else { uPrevControlPc = uControlPc; } hash = UpdateStackHash(hash, uRetAddrForHash); // If there is no jitted code on the stack, then just use the // top 16 frames as the context. 
        idx++;
        if (idx <= UniqueStackDepth) {
            stackTrace [idx] = uRetAddrForHash;
        }
    }
#endif // TARGET_X86

    stackTrace [0] = idx;

    return(hash);
}

void UniqueStackHelper(size_t stackTraceHash, size_t *stackTrace)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    EX_TRY {
        size_t nElem = stackTrace[0];
        if (nElem >= UniqueStackDepth) {
            nElem = UniqueStackDepth;
        }
        AllocMemHolder<size_t> stackTraceInMap = SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(size_t *)) * (S_SIZE_T(nElem) + S_SIZE_T(1)));
        memcpy (stackTraceInMap, stackTrace, sizeof(size_t *) * (nElem + 1));
        g_pUniqueStackMap->InsertValue(stackTraceHash, stackTraceInMap);
        stackTraceInMap.SuppressRelease();
    }
    EX_CATCH {
    }
    EX_END_CATCH(SwallowAllExceptions);
}

/***********************************************************************/
/* returns true if this stack has not been seen before, useful for
   running tests only once per stack trace.  */

BOOL Thread::UniqueStack(void* stackStart)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    // If we were not told where to start, start at the caller of UniqueStack
    if (stackStart == 0)
    {
        stackStart = &stackStart;
    }

    if (g_pUniqueStackMap == NULL)
    {
        if (!StartUniqueStackMap ())
        {
            // We failed to initialize the unique stack map due to OOM.
            // Let's say the stack is unique.
            return TRUE;
        }
    }

    size_t stackTrace[UniqueStackDepth+1] = {0};

    // stackTraceHash represents a hash of the entire stack at the time we make the call.
    // We ensure at least one GC per unique stackTrace.  What information is contained in
    // 'stackTrace' is somewhat arbitrary.  We choose it to mean all functions live
    // on the stack up to the first jitted function.

    size_t stackTraceHash;
    Thread* pThread = GetThread();

    void* stopPoint = pThread->m_CacheStackBase;

#ifdef TARGET_X86
    // Find the stop point (most jitted function)
    Frame* pFrame = pThread->GetFrame();
    for(;;)
    {
        // skip GC frames
        if (pFrame == 0 || pFrame == (Frame*) -1)
            break;

        pFrame->GetFunction();      // This ensures that helper frames are initialized

        if (pFrame->GetReturnAddress() != 0)
        {
            stopPoint = pFrame;
            break;
        }
        pFrame = pFrame->Next();
    }
#endif // TARGET_X86

    // Get hash of all return addresses between here and the topmost jitted function
    stackTraceHash = getStackHash (stackTrace, (size_t*) stackStart, (size_t*) stopPoint,
        size_t(pThread->m_CacheStackBase), size_t(pThread->m_CacheStackLimit));

    if (stackTraceHash == 0 ||
        g_pUniqueStackMap->LookupValue (stackTraceHash, stackTrace) != (LPVOID)INVALIDENTRY)
    {
        return FALSE;
    }
    BOOL fUnique = FALSE;

    {
        CrstHolder ch(g_pUniqueStackCrst);
#ifdef _DEBUG
        if (GetThreadNULLOk())
            GetThread()->m_bUniqueStacking = TRUE;
#endif
        if (g_pUniqueStackMap->LookupValue (stackTraceHash, stackTrace) != (LPVOID)INVALIDENTRY)
        {
            fUnique = FALSE;
        }
        else
        {
            fUnique = TRUE;
            FAULT_NOT_FATAL();
            UniqueStackHelper(stackTraceHash, stackTrace);
        }
#ifdef _DEBUG
        if (GetThreadNULLOk())
            GetThread()->m_bUniqueStacking = FALSE;
#endif
    }

#ifdef _DEBUG
    static int fCheckStack = -1;
    if (fCheckStack == -1)
    {
        fCheckStack = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_FastGCCheckStack);
    }
    if (fCheckStack && pThread->m_pCleanedStackBase > stackTrace
        && pThread->m_pCleanedStackBase - stackTrace > (int) MAXSTACKBYTES)
    {
        _ASSERTE (!"Garbage on stack");
    }
#endif
    return fUnique;
}

#else // !TARGET_UNIX

BOOL Thread::UniqueStack(void* stackStart)
{
    return FALSE;
}

#endif // !TARGET_UNIX

#endif // STRESS_HEAP

/*
 * GetStackLowerBound
 *
 * Returns the lower bound of the stack space.  Note -- the practical bound is some number of pages
 * greater than this value -- those pages are reserved for stack overflow exception processing.
 *
 * Parameters:
 *  None
 *
 * Returns:
 *  address of the lower bound of the thread's stack.
 */
void * Thread::GetStackLowerBound()
{
    // Called during fiber switch.  Can not have non-static contract.
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;

#ifndef TARGET_UNIX
    MEMORY_BASIC_INFORMATION lowerBoundMemInfo;
    SIZE_T dwRes;

    dwRes = ClrVirtualQuery((const void *)&lowerBoundMemInfo, &lowerBoundMemInfo, sizeof(MEMORY_BASIC_INFORMATION));

    if (sizeof(MEMORY_BASIC_INFORMATION) == dwRes)
    {
        return (void *)(lowerBoundMemInfo.AllocationBase);
    }
    else
    {
        return NULL;
    }
#else // !TARGET_UNIX
    return PAL_GetStackLimit();
#endif // !TARGET_UNIX
}

/*
 * GetStackUpperBound
 *
 * Return the upper bound of the thread's stack space.
 *
 * Parameters:
 *  None
 *
 * Returns:
 *  address of the base of the thread's stack.
 */
void *Thread::GetStackUpperBound()
{
    // Called during fiber switch.  Can not have non-static contract.
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;

    return ClrTeb::GetStackBase();
}

BOOL Thread::SetStackLimits(SetStackLimitScope scope)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    if (scope == fAll)
    {
        m_CacheStackBase  = GetStackUpperBound();
        m_CacheStackLimit = GetStackLowerBound();
        if (m_CacheStackLimit == NULL)
        {
            _ASSERTE(!"Failed to set stack limits");
            return FALSE;
        }

        // Compute the limit used by EnsureSufficientExecutionStack and cache it on the thread. This minimum stack size should
        // be sufficient to allow a typical non-recursive call chain to execute, including potential exception handling and
        // garbage collection. Used for probing for available stack space through RuntimeImports.EnsureSufficientExecutionStack,
        // among other things.
#ifdef HOST_64BIT
        const UINT_PTR MinExecutionStackSize = 128 * 1024;
#else // !HOST_64BIT
        const UINT_PTR MinExecutionStackSize = 64 * 1024;
#endif // HOST_64BIT
        _ASSERTE(m_CacheStackBase >= m_CacheStackLimit);
        if ((reinterpret_cast<UINT_PTR>(m_CacheStackBase) - reinterpret_cast<UINT_PTR>(m_CacheStackLimit)) >
            MinExecutionStackSize)
        {
            m_CacheStackSufficientExecutionLimit = reinterpret_cast<UINT_PTR>(m_CacheStackLimit) + MinExecutionStackSize;
        }
        else
        {
            m_CacheStackSufficientExecutionLimit = reinterpret_cast<UINT_PTR>(m_CacheStackBase);
        }

        // Compute the limit used by CheckCanUseStackAlloc and cache it on the thread. This minimum stack size should
        // be sufficient to avoid all significant risk of a moderate size stack alloc interfering with application behavior
        const UINT_PTR StackAllocNonRiskyExecutionStackSize = 512 * 1024;
        _ASSERTE(m_CacheStackBase >= m_CacheStackLimit);
        if ((reinterpret_cast<UINT_PTR>(m_CacheStackBase) - reinterpret_cast<UINT_PTR>(m_CacheStackLimit)) >
            StackAllocNonRiskyExecutionStackSize)
        {
            m_CacheStackStackAllocNonRiskyExecutionLimit = reinterpret_cast<UINT_PTR>(m_CacheStackLimit) + StackAllocNonRiskyExecutionStackSize;
        }
        else
        {
            m_CacheStackStackAllocNonRiskyExecutionLimit = reinterpret_cast<UINT_PTR>(m_CacheStackBase);
        }
    }

    // Ensure that we've setup the stack guarantee properly before we cache the stack limits
    // as they depend upon the stack guarantee.
    if (FAILED(CLRSetThreadStackGuarantee()))
        return FALSE;

    return TRUE;
}

//---------------------------------------------------------------------------------------------
// Routines we use to manage a thread's stack, for fiber switching or stack overflow purposes.
//--------------------------------------------------------------------------------------------- HRESULT Thread::CLRSetThreadStackGuarantee(SetThreadStackGuaranteeScope fScope) { CONTRACTL { WRAPPER(NOTHROW); GC_NOTRIGGER; } CONTRACTL_END; #ifndef TARGET_UNIX // TODO: we need to measure what the stack usage needs are at the limits in the hosted scenario for host callbacks if (Thread::IsSetThreadStackGuaranteeInUse(fScope)) { // <TODO> Tune this as needed </TODO> ULONG uGuardSize = SIZEOF_DEFAULT_STACK_GUARANTEE; int EXTRA_PAGES = 0; #if defined(HOST_64BIT) // Free Build EH Stack Stats: // -------------------------------- // currently the maximum stack usage we'll face while handling a SO includes: // 4.3k for the OS (kernel32!RaiseException, Rtl EH dispatch code, RtlUnwindEx [second pass]) // 1.2k for the CLR EH setup (NakedThrowHelper*) // 4.5k for other heavy CLR stack creations (2x CONTEXT, 1x REGDISPLAY) // ~1.0k for other misc CLR stack allocations // ----- // 11.0k --> ~2.75 pages for CLR SO EH dispatch // // -plus we might need some more for debugger EH dispatch, Watson, etc... // -also need to take into account that we can lose up to 1 page of the guard region // -additionally, we need to provide some region to hosts to allow for lock acquisition in a hosted scenario // EXTRA_PAGES = 3; INDEBUG(EXTRA_PAGES += 1); int ThreadGuardPages = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_ThreadGuardPages); if (ThreadGuardPages == 0) { uGuardSize += (EXTRA_PAGES * GetOsPageSize()); } else { uGuardSize += (ThreadGuardPages * GetOsPageSize()); } #else // HOST_64BIT #ifdef _DEBUG uGuardSize += (1 * GetOsPageSize()); // one extra page for debug infrastructure #endif // _DEBUG #endif // HOST_64BIT LOG((LF_EH, LL_INFO10000, "STACKOVERFLOW: setting thread stack guarantee to 0x%x\n", uGuardSize)); if (!::SetThreadStackGuarantee(&uGuardSize)) { return HRESULT_FROM_GetLastErrorNA(); } } #endif // !TARGET_UNIX return S_OK; } /* * GetLastNormalStackAddress * * GetLastNormalStackAddress returns the last stack address before the guard * region of a thread. This is the last address that one could write to before * a stack overflow occurs. * * Parameters: * StackLimit - the base of the stack allocation * * Returns: * Address of the first page of the guard region. */ UINT_PTR Thread::GetLastNormalStackAddress(UINT_PTR StackLimit) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; UINT_PTR cbStackGuarantee = GetStackGuarantee(); // Here we take the "hard guard region size", the "stack guarantee" and the "fault page" and add them // all together. Note that the "fault page" is the reason for the extra GetOsPageSize() below. The OS // will guarantee us a certain amount of stack remaining after a stack overflow. This is called the // "stack guarantee". But to do this, it has to fault on the page before that region as the app is // allowed to fault at the very end of that page. So, as a result, the last normal stack address is // one page sooner. 
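    //
    // Worked example (illustrative numbers only, not real values; assumes a non-Unix target with
    // 4 KB OS pages, a one-page hard guard region, and a one-page stack guarantee):
    //
    //   StackLimit             = 0x10000000
    //   cbStackGuarantee       = 0x1000      (one page)
    //   fault page             = 0x1000      (the extra GetOsPageSize() term below)
    //   HARD_GUARD_REGION_SIZE = 0x1000
    //   --------------------------------------------------
    //   last normal address    = 0x10003000
    //
    // In this example, everything below 0x10003000 is guard / hard-guard territory, so 0x10003000
    // is the value this function would return.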
return StackLimit + (cbStackGuarantee #ifndef TARGET_UNIX + GetOsPageSize() #endif // !TARGET_UNIX + HARD_GUARD_REGION_SIZE); } #ifdef _DEBUG static void DebugLogMBIFlags(UINT uState, UINT uProtect) { CONTRACTL { NOTHROW; GC_NOTRIGGER; CANNOT_TAKE_LOCK; } CONTRACTL_END; #ifndef TARGET_UNIX #define LOG_FLAG(flags, name) \ if (flags & name) \ { \ LOG((LF_EH, LL_INFO1000, "" #name " ")); \ } \ if (uState) { LOG((LF_EH, LL_INFO1000, "State: ")); LOG_FLAG(uState, MEM_COMMIT); LOG_FLAG(uState, MEM_RESERVE); LOG_FLAG(uState, MEM_DECOMMIT); LOG_FLAG(uState, MEM_RELEASE); LOG_FLAG(uState, MEM_FREE); LOG_FLAG(uState, MEM_PRIVATE); LOG_FLAG(uState, MEM_MAPPED); LOG_FLAG(uState, MEM_RESET); LOG_FLAG(uState, MEM_TOP_DOWN); LOG_FLAG(uState, MEM_WRITE_WATCH); LOG_FLAG(uState, MEM_PHYSICAL); LOG_FLAG(uState, MEM_LARGE_PAGES); LOG_FLAG(uState, MEM_4MB_PAGES); } if (uProtect) { LOG((LF_EH, LL_INFO1000, "Protect: ")); LOG_FLAG(uProtect, PAGE_NOACCESS); LOG_FLAG(uProtect, PAGE_READONLY); LOG_FLAG(uProtect, PAGE_READWRITE); LOG_FLAG(uProtect, PAGE_WRITECOPY); LOG_FLAG(uProtect, PAGE_EXECUTE); LOG_FLAG(uProtect, PAGE_EXECUTE_READ); LOG_FLAG(uProtect, PAGE_EXECUTE_READWRITE); LOG_FLAG(uProtect, PAGE_EXECUTE_WRITECOPY); LOG_FLAG(uProtect, PAGE_GUARD); LOG_FLAG(uProtect, PAGE_NOCACHE); LOG_FLAG(uProtect, PAGE_WRITECOMBINE); } #undef LOG_FLAG #endif // !TARGET_UNIX } static void DebugLogStackRegionMBIs(UINT_PTR uLowAddress, UINT_PTR uHighAddress) { CONTRACTL { NOTHROW; GC_NOTRIGGER; CANNOT_TAKE_LOCK; } CONTRACTL_END; MEMORY_BASIC_INFORMATION meminfo; UINT_PTR uStartOfThisRegion = uLowAddress; LOG((LF_EH, LL_INFO1000, "----------------------------------------------------------------------\n")); while (uStartOfThisRegion < uHighAddress) { SIZE_T res = ClrVirtualQuery((const void *)uStartOfThisRegion, &meminfo, sizeof(meminfo)); if (sizeof(meminfo) != res) { LOG((LF_EH, LL_INFO1000, "VirtualQuery failed on %p\n", uStartOfThisRegion)); break; } UINT_PTR uStartOfNextRegion = uStartOfThisRegion + meminfo.RegionSize; if (uStartOfNextRegion > uHighAddress) { uStartOfNextRegion = uHighAddress; } UINT_PTR uRegionSize = uStartOfNextRegion - uStartOfThisRegion; LOG((LF_EH, LL_INFO1000, "0x%p -> 0x%p (%d pg) ", uStartOfThisRegion, uStartOfNextRegion - 1, uRegionSize / GetOsPageSize())); DebugLogMBIFlags(meminfo.State, meminfo.Protect); LOG((LF_EH, LL_INFO1000, "\n")); uStartOfThisRegion = uStartOfNextRegion; } LOG((LF_EH, LL_INFO1000, "----------------------------------------------------------------------\n")); } // static void Thread::DebugLogStackMBIs() { CONTRACTL { NOTHROW; GC_NOTRIGGER; CANNOT_TAKE_LOCK; } CONTRACTL_END; Thread* pThread = GetThreadNULLOk(); // N.B. this can be NULL! 
UINT_PTR uStackLimit = (UINT_PTR)GetStackLowerBound(); UINT_PTR uStackBase = (UINT_PTR)GetStackUpperBound(); if (pThread) { uStackLimit = (UINT_PTR)pThread->GetCachedStackLimit(); uStackBase = (UINT_PTR)pThread->GetCachedStackBase(); } else { uStackLimit = (UINT_PTR)GetStackLowerBound(); uStackBase = (UINT_PTR)GetStackUpperBound(); } UINT_PTR uStackSize = uStackBase - uStackLimit; LOG((LF_EH, LL_INFO1000, "----------------------------------------------------------------------\n")); LOG((LF_EH, LL_INFO1000, "Stack Snapshot 0x%p -> 0x%p (%d pg)\n", uStackLimit, uStackBase, uStackSize / GetOsPageSize())); if (pThread) { LOG((LF_EH, LL_INFO1000, "Last normal addr: 0x%p\n", pThread->GetLastNormalStackAddress())); } DebugLogStackRegionMBIs(uStackLimit, uStackBase); } #endif // _DEBUG NOINLINE void AllocateSomeStack(){ LIMITED_METHOD_CONTRACT; #ifdef TARGET_X86 const size_t size = 0x200; #else //TARGET_X86 const size_t size = 0x400; #endif //TARGET_X86 INT8* mem = (INT8*)_alloca(size); // Actually touch the memory we just allocated so the compiler can't // optimize it away completely. // NOTE: this assumes the stack grows down (towards 0). VolatileStore<INT8>(mem, 0); } #ifndef TARGET_UNIX // static // private BOOL Thread::DoesRegionContainGuardPage(UINT_PTR uLowAddress, UINT_PTR uHighAddress) { CONTRACTL { NOTHROW; GC_NOTRIGGER; CANNOT_TAKE_LOCK; } CONTRACTL_END; SIZE_T dwRes; MEMORY_BASIC_INFORMATION meminfo; UINT_PTR uStartOfCurrentRegion = uLowAddress; while (uStartOfCurrentRegion < uHighAddress) { #undef VirtualQuery // This code can run below YieldTask, which means that it must not call back into the host. // The reason is that YieldTask is invoked by the host, and the host needs not be reentrant. dwRes = VirtualQuery((const void *)uStartOfCurrentRegion, &meminfo, sizeof(meminfo)); #define VirtualQuery(lpAddress, lpBuffer, dwLength) Dont_Use_VirtualQuery(lpAddress, lpBuffer, dwLength) // If the query fails then assume we have no guard page. if (sizeof(meminfo) != dwRes) { return FALSE; } if (meminfo.Protect & PAGE_GUARD) { return TRUE; } uStartOfCurrentRegion += meminfo.RegionSize; } return FALSE; } #endif // !TARGET_UNIX /* * DetermineIfGuardPagePresent * * DetermineIfGuardPagePresent returns TRUE if the thread's stack contains a proper guard page. This function makes * a physical check of the stack, rather than relying on whether or not the CLR is currently processing a stack * overflow exception. * * It seems reasonable to want to check just the 3rd page for !MEM_COMMIT or PAGE_GUARD, but that's no good in a * world where a) one can extend the guard region arbitrarily with SetThreadStackGuarantee(), b) a thread's stack * could be pre-committed, and c) another lib might reset the guard page very high up on the stack, much as we * do. In that world, we have to do VirtualQuery from the lower bound up until we find a region with PAGE_GUARD on * it. If we've never SO'd, then that's two calls to VirtualQuery. * * Parameters: * None * * Returns: * TRUE if the thread has a guard page, FALSE otherwise. */ BOOL Thread::DetermineIfGuardPagePresent() { CONTRACTL { NOTHROW; GC_NOTRIGGER; CANNOT_TAKE_LOCK; } CONTRACTL_END; #ifndef TARGET_UNIX BOOL bStackGuarded = FALSE; UINT_PTR uStackBase = (UINT_PTR)GetCachedStackBase(); UINT_PTR uStackLimit = (UINT_PTR)GetCachedStackLimit(); // Note: we start our queries after the hard guard page (one page up from the base of the stack.) 
We know the // very last region of the stack is never the guard page (its always the uncomitted "hard" guard page) so there's // no need to waste a query on it. bStackGuarded = DoesRegionContainGuardPage(uStackLimit + HARD_GUARD_REGION_SIZE, uStackBase); LOG((LF_EH, LL_INFO10000, "Thread::DetermineIfGuardPagePresent: stack guard page: %s\n", bStackGuarded ? "PRESENT" : "MISSING")); return bStackGuarded; #else // !TARGET_UNIX return TRUE; #endif // !TARGET_UNIX } /* * GetLastNormalStackAddress * * GetLastNormalStackAddress returns the last stack address before the guard * region of this thread. This is the last address that one could write to * before a stack overflow occurs. * * Parameters: * None * * Returns: * Address of the first page of the guard region. */ UINT_PTR Thread::GetLastNormalStackAddress() { WRAPPER_NO_CONTRACT; return GetLastNormalStackAddress((UINT_PTR)m_CacheStackLimit); } /* * GetStackGuarantee * * Returns the amount of stack guaranteed after an SO but before the OS rips the process. * * Parameters: * none * * Returns: * The stack guarantee in OS pages. */ UINT_PTR Thread::GetStackGuarantee() { WRAPPER_NO_CONTRACT; #ifndef TARGET_UNIX // There is a new API available on new OS's called SetThreadStackGuarantee. It allows you to change the size of // the guard region on a per-thread basis. If we're running on an OS that supports the API, then we must query // it to see if someone has changed the size of the guard region for this thread. if (!IsSetThreadStackGuaranteeInUse()) { return SIZEOF_DEFAULT_STACK_GUARANTEE; } ULONG cbNewStackGuarantee = 0; // Passing in a value of 0 means that we're querying, and the value is changed with the new guard region // size. if (::SetThreadStackGuarantee(&cbNewStackGuarantee) && (cbNewStackGuarantee != 0)) { return cbNewStackGuarantee; } #endif // TARGET_UNIX return SIZEOF_DEFAULT_STACK_GUARANTEE; } #ifndef TARGET_UNIX // // MarkPageAsGuard // // Given a page base address, try to turn it into a guard page and then requery to determine success. // // static // private BOOL Thread::MarkPageAsGuard(UINT_PTR uGuardPageBase) { CONTRACTL { NOTHROW; GC_NOTRIGGER; CANNOT_TAKE_LOCK; } CONTRACTL_END; DWORD flOldProtect; ClrVirtualProtect((LPVOID)uGuardPageBase, 1, (PAGE_READWRITE | PAGE_GUARD), &flOldProtect); // Intentionally ignore return value -- if it failed, we'll find out below // and keep moving up the stack until we either succeed or we hit the guard // region. If we don't succeed before we hit the guard region, we'll end up // with a fatal error. // Now, make sure the guard page is really there. If its not, then VirtualProtect most likely failed // because our stack had grown onto the page we were trying to protect by the time we made it into // VirtualProtect. So try the next page down. MEMORY_BASIC_INFORMATION meminfo; SIZE_T dwRes; dwRes = ClrVirtualQuery((const void *)uGuardPageBase, &meminfo, sizeof(meminfo)); return ((sizeof(meminfo) == dwRes) && (meminfo.Protect & PAGE_GUARD)); } /* * RestoreGuardPage * * RestoreGuardPage will replace the guard page on this thread's stack. The assumption is that it was removed by * the OS due to a stack overflow exception. This function requires that you know that you have enough stack space * to restore the guard page, so make sure you know what you're doing when you decide to call this. 
* * Parameters: * None * * Returns: * Nothing */ VOID Thread::RestoreGuardPage() { CONTRACTL { NOTHROW; GC_NOTRIGGER; CANNOT_TAKE_LOCK; } CONTRACTL_END; BOOL bStackGuarded = DetermineIfGuardPagePresent(); // If the guard page is still there, then just return. if (bStackGuarded) { LOG((LF_EH, LL_INFO100, "Thread::RestoreGuardPage: no need to restore... guard page is already there.\n")); return; } UINT_PTR approxStackPointer; UINT_PTR guardPageBase; UINT_PTR guardRegionThreshold; BOOL pageMissing; if (!bStackGuarded) { // The normal guard page is the 3rd page from the base. The first page is the "hard" guard, the second one is // reserve, and the 3rd one is marked as a guard page. However, since there is now an API (on some platforms) // to change the size of the guard region, we'll just go ahead and protect the next page down from where we are // now. The guard page will get pushed forward again, just like normal, until the next stack overflow. approxStackPointer = (UINT_PTR)GetCurrentSP(); guardPageBase = (UINT_PTR)ALIGN_DOWN(approxStackPointer, GetOsPageSize()) - GetOsPageSize(); // OS uses soft guard page to update the stack info in TEB. If our guard page is not beyond the current stack, the TEB // will not be updated, and then OS's check of stack during exception will fail. if (approxStackPointer >= guardPageBase) { guardPageBase -= GetOsPageSize(); } // If we're currently "too close" to the page we want to mark as a guard then the call to VirtualProtect to set // PAGE_GUARD will fail, but it won't return an error. Therefore, we protect the page, then query it to make // sure it worked. If it didn't, we try the next page down. We'll either find a page to protect, or run into // the guard region and rip the process down with EEPOLICY_HANDLE_FATAL_ERROR below. guardRegionThreshold = GetLastNormalStackAddress(); pageMissing = TRUE; while (pageMissing) { LOG((LF_EH, LL_INFO10000, "Thread::RestoreGuardPage: restoring guard page @ 0x%p, approxStackPointer=0x%p, " "last normal stack address=0x%p\n", guardPageBase, approxStackPointer, guardRegionThreshold)); // Make sure we set the guard page above the guard region. if (guardPageBase < guardRegionThreshold) { goto lFatalError; } if (MarkPageAsGuard(guardPageBase)) { // The current GuardPage should be beyond the current SP. _ASSERTE (guardPageBase < approxStackPointer); pageMissing = FALSE; } else { guardPageBase -= GetOsPageSize(); } } } INDEBUG(DebugLogStackMBIs()); return; lFatalError: STRESS_LOG2(LF_EH, LL_ALWAYS, "Thread::RestoreGuardPage: too close to the guard region (0x%p) to restore guard page @0x%p\n", guardRegionThreshold, guardPageBase); _ASSERTE(!"Too close to the guard page to reset it!"); EEPOLICY_HANDLE_FATAL_ERROR(COR_E_STACKOVERFLOW); } #endif // !TARGET_UNIX #endif // #ifndef DACCESS_COMPILE // // InitRegDisplay: initializes a REGDISPLAY for a thread. If validContext // is false, pRD is filled from the current context of the thread. The // thread's current context is also filled in pctx. If validContext is true, // pctx should point to a valid context and pRD is filled from that. 
// bool Thread::InitRegDisplay(const PREGDISPLAY pRD, PT_CONTEXT pctx, bool validContext) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; if (!validContext) { if (GetFilterContext()!= NULL) { pctx = GetFilterContext(); } else { #ifdef DACCESS_COMPILE DacNotImpl(); #else pctx->ContextFlags = CONTEXT_FULL; _ASSERTE(this != GetThreadNULLOk()); // do not call GetThreadContext on the active thread BOOL ret = EEGetThreadContext(this, pctx); if (!ret) { SetIP(pctx, 0); #ifdef TARGET_X86 pRD->ControlPC = pctx->Eip; pRD->PCTAddr = (TADDR)&(pctx->Eip); #elif defined(TARGET_AMD64) // nothing more to do here, on Win64 setting the IP to 0 is enough. #elif defined(TARGET_ARM) // nothing more to do here, on Win64 setting the IP to 0 is enough. #else PORTABILITY_ASSERT("NYI for platform Thread::InitRegDisplay"); #endif return false; } #endif // DACCESS_COMPILE } } FillRegDisplay( pRD, pctx ); return true; } void Thread::FillRegDisplay(const PREGDISPLAY pRD, PT_CONTEXT pctx) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; ::FillRegDisplay(pRD, pctx); #if defined(DEBUG_REGDISPLAY) && !defined(TARGET_X86) CONSISTENCY_CHECK(!pRD->_pThread || pRD->_pThread == this); pRD->_pThread = this; CheckRegDisplaySP(pRD); #endif // defined(DEBUG_REGDISPLAY) && !defined(TARGET_X86) } #ifdef DEBUG_REGDISPLAY void CheckRegDisplaySP (REGDISPLAY *pRD) { if (pRD->SP && pRD->_pThread) { #ifndef NO_FIXED_STACK_LIMIT _ASSERTE(pRD->_pThread->IsExecutingOnAltStack() || PTR_VOID(pRD->SP) >= pRD->_pThread->GetCachedStackLimit()); #endif // NO_FIXED_STACK_LIMIT _ASSERTE(pRD->_pThread->IsExecutingOnAltStack() || PTR_VOID(pRD->SP) < pRD->_pThread->GetCachedStackBase()); } } #endif // DEBUG_REGDISPLAY // Trip Functions // ============== // When a thread reaches a safe place, it will rendezvous back with us, via one of // the following trip functions: void CommonTripThread() { #ifndef DACCESS_COMPILE CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; Thread *thread = GetThread(); thread->HandleThreadAbort (); if (thread->CatchAtSafePoint()) { _ASSERTE(!ThreadStore::HoldingThreadStore(thread)); #ifdef FEATURE_HIJACK thread->UnhijackThread(); #endif // FEATURE_HIJACK // Trap thread->PulseGCMode(); } #else DacNotImpl(); #endif // #ifndef DACCESS_COMPILE } #ifndef DACCESS_COMPILE void Thread::SetFilterContext(CONTEXT *pContext) { // SetFilterContext is like pushing a Frame onto the Frame chain. CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_COOPERATIVE; // Absolutely must be in coop to coordinate w/ Runtime suspension. PRECONDITION(GetThread() == this); // must be on current thread. 
} CONTRACTL_END; m_debuggerFilterContext = pContext; } #endif // #ifndef DACCESS_COMPILE T_CONTEXT *Thread::GetFilterContext(void) { LIMITED_METHOD_DAC_CONTRACT; return m_debuggerFilterContext; } #ifndef DACCESS_COMPILE void Thread::ClearContext() { CONTRACTL { NOTHROW; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; if (!m_pDomain) return; // must set exposed context to null first otherwise object verification // checks will fail AV when m_Context is null m_pDomain = NULL; #ifdef FEATURE_COMINTEROP m_fDisableComObjectEagerCleanup = false; #endif //FEATURE_COMINTEROP } BOOL Thread::HaveExtraWorkForFinalizer() { LIMITED_METHOD_CONTRACT; return RequireSyncBlockCleanup() || ThreadpoolMgr::HaveTimerInfosToFlush() || Thread::CleanupNeededForFinalizedThread() || (m_DetachCount > 0) || SystemDomain::System()->RequireAppDomainCleanup() || YieldProcessorNormalization::IsMeasurementScheduled() || ThreadStore::s_pThreadStore->ShouldTriggerGCForDeadThreads(); } void Thread::DoExtraWorkForFinalizer() { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; _ASSERTE(GetThread() == this); _ASSERTE(this == FinalizerThread::GetFinalizerThread()); #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT if (RequiresCoInitialize()) { SetApartment(AS_InMTA); } #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT if (RequireSyncBlockCleanup()) { #ifndef TARGET_UNIX InteropSyncBlockInfo::FlushStandbyList(); #endif // !TARGET_UNIX #ifdef FEATURE_COMINTEROP RCW::FlushStandbyList(); #endif // FEATURE_COMINTEROP SyncBlockCache::GetSyncBlockCache()->CleanupSyncBlocks(); } if (SystemDomain::System()->RequireAppDomainCleanup()) { SystemDomain::System()->ProcessDelayedUnloadLoaderAllocators(); } if(m_DetachCount > 0 || Thread::CleanupNeededForFinalizedThread()) { Thread::CleanupDetachedThreads(); } // If there were any TimerInfos waiting to be released, they'll get flushed now ThreadpoolMgr::FlushQueueOfTimerInfos(); if (YieldProcessorNormalization::IsMeasurementScheduled()) { GCX_PREEMP(); YieldProcessorNormalization::PerformMeasurement(); } ThreadStore::s_pThreadStore->TriggerGCForDeadThreadsIfNecessary(); } // HELPERS FOR THE BASE OF A MANAGED THREAD, INCLUDING AD TRANSITION SUPPORT // We have numerous places where we start up a managed thread. This includes several places in the // ThreadPool, the 'new Thread(...).Start()' case, and the Finalizer. Try to factor the code so our // base exception handling behavior is consistent across those places. The resulting code is convoluted, // but it's better than the prior situation of each thread being on a different plan. // We need Middle & Outer methods for the usual problem of combining C++ & SEH. /* The effect of all this is that we get: Base of thread -- OS unhandled exception filter that we hook SEH handler from DispatchOuter C++ handler from DispatchMiddle User code that obviously can throw. */ struct ManagedThreadCallState { ADCallBackFcnType pTarget; LPVOID args; UnhandledExceptionLocation filterType; ManagedThreadCallState(ADCallBackFcnType Target,LPVOID Args, UnhandledExceptionLocation FilterType): pTarget(Target), args(Args), filterType(FilterType) { LIMITED_METHOD_CONTRACT; }; }; // The following static helpers are outside of the ManagedThreadBase struct because I // don't want to change threads.h whenever I change the mechanism for how unhandled // exceptions works. The ManagedThreadBase struct is for the public exposure of the // API only. 
static void ManagedThreadBase_DispatchOuter(ManagedThreadCallState *pCallState); static void ManagedThreadBase_DispatchInner(ManagedThreadCallState *pCallState) { CONTRACTL { GC_TRIGGERS; THROWS; MODE_COOPERATIVE; } CONTRACTL_END; // Go ahead and dispatch the call. (*pCallState->pTarget) (pCallState->args); } static void ManagedThreadBase_DispatchMiddle(ManagedThreadCallState *pCallState) { STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_THROWS; STATIC_CONTRACT_MODE_COOPERATIVE; EX_TRY_CPP_ONLY { // During an unwind, we have some cleanup: // // 1) We should no longer suppress any unhandled exception reporting at the base // of the thread, because any handler that contained the exception to the AppDomain // where it occurred is now being removed from the stack. // // 2) We need to unwind the Frame chain. We cannot do it when we get to the __except clause // because at this point we are in the 2nd phase and the stack has been popped. Any // stack crawling from another thread will see a frame chain in a popped region of stack. // Nor can we pop it in a filter, since this would destroy all the stack-walking information // we need to perform the 2nd pass. So doing it in a C++ destructor will ensure it happens // during the 2nd pass but before the stack is actually popped. class Cleanup { Frame *m_pEntryFrame; Thread *m_pThread; public: Cleanup(Thread* pThread) { m_pThread = pThread; m_pEntryFrame = pThread->m_pFrame; } ~Cleanup() { GCX_COOP(); m_pThread->SetFrame(m_pEntryFrame); } }; Cleanup cleanup(GetThread()); ManagedThreadBase_DispatchInner(pCallState); } EX_CATCH_CPP_ONLY { GCX_COOP(); Exception *pException = GET_EXCEPTION(); // RudeThreadAbort is a pre-allocated instance of ThreadAbort. So the following is sufficient. // For Whidbey, by default only swallow certain exceptions. If reverting back to Everett's // behavior (swallowing all unhandled exception), then swallow all unhandled exception. // if (IsExceptionOfType(kThreadAbortException, pException)) { // Do nothing to swallow the exception } else { // Setting up the unwind_and_continue_handler ensures that C++ exceptions do not leak out. // // Without unwind_and_continue_handler below, the exception will fly up the stack to // this point, where it will be rethrown and thus leak out. INSTALL_UNWIND_AND_CONTINUE_HANDLER; EX_RETHROW; UNINSTALL_UNWIND_AND_CONTINUE_HANDLER; } } EX_END_CATCH(SwallowAllExceptions); } /* typedef struct Param { ManagedThreadCallState * m_pCallState; Frame * m_pFrame; Param(ManagedThreadCallState * pCallState, Frame * pFrame): m_pCallState(pCallState), m_pFrame(pFrame) {} } TryParam; */ typedef struct Param: public NotifyOfCHFFilterWrapperParam { ManagedThreadCallState * m_pCallState; Param(ManagedThreadCallState * pCallState): m_pCallState(pCallState) {} } TryParam; // Dispatch to the appropriate filter, based on the active CallState. static LONG ThreadBaseRedirectingFilter(PEXCEPTION_POINTERS pExceptionInfo, LPVOID pParam) { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_MODE_ANY; TryParam * pRealParam = reinterpret_cast<TryParam *>(pParam); ManagedThreadCallState * _pCallState = pRealParam->m_pCallState; LONG ret = -1; // This will invoke the swallowing filter. If that returns EXCEPTION_CONTINUE_SEARCH, // it will trigger unhandled exception processing. 
// WARNING - ThreadBaseExceptionAppDomainFilter may not return // This occurs when the debugger decides to intercept an exception and catch it in a frame closer // to the leaf than the one executing this filter ret = ThreadBaseExceptionAppDomainFilter(pExceptionInfo, _pCallState); // Although EXCEPTION_EXECUTE_HANDLER can also be returned in cases corresponding to // unhandled exceptions, all of those cases have already notified the debugger of an unhandled // exception which prevents a second notification indicating the exception was caught if (ret == EXCEPTION_EXECUTE_HANDLER) { // WARNING - NotifyOfCHFFilterWrapper may not return // This occurs when the debugger decides to intercept an exception and catch it in a frame closer // to the leaf than the one executing this filter NotifyOfCHFFilterWrapper(pExceptionInfo, pRealParam); } // Get the reference to the current thread. Thread *pCurThread = GetThread(); // // In the default domain, when an exception goes unhandled on a managed thread whose threadbase is in the VM (e.g. explicitly spawned threads, // ThreadPool threads, finalizer thread, etc), CLR can end up in the unhandled exception processing path twice. // // The first attempt to perform UE processing happens at the managed thread base (via this function). When it completes, // we will set TSNC_ProcessedUnhandledException state against the thread to indicate that we have performed the unhandled exception processing. // // On CoreSys CoreCLR, the host can ask CoreCLR to run all code in the default domain. As a result, when we return from the first attempt to perform UE // processing, the call could return back with EXCEPTION_EXECUTE_HANDLER since, like desktop CoreCLR is instructed by SL host to swallow all unhandled exceptions, // CoreSys CoreCLR can also be instructed by its Phone host to swallow all unhandled exceptions. As a result, the exception dispatch will never continue to go upstack // to the native threadbase in the OS kernel and thus, there will never be a second attempt to perform UE processing. Hence, we don't, and shouldn't, need to set // TSNC_ProcessedUnhandledException state against the thread if we are in SingleAppDomain mode and have been asked to swallow the exception. // // If we continue to set TSNC_ProcessedUnhandledException and a ThreadPool Thread A has an exception go unhandled, we will swallow it correctly for the first time. // The next time Thread A has an exception go unhandled, our UEF will see TSNC_ProcessedUnhandledException set and assume (incorrectly) UE processing has happened and // will fail to honor the host policy (e.g. swallow unhandled exceptions). Thus, the 2nd unhandled exception may end up crashing the app when it should not. // if (ret != EXCEPTION_EXECUTE_HANDLER) { LOG((LF_EH, LL_INFO100, "ThreadBaseRedirectingFilter: setting TSNC_ProcessedUnhandledException\n")); // Since we have already done unhandled exception processing for it, we don't want it // to happen again if our UEF gets invoked upon returning back to the OS. // // Set the flag to indicate so.
pCurThread->SetThreadStateNC(Thread::TSNC_ProcessedUnhandledException); } return ret; } static void ManagedThreadBase_DispatchOuter(ManagedThreadCallState *pCallState) { STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_THROWS; STATIC_CONTRACT_MODE_COOPERATIVE; // HasStarted() must have already been performed by our caller _ASSERTE(GetThreadNULLOk() != NULL); Thread *pThread = GetThread(); #ifdef FEATURE_EH_FUNCLETS Frame *pFrame = pThread->m_pFrame; #endif // FEATURE_EH_FUNCLETS // The sole purpose of having this frame is to tell the debugger that we have a catch handler here // which may swallow managed exceptions. The debugger needs this in order to send a // CatchHandlerFound (CHF) notification. FrameWithCookie<DebuggerU2MCatchHandlerFrame> catchFrame; TryParam param(pCallState); param.pFrame = &catchFrame; struct TryArgs { TryParam *pTryParam; Thread *pThread; BOOL *pfHadException; #ifdef FEATURE_EH_FUNCLETS Frame *pFrame; #endif // FEATURE_EH_FUNCLETS }args; args.pTryParam = &param; args.pThread = pThread; BOOL fHadException = TRUE; args.pfHadException = &fHadException; #ifdef FEATURE_EH_FUNCLETS args.pFrame = pFrame; #endif // FEATURE_EH_FUNCLETS PAL_TRY(TryArgs *, pArgs, &args) { PAL_TRY(TryParam *, pParam, pArgs->pTryParam) { ManagedThreadBase_DispatchMiddle(pParam->m_pCallState); } PAL_EXCEPT_FILTER(ThreadBaseRedirectingFilter) { // Note: one of our C++ exceptions will never reach this filter because they're always caught by // the EX_CATCH in ManagedThreadBase_DispatchMiddle(). // // If eCLRDeterminedPolicy, we only swallow for TA, RTA, and ADU exception. // For eHostDeterminedPolicy, we will swallow all the managed exception. #ifdef FEATURE_EH_FUNCLETS // this must be done after the second pass has run, it does not // reference anything on the stack, so it is safe to run in an // SEH __except clause as well as a C++ catch clause. ExceptionTracker::PopTrackers(pArgs->pFrame); #endif // FEATURE_EH_FUNCLETS _ASSERTE(!pArgs->pThread->IsAbortRequested()); } PAL_ENDTRY; *(pArgs->pfHadException) = FALSE; } PAL_FINALLY { catchFrame.Pop(); } PAL_ENDTRY; } // For the implementation, there are three variants of work possible: // 1. Establish the base of a managed thread, and switch to the correct AppDomain. static void ManagedThreadBase_FullTransition(ADCallBackFcnType pTarget, LPVOID args, UnhandledExceptionLocation filterType) { CONTRACTL { GC_TRIGGERS; THROWS; MODE_COOPERATIVE; } CONTRACTL_END; ManagedThreadCallState CallState(pTarget, args, filterType); ManagedThreadBase_DispatchOuter(&CallState); } // 2. Establish the base of a managed thread, but the AppDomain transition must be // deferred until later. void ManagedThreadBase_NoADTransition(ADCallBackFcnType pTarget, UnhandledExceptionLocation filterType) { CONTRACTL { GC_TRIGGERS; THROWS; MODE_COOPERATIVE; } CONTRACTL_END; AppDomain *pAppDomain = GetAppDomain(); ManagedThreadCallState CallState(pTarget, NULL, filterType); // self-describing, to create a pTurnAround data for eventual delivery to a subsequent AppDomain // transition. 
CallState.args = &CallState; ManagedThreadBase_DispatchOuter(&CallState); } // And here are the various exposed entrypoints for base thread behavior // The 'new Thread(...).Start()' case from COMSynchronizable kickoff thread worker void ManagedThreadBase::KickOff(ADCallBackFcnType pTarget, LPVOID args) { WRAPPER_NO_CONTRACT; ManagedThreadBase_FullTransition(pTarget, args, ManagedThread); } // The IOCompletion, QueueUserWorkItem, AddTimer, RegisterWaitForSingleObject cases in the ThreadPool void ManagedThreadBase::ThreadPool(ADCallBackFcnType pTarget, LPVOID args) { WRAPPER_NO_CONTRACT; ManagedThreadBase_FullTransition(pTarget, args, ThreadPoolThread); } // The Finalizer thread establishes exception handling at its base, but defers all the AppDomain // transitions. void ManagedThreadBase::FinalizerBase(ADCallBackFcnType pTarget) { WRAPPER_NO_CONTRACT; ManagedThreadBase_NoADTransition(pTarget, FinalizerThread); } //+---------------------------------------------------------------------------- // // Method: Thread::GetStaticFieldAddress private // // Synopsis: Get the address of the field relative to the current thread. // If an address has not been assigned yet then create one. // //+---------------------------------------------------------------------------- LPVOID Thread::GetStaticFieldAddress(FieldDesc *pFD) { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; _ASSERTE(pFD != NULL); _ASSERTE(pFD->IsThreadStatic()); _ASSERTE(!pFD->IsRVA()); // for static field the MethodTable is exact even for generic classes MethodTable *pMT = pFD->GetEnclosingMethodTable(); // We need to make sure that the class has been allocated, however // we should not call the class constructor ThreadStatics::GetTLM(pMT)->EnsureClassAllocated(pMT); PTR_BYTE base = NULL; if (pFD->GetFieldType() == ELEMENT_TYPE_CLASS || pFD->GetFieldType() == ELEMENT_TYPE_VALUETYPE) { base = pMT->GetGCThreadStaticsBasePointer(); } else { base = pMT->GetNonGCThreadStaticsBasePointer(); } _ASSERTE(base != NULL); DWORD offset = pFD->GetOffset(); _ASSERTE(offset <= FIELD_OFFSET_LAST_REAL_OFFSET); LPVOID result = (LPVOID)((PTR_BYTE)base + (DWORD)offset); // For value classes, the handle points at an OBJECTREF // which holds the boxed value class, so derefernce and unbox. if (pFD->GetFieldType() == ELEMENT_TYPE_VALUETYPE) { OBJECTREF obj = ObjectToOBJECTREF(*(Object**) result); result = obj->GetData(); } return result; } #endif // #ifndef DACCESS_COMPILE //+---------------------------------------------------------------------------- // // Method: Thread::GetStaticFieldAddrNoCreate private // // Synopsis: Get the address of the field relative to the thread. // If an address has not been assigned, return NULL. // No creating is allowed. 
// //+---------------------------------------------------------------------------- TADDR Thread::GetStaticFieldAddrNoCreate(FieldDesc *pFD) { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; _ASSERTE(pFD != NULL); _ASSERTE(pFD->IsThreadStatic()); // for static field the MethodTable is exact even for generic classes PTR_MethodTable pMT = pFD->GetEnclosingMethodTable(); PTR_BYTE base = NULL; if (pFD->GetFieldType() == ELEMENT_TYPE_CLASS || pFD->GetFieldType() == ELEMENT_TYPE_VALUETYPE) { base = pMT->GetGCThreadStaticsBasePointer(dac_cast<PTR_Thread>(this)); } else { base = pMT->GetNonGCThreadStaticsBasePointer(dac_cast<PTR_Thread>(this)); } if (base == NULL) return NULL; DWORD offset = pFD->GetOffset(); _ASSERTE(offset <= FIELD_OFFSET_LAST_REAL_OFFSET); TADDR result = dac_cast<TADDR>(base) + (DWORD)offset; // For value classes, the handle points at an OBJECTREF // which holds the boxed value class, so derefernce and unbox. if (pFD->IsByValue()) { _ASSERTE(result != NULL); PTR_Object obj = *PTR_UNCHECKED_OBJECTREF(result); if (obj == NULL) return NULL; result = dac_cast<TADDR>(obj->GetData()); } return result; } #ifndef DACCESS_COMPILE // // NotifyFrameChainOfExceptionUnwind // ----------------------------------------------------------- // This method will walk the Frame chain from pStartFrame to // the last frame that is below pvLimitSP and will call each // frame's ExceptionUnwind method. It will return the first // Frame that is above pvLimitSP. // Frame * Thread::NotifyFrameChainOfExceptionUnwind(Frame* pStartFrame, LPVOID pvLimitSP) { CONTRACTL { NOTHROW; DISABLED(GC_TRIGGERS); // due to UnwindFrameChain from NOTRIGGER areas MODE_COOPERATIVE; PRECONDITION(CheckPointer(pStartFrame)); PRECONDITION(CheckPointer(pvLimitSP)); } CONTRACTL_END; Frame * pFrame; #ifdef _DEBUG // // assert that the specified Thread's Frame chain actually // contains the start Frame. // pFrame = m_pFrame; while ((pFrame != pStartFrame) && (pFrame != FRAME_TOP)) { pFrame = pFrame->Next(); } CONSISTENCY_CHECK_MSG(pFrame == pStartFrame, "pStartFrame is not on pThread's Frame chain!"); #endif // _DEBUG pFrame = pStartFrame; while (pFrame < pvLimitSP) { CONSISTENCY_CHECK(pFrame != PTR_NULL); CONSISTENCY_CHECK((pFrame) > static_cast<Frame *>((LPVOID)GetCurrentSP())); pFrame->ExceptionUnwind(); pFrame = pFrame->Next(); } // return the frame after the last one notified of the unwind return pFrame; } //+---------------------------------------------------------------------------- // // Method: Thread::DeleteThreadStaticData private // // Synopsis: Delete the static data for each appdomain that this thread // visited. // // //+---------------------------------------------------------------------------- void Thread::DeleteThreadStaticData() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; m_ThreadLocalBlock.FreeTable(); } //+---------------------------------------------------------------------------- // // Method: Thread::DeleteThreadStaticData public // // Synopsis: Delete the static data for the given module. This is called // when the AssemblyLoadContext unloads. // // //+---------------------------------------------------------------------------- void Thread::DeleteThreadStaticData(ModuleIndex index) { m_ThreadLocalBlock.FreeTLM(index.m_dwIndex, FALSE /* isThreadShuttingDown */); } OBJECTREF Thread::GetCulture(BOOL bUICulture) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; // This is the case when we're building CoreLib and haven't yet created // the system assembly. 
if (SystemDomain::System()->SystemAssembly()==NULL || g_fForbidEnterEE) { return NULL; } OBJECTREF pCurrentCulture; MethodDescCallSite propGet(bUICulture ? METHOD__CULTURE_INFO__GET_CURRENT_UI_CULTURE : METHOD__CULTURE_INFO__GET_CURRENT_CULTURE); ARG_SLOT retVal = propGet.Call_RetArgSlot(NULL); pCurrentCulture = ArgSlotToObj(retVal); return pCurrentCulture; } void Thread::SetCulture(OBJECTREF *CultureObj, BOOL bUICulture) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; MethodDescCallSite propSet(bUICulture ? METHOD__CULTURE_INFO__SET_CURRENT_UI_CULTURE : METHOD__CULTURE_INFO__SET_CURRENT_CULTURE); // Set up the Stack. ARG_SLOT pNewArgs[] = { ObjToArgSlot(*CultureObj) }; // Make the actual call. propSet.Call_RetArgSlot(pNewArgs); } BOOL ThreadStore::HoldingThreadStore(Thread *pThread) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; if (pThread) { return (pThread == s_pThreadStore->m_HoldingThread); } else { return (s_pThreadStore->m_holderthreadid.IsCurrentThread()); } } NOINLINE void Thread::OnIncrementCountOverflow(UINT32 *threadLocalCount, UINT64 *overflowCount) { WRAPPER_NO_CONTRACT; _ASSERTE(threadLocalCount != nullptr); _ASSERTE(overflowCount != nullptr); // Increment overflow, accumulate the count for this increment into the overflow count and reset the thread-local count // The thread store lock, in coordination with other places that read these values, ensures that both changes // below become visible together ThreadStoreLockHolder tsl; *threadLocalCount = 0; InterlockedExchangeAdd64((LONGLONG *)overflowCount, (LONGLONG)UINT32_MAX + 1); } UINT64 Thread::GetTotalCount(SIZE_T threadLocalCountOffset, UINT64 *overflowCount) { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; _ASSERTE(overflowCount != nullptr); // enumerate all threads, summing their local counts. ThreadStoreLockHolder tsl; UINT64 total = GetOverflowCount(overflowCount); Thread *pThread = NULL; while ((pThread = ThreadStore::GetAllThreadList(pThread, 0, 0)) != NULL) { total += *GetThreadLocalCountRef(pThread, threadLocalCountOffset); } return total; } UINT64 Thread::GetTotalThreadPoolCompletionCount() { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; _ASSERTE(!ThreadpoolMgr::UsePortableThreadPoolForIO()); bool usePortableThreadPool = ThreadpoolMgr::UsePortableThreadPool(); // enumerate all threads, summing their local counts. 
ThreadStoreLockHolder tsl; UINT64 total = GetIOThreadPoolCompletionCountOverflow(); if (!usePortableThreadPool) { total += GetWorkerThreadPoolCompletionCountOverflow(); } Thread *pThread = NULL; while ((pThread = ThreadStore::GetAllThreadList(pThread, 0, 0)) != NULL) { if (!usePortableThreadPool) { total += pThread->m_workerThreadPoolCompletionCount; } total += pThread->m_ioThreadPoolCompletionCount; } return total; } INT32 Thread::ResetManagedThreadObject(INT32 nPriority) { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; GCX_COOP(); return ResetManagedThreadObjectInCoopMode(nPriority); } INT32 Thread::ResetManagedThreadObjectInCoopMode(INT32 nPriority) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_COOPERATIVE; } CONTRACTL_END; THREADBASEREF pObject = (THREADBASEREF)ObjectFromHandle(m_ExposedObject); if (pObject != NULL) { pObject->ResetName(); nPriority = pObject->GetPriority(); } return nPriority; } BOOL Thread::IsRealThreadPoolResetNeeded() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_COOPERATIVE; } CONTRACTL_END; if(!IsBackground()) return TRUE; THREADBASEREF pObject = (THREADBASEREF)ObjectFromHandle(m_ExposedObject); if(pObject != NULL) { INT32 nPriority = pObject->GetPriority(); if(nPriority != ThreadNative::PRIORITY_NORMAL) return TRUE; } return FALSE; } void Thread::InternalReset(BOOL fNotFinalizerThread, BOOL fThreadObjectResetNeeded, BOOL fResetAbort) { CONTRACTL { NOTHROW; if(!fNotFinalizerThread || fThreadObjectResetNeeded) {GC_TRIGGERS;} else {GC_NOTRIGGER;} } CONTRACTL_END; _ASSERTE (this == GetThread()); INT32 nPriority = ThreadNative::PRIORITY_NORMAL; if (!fNotFinalizerThread && this == FinalizerThread::GetFinalizerThread()) { nPriority = ThreadNative::PRIORITY_HIGHEST; } if(fThreadObjectResetNeeded) { nPriority = ResetManagedThreadObject(nPriority); } if (fResetAbort && IsAbortRequested()) { UnmarkThreadForAbort(); } if (IsThreadPoolThread() && fThreadObjectResetNeeded) { SetBackground(TRUE); if (nPriority != ThreadNative::PRIORITY_NORMAL) { SetThreadPriority(THREAD_PRIORITY_NORMAL); } } else if (!fNotFinalizerThread && this == FinalizerThread::GetFinalizerThread()) { SetBackground(TRUE); if (nPriority != ThreadNative::PRIORITY_HIGHEST) { SetThreadPriority(THREAD_PRIORITY_HIGHEST); } } } DeadlockAwareLock::DeadlockAwareLock(const char *description) : m_pHoldingThread(NULL) #ifdef _DEBUG , m_description(description) #endif { LIMITED_METHOD_CONTRACT; } DeadlockAwareLock::~DeadlockAwareLock() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; CAN_TAKE_LOCK; } CONTRACTL_END; // Wait for another thread to leave its loop in DeadlockAwareLock::TryBeginEnterLock CrstHolder lock(&g_DeadlockAwareCrst); } CHECK DeadlockAwareLock::CheckDeadlock(Thread *pThread) { CONTRACTL { PRECONDITION(g_DeadlockAwareCrst.OwnedByCurrentThread()); NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; // Note that this check is recursive in order to produce descriptive check failure messages. 
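    //
    // Illustrative scenario (hypothetical threads and locks) of the cycle this recursion detects:
    //
    //   Thread T1 holds lock A and is blocked on lock B   (A.m_pHoldingThread == T1, T1->m_pBlockingLock == B)
    //   Thread T2 holds lock B and wants to enter lock A  (B.m_pHoldingThread == T2)
    //
    // CheckDeadlock(T2) invoked on lock A walks A -> T1 -> B and finds that B is already held by T2,
    // the requesting thread, so the check fails and the failure message describes the whole chain.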
Thread *pHoldingThread = m_pHoldingThread.Load(); if (pThread == pHoldingThread) { CHECK_FAILF(("Lock %p (%s) is held by thread %d", this, m_description, pThread)); } if (pHoldingThread != NULL) { DeadlockAwareLock *pBlockingLock = pHoldingThread->m_pBlockingLock.Load(); if (pBlockingLock != NULL) { CHECK_MSGF(pBlockingLock->CheckDeadlock(pThread), ("Deadlock: Lock %p (%s) is held by thread %d", this, m_description, pHoldingThread)); } } CHECK_OK; } BOOL DeadlockAwareLock::CanEnterLock() { Thread * pThread = GetThread(); CONSISTENCY_CHECK_MSG(pThread->m_pBlockingLock.Load() == NULL, "Cannot block on two locks at once"); { CrstHolder lock(&g_DeadlockAwareCrst); // Look for deadlocks DeadlockAwareLock *pLock = this; while (TRUE) { Thread * holdingThread = pLock->m_pHoldingThread; if (holdingThread == pThread) { // Deadlock! return FALSE; } if (holdingThread == NULL) { // Lock is unheld break; } pLock = holdingThread->m_pBlockingLock; if (pLock == NULL) { // Thread is running free break; } } return TRUE; } } BOOL DeadlockAwareLock::TryBeginEnterLock() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; Thread * pThread = GetThread(); CONSISTENCY_CHECK_MSG(pThread->m_pBlockingLock.Load() == NULL, "Cannot block on two locks at once"); { CrstHolder lock(&g_DeadlockAwareCrst); // Look for deadlocks DeadlockAwareLock *pLock = this; while (TRUE) { Thread * holdingThread = pLock->m_pHoldingThread; if (holdingThread == pThread) { // Deadlock! return FALSE; } if (holdingThread == NULL) { // Lock is unheld break; } pLock = holdingThread->m_pBlockingLock; if (pLock == NULL) { // Thread is running free break; } } pThread->m_pBlockingLock = this; } return TRUE; }; void DeadlockAwareLock::BeginEnterLock() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; Thread * pThread = GetThread(); CONSISTENCY_CHECK_MSG(pThread->m_pBlockingLock.Load() == NULL, "Cannot block on two locks at once"); { CrstHolder lock(&g_DeadlockAwareCrst); // Look for deadlock loop CONSISTENCY_CHECK_MSG(CheckDeadlock(pThread), "Deadlock detected!"); pThread->m_pBlockingLock = this; } }; void DeadlockAwareLock::EndEnterLock() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; Thread * pThread = GetThread(); CONSISTENCY_CHECK(m_pHoldingThread.Load() == NULL || m_pHoldingThread.Load() == pThread); CONSISTENCY_CHECK(pThread->m_pBlockingLock.Load() == this); // No need to take a lock when going from blocking to holding. This // transition implies the lack of a deadlock that other threads can see. // (If they would see a deadlock after the transition, they would see // one before as well.) m_pHoldingThread = pThread; } void DeadlockAwareLock::LeaveLock() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; CONSISTENCY_CHECK(m_pHoldingThread == GetThread()); CONSISTENCY_CHECK(GetThread()->m_pBlockingLock.Load() == NULL); m_pHoldingThread = NULL; } #ifdef _DEBUG // Normally, any thread we operate on has a Thread block in its TLS. But there are // a few special threads we don't normally execute managed code on. // // There is a scenario where we run managed code on such a thread, which is when the // DLL_THREAD_ATTACH notification of an (IJW?) module calls into managed code. This // is incredibly dangerous. If a GC is provoked, the system may have trouble performing // the GC because its threads aren't available yet. 
static DWORD SpecialEEThreads[10]; static LONG cnt_SpecialEEThreads = 0; void dbgOnly_IdentifySpecialEEThread() { WRAPPER_NO_CONTRACT; LONG ourCount = FastInterlockIncrement(&cnt_SpecialEEThreads); _ASSERTE(ourCount < (LONG) ARRAY_SIZE(SpecialEEThreads)); SpecialEEThreads[ourCount-1] = ::GetCurrentThreadId(); } BOOL dbgOnly_IsSpecialEEThread() { WRAPPER_NO_CONTRACT; DWORD ourId = ::GetCurrentThreadId(); for (LONG i=0; i<cnt_SpecialEEThreads; i++) if (ourId == SpecialEEThreads[i]) return TRUE; // If we have an EE thread doing helper thread duty, then it is temporarily // 'special' too. #ifdef DEBUGGING_SUPPORTED if (g_pDebugInterface) { //<TODO>We probably should use Thread::GetThreadId</TODO> DWORD helperID = g_pDebugInterface->GetHelperThreadID(); if (helperID == ourId) return TRUE; } #endif //<TODO>Clean this up</TODO> if (GetThreadNULLOk() == NULL) return TRUE; return FALSE; } #endif // _DEBUG void Thread::StaticInitialize() { WRAPPER_NO_CONTRACT; #ifdef FEATURE_SPECIAL_USER_MODE_APC InitializeSpecialUserModeApc(); // When CET shadow stacks are enabled, support for special user-mode APCs with the necessary functionality is required _ASSERTE_ALL_BUILDS(__FILE__, !AreCetShadowStacksEnabled() || UseSpecialUserModeApc()); #endif } #ifdef FEATURE_SPECIAL_USER_MODE_APC QueueUserAPC2Proc Thread::s_pfnQueueUserAPC2Proc; static void NTAPI EmptyApcCallback(ULONG_PTR Parameter) { LIMITED_METHOD_CONTRACT; } void Thread::InitializeSpecialUserModeApc() { WRAPPER_NO_CONTRACT; static_assert_no_msg(OFFSETOF__APC_CALLBACK_DATA__ContextRecord == offsetof(CLONE_APC_CALLBACK_DATA, ContextRecord)); HMODULE hKernel32 = WszLoadLibraryEx(WINDOWS_KERNEL32_DLLNAME_W, NULL, LOAD_LIBRARY_SEARCH_SYSTEM32); // See if QueueUserAPC2 exists QueueUserAPC2Proc pfnQueueUserAPC2Proc = (QueueUserAPC2Proc)GetProcAddress(hKernel32, "QueueUserAPC2"); if (pfnQueueUserAPC2Proc == nullptr) { return; } // See if QueueUserAPC2 supports the special user-mode APC with a callback that includes the interrupted CONTEXT. A special // user-mode APC can interrupt a thread that is in user mode and not in a non-alertable wait. if (!(*pfnQueueUserAPC2Proc)(EmptyApcCallback, GetCurrentThread(), 0, SpecialUserModeApcWithContextFlags)) { return; } // In the future, once code paths using the special user-mode APC get some bake time, it should be used regardless of // whether CET shadow stacks are enabled if (AreCetShadowStacksEnabled()) { s_pfnQueueUserAPC2Proc = pfnQueueUserAPC2Proc; } } #endif // FEATURE_SPECIAL_USER_MODE_APC #endif // #ifndef DACCESS_COMPILE #ifdef DACCESS_COMPILE void STATIC_DATA::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) { WRAPPER_NO_CONTRACT; DAC_ENUM_STHIS(STATIC_DATA); } void Thread::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) { WRAPPER_NO_CONTRACT; DAC_ENUM_DTHIS(); if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE) { if (m_pDomain.IsValid()) { m_pDomain->EnumMemoryRegions(flags, true); } } if (m_debuggerFilterContext.IsValid()) { m_debuggerFilterContext.EnumMem(); } OBJECTHANDLE_EnumMemoryRegions(m_LastThrownObjectHandle); m_ExceptionState.EnumChainMemoryRegions(flags); m_ThreadLocalBlock.EnumMemoryRegions(flags); if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE) { // // Allow all of the frames on the stack to enumerate // their memory. 
// PTR_Frame frame = m_pFrame; while (frame.IsValid() && frame.GetAddr() != dac_cast<TADDR>(FRAME_TOP)) { frame->EnumMemoryRegions(flags); frame = frame->m_Next; } } // // Try and do a stack trace and save information // for each part of the stack. This is very vulnerable // to memory problems so ignore all exceptions here. // CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED ( EnumMemoryRegionsWorker(flags); ); } void Thread::EnumMemoryRegionsWorker(CLRDataEnumMemoryFlags flags) { WRAPPER_NO_CONTRACT; if (IsUnstarted()) { return; } T_CONTEXT context; BOOL DacGetThreadContext(Thread* thread, T_CONTEXT* context); REGDISPLAY regDisp; StackFrameIterator frameIter; TADDR previousSP = 0; //start at zero; this allows first check to always succeed. TADDR currentSP; // Init value. The Limit itself is not legal, so move one target pointer size to the smallest-magnitude // legal address. currentSP = dac_cast<TADDR>(m_CacheStackLimit) + sizeof(TADDR); if (GetFilterContext()) { context = *GetFilterContext(); } else { DacGetThreadContext(this, &context); } if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE) { AppDomain::GetCurrentDomain()->EnumMemoryRegions(flags, true); } FillRegDisplay(&regDisp, &context); frameIter.Init(this, NULL, &regDisp, 0); while (frameIter.IsValid()) { // // There are identical stack pointer checking semantics in code:ClrDataAccess::EnumMemWalkStackHelper // You ***MUST*** maintain identical semantics for both checks! // // Before we continue, we should check to be sure we have a valid // stack pointer. This is to prevent stacks that are not walked // properly due to // a) stack corruption bugs // b) bad stack walks // from continuing on indefinitely. // // We will force SP to strictly increase. // this check can only happen for real stack frames (i.e. not for explicit frames that don't update the RegDisplay) // for ia64, SP may be equal, but in this case BSP must strictly decrease. // We will force SP to be properly aligned. // We will force SP to be in the correct range. // if (frameIter.GetFrameState() == StackFrameIterator::SFITER_FRAMELESS_METHOD) { // This check cannot be applied to explicit frames; they may not move the SP at all. // Also, a single function can push several on the stack at a time with no guarantees about // ordering so we can't check that the addresses of the explicit frames are monotonically increasing. // There is the potential that the walk will not terminate if a set of explicit frames reference // each other circularly. While we could choose a limit for the number of explicit frames allowed // in a row like the total stack size/pointer size, we have no known problems with this scenario. // Thus for now we ignore it. currentSP = (TADDR)GetRegdisplaySP(&regDisp); if (currentSP <= previousSP) { _ASSERTE(!"Target stack has been corrupted, SP for current frame must be larger than previous frame."); break; } } // On windows desktop, the stack pointer should be a multiple // of pointer-size-aligned in the target address space if (currentSP % sizeof(TADDR) != 0) { _ASSERTE(!"Target stack has been corrupted, SP must be aligned."); break; } if (!IsAddressInStack(currentSP)) { _ASSERTE(!"Target stack has been corrupted, SP must in in the stack range."); break; } // Enumerate the code around the call site to help debugger stack walking heuristics PCODE callEnd = GetControlPC(&regDisp); DacEnumCodeForStackwalk(callEnd); // To stackwalk through funceval frames, we need to be sure to preserve the // DebuggerModule's m_pRuntimeDomainAssembly. 
This is the only case that doesn't use the current // vmDomainAssembly in code:DacDbiInterfaceImpl::EnumerateInternalFrames. The following // code mimics that function. // Allow failure, since we want to continue attempting to walk the stack regardless of the outcome. EX_TRY { if ((frameIter.GetFrameState() == StackFrameIterator::SFITER_FRAME_FUNCTION) || (frameIter.GetFrameState() == StackFrameIterator::SFITER_SKIPPED_FRAME_FUNCTION)) { Frame * pFrame = frameIter.m_crawl.GetFrame(); g_pDebugInterface->EnumMemoryRegionsIfFuncEvalFrame(flags, pFrame); } } EX_CATCH_RETHROW_ONLY_COR_E_OPERATIONCANCELLED MethodDesc* pMD = frameIter.m_crawl.GetFunction(); if (pMD != NULL) { pMD->EnumMemoryRegions(flags); } previousSP = currentSP; if (frameIter.Next() != SWA_CONTINUE) { break; } } } void ThreadStore::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) { SUPPORTS_DAC; WRAPPER_NO_CONTRACT; // This will write out the context of the s_pThreadStore. ie // just the pointer // s_pThreadStore.EnumMem(); if (s_pThreadStore.IsValid()) { // write out the whole ThreadStore structure DacEnumHostDPtrMem(s_pThreadStore); // The thread list may be corrupt, so just // ignore exceptions during enumeration. EX_TRY { Thread* thread = s_pThreadStore->m_ThreadList.GetHead(); LONG dwNumThreads = s_pThreadStore->m_ThreadCount; for (LONG i = 0; (i < dwNumThreads) && (thread != NULL); i++) { // Even if this thread is totally broken and we can't enum it, struggle on. // If we do not, we will leave this loop and not enum stack memory for any further threads. CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED( thread->EnumMemoryRegions(flags); ); thread = s_pThreadStore->m_ThreadList.GetNext(thread); } } EX_CATCH_RETHROW_ONLY_COR_E_OPERATIONCANCELLED } } #endif // #ifdef DACCESS_COMPILE
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // THREADS.CPP // // // #include "common.h" #include "frames.h" #include "threads.h" #include "stackwalk.h" #include "excep.h" #include "comsynchronizable.h" #include "log.h" #include "gcheaputilities.h" #include "mscoree.h" #include "dbginterface.h" #include "corprof.h" // profiling #include "eeprofinterfaces.h" #include "eeconfig.h" #include "corhost.h" #include "win32threadpool.h" #include "jitinterface.h" #include "eventtrace.h" #include "comutilnative.h" #include "finalizerthread.h" #include "threadsuspend.h" #include "wrappers.h" #include "nativeoverlapped.h" #include "appdomain.inl" #include "vmholder.h" #include "exceptmacros.h" #include "win32threadpool.h" #ifdef FEATURE_COMINTEROP #include "runtimecallablewrapper.h" #include "interoputil.h" #include "interoputil.inl" #endif // FEATURE_COMINTEROP #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT #include "olecontexthelpers.h" #include "roapi.h" #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT #ifdef FEATURE_SPECIAL_USER_MODE_APC #include "asmconstants.h" #endif static const PortableTailCallFrame g_sentinelTailCallFrame = { NULL, NULL }; TailCallTls::TailCallTls() // A new frame will always be allocated before the frame is modified, // so casting away const is ok here. : m_frame(const_cast<PortableTailCallFrame*>(&g_sentinelTailCallFrame)) , m_argBuffer(NULL) { } Thread* STDCALL GetThreadHelper() { return GetThreadNULLOk(); } TailCallArgBuffer* TailCallTls::AllocArgBuffer(int size, void* gcDesc) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END _ASSERTE(size >= (int)offsetof(TailCallArgBuffer, Args)); if (m_argBuffer != NULL && m_argBuffer->Size < size) { FreeArgBuffer(); } if (m_argBuffer == NULL) { m_argBuffer = (TailCallArgBuffer*)new (nothrow) BYTE[size]; if (m_argBuffer == NULL) return NULL; m_argBuffer->Size = size; } m_argBuffer->State = TAILCALLARGBUFFER_ACTIVE; m_argBuffer->GCDesc = gcDesc; if (gcDesc != NULL) { memset(m_argBuffer->Args, 0, size - offsetof(TailCallArgBuffer, Args)); } return m_argBuffer; } #if defined (_DEBUG_IMPL) || defined(_PREFAST_) thread_local int t_ForbidGCLoaderUseCount; #endif uint64_t Thread::dead_threads_non_alloc_bytes = 0; SPTR_IMPL(ThreadStore, ThreadStore, s_pThreadStore); CONTEXT* ThreadStore::s_pOSContext = NULL; BYTE* ThreadStore::s_pOSContextBuffer = NULL; CLREvent *ThreadStore::s_pWaitForStackCrawlEvent; PTR_ThreadLocalModule ThreadLocalBlock::GetTLMIfExists(ModuleIndex index) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; if (index.m_dwIndex >= m_TLMTableSize) return NULL; return m_pTLMTable[index.m_dwIndex].pTLM; } PTR_ThreadLocalModule ThreadLocalBlock::GetTLMIfExists(MethodTable* pMT) { WRAPPER_NO_CONTRACT; ModuleIndex index = pMT->GetModuleForStatics()->GetModuleIndex(); return GetTLMIfExists(index); } #ifndef DACCESS_COMPILE BOOL Thread::s_fCleanFinalizedThread = FALSE; UINT64 Thread::s_workerThreadPoolCompletionCountOverflow = 0; UINT64 Thread::s_ioThreadPoolCompletionCountOverflow = 0; UINT64 Thread::s_monitorLockContentionCountOverflow = 0; CrstStatic g_DeadlockAwareCrst; // // A transient thread value that indicates this thread is currently walking its stack // or the stack of another thread. This value is useful to help short-circuit // some problematic checks in the loader, guarantee that types & assemblies // encountered during the walk must already be loaded, and provide information to control // assembly loading behavior during stack walks. 
// // This value is set around the main portions of the stack walk (as those portions may // enter the type & assembly loaders). This is also explicitly cleared while the // walking thread calls the stackwalker callback or needs to execute managed code, as // such calls may execute arbitrary code unrelated to the actual stack walking, and // may never return, in the case of exception stackwalk callbacks. // thread_local Thread* t_pStackWalkerWalkingThread; #if defined(_DEBUG) BOOL MatchThreadHandleToOsId ( HANDLE h, DWORD osId ) { #ifndef TARGET_UNIX LIMITED_METHOD_CONTRACT; DWORD id = GetThreadId(h); // OS call GetThreadId may fail, and return 0. In this case we can not // make a decision if the two match or not. Instead, we ignore this check. return id == 0 || id == osId; #else // !TARGET_UNIX return TRUE; #endif // !TARGET_UNIX } #endif // _DEBUG #ifdef _DEBUG_IMPL template<> AutoCleanupGCAssert<TRUE>::AutoCleanupGCAssert() { SCAN_SCOPE_BEGIN; STATIC_CONTRACT_MODE_COOPERATIVE; } template<> AutoCleanupGCAssert<FALSE>::AutoCleanupGCAssert() { SCAN_SCOPE_BEGIN; STATIC_CONTRACT_MODE_PREEMPTIVE; } template<> void GCAssert<TRUE>::BeginGCAssert() { SCAN_SCOPE_BEGIN; STATIC_CONTRACT_MODE_COOPERATIVE; } template<> void GCAssert<FALSE>::BeginGCAssert() { SCAN_SCOPE_BEGIN; STATIC_CONTRACT_MODE_PREEMPTIVE; } #endif // #define NEW_TLS 1 #ifdef _DEBUG void Thread::SetFrame(Frame *pFrame) { CONTRACTL { NOTHROW; GC_NOTRIGGER; DEBUG_ONLY; MODE_COOPERATIVE; // It only makes sense for a Thread to call SetFrame on itself. PRECONDITION(this == GetThread()); PRECONDITION(CheckPointer(pFrame)); } CONTRACTL_END; if (g_pConfig->fAssertOnFailFast()) { Frame *pWalk = m_pFrame; BOOL fExist = FALSE; while (pWalk != (Frame*) -1) { if (pWalk == pFrame) { fExist = TRUE; break; } pWalk = pWalk->m_Next; } pWalk = m_pFrame; while (fExist && pWalk != pFrame && pWalk != (Frame*)-1) { pWalk = pWalk->m_Next; } } m_pFrame = pFrame; // If stack overrun corruptions are expected, then skip this check // as the Frame chain may have been corrupted. if (g_pConfig->fAssertOnFailFast() == false) return; Frame* espVal = (Frame*)GetCurrentSP(); while (pFrame != (Frame*) -1) { static Frame* stopFrame = 0; if (pFrame == stopFrame) _ASSERTE(!"SetFrame frame == stopFrame"); _ASSERTE(IsExecutingOnAltStack() || espVal < pFrame); _ASSERTE(IsExecutingOnAltStack() || pFrame < m_CacheStackBase); _ASSERTE(pFrame->GetFrameType() < Frame::TYPE_COUNT); pFrame = pFrame->m_Next; } } #endif // _DEBUG //************************************************************************ // PRIVATE GLOBALS //************************************************************************ extern unsigned __int64 getTimeStamp(); extern unsigned __int64 getTickFrequency(); unsigned __int64 tgetFrequency() { static unsigned __int64 cachedFreq = (unsigned __int64) -1; if (cachedFreq != (unsigned __int64) -1) return cachedFreq; else { cachedFreq = getTickFrequency(); return cachedFreq; } } #endif // #ifndef DACCESS_COMPILE static StackWalkAction DetectHandleILStubsForDebugger_StackWalkCallback(CrawlFrame *pCF, VOID *pData) { WRAPPER_NO_CONTRACT; // It suffices to wait for the first CrawlFrame with non-NULL function MethodDesc *pMD = pCF->GetFunction(); if (pMD != NULL) { *(bool *)pData = pMD->IsILStub(); return SWA_ABORT; } return SWA_CONTINUE; } // This is really just a heuristic to detect if we are executing in an M2U IL stub or // one of the marshaling methods it calls. It doesn't deal with U2M IL stubs. 
// We loop through the frame chain looking for an uninitialized TransitionFrame. // If there is one, then we are executing in an M2U IL stub or one of the methods it calls. // On the other hand, if there is an initialized TransitionFrame, then we are not. // Also, if there is an HMF on the stack, then we stop. This could be the case where // an IL stub calls an FCALL which ends up in a managed method, and the debugger wants to // stop in those cases. Some examples are COMException..ctor and custom marshalers. // // X86 IL stubs use InlinedCallFrame and are indistinguishable from ordinary methods with // inlined P/Invoke when judging just from the frame chain. We use stack walk to decide // this case. bool Thread::DetectHandleILStubsForDebugger() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; Frame* pFrame = GetFrame(); if (pFrame != NULL) { while (pFrame != FRAME_TOP) { // Check for HMF's. See the comment at the beginning of this function. if (pFrame->GetVTablePtr() == HelperMethodFrame::GetMethodFrameVPtr()) { break; } // If there is an entry frame (i.e. U2M managed), we should break. else if (pFrame->GetFrameType() == Frame::TYPE_ENTRY) { break; } // Check for M2U transition frames. See the comment at the beginning of this function. else if (pFrame->GetFrameType() == Frame::TYPE_EXIT) { if (pFrame->GetReturnAddress() == NULL) { // If the return address is NULL, then the frame has not been initialized yet. // We may see InlinedCallFrame in ordinary methods as well. Have to do // stack walk to find out if this is really an IL stub. bool fInILStub = false; StackWalkFrames(&DetectHandleILStubsForDebugger_StackWalkCallback, &fInILStub, QUICKUNWIND, dac_cast<PTR_Frame>(pFrame)); if (fInILStub) return true; } else { // The frame is fully initialized. return false; } } pFrame = pFrame->Next(); } } return false; } #ifndef _MSC_VER __thread ThreadLocalInfo gCurrentThreadInfo; #endif #ifndef DACCESS_COMPILE void SetThread(Thread* t) { LIMITED_METHOD_CONTRACT gCurrentThreadInfo.m_pThread = t; if (t != NULL) { EnsureTlsDestructionMonitor(); } } void SetAppDomain(AppDomain* ad) { LIMITED_METHOD_CONTRACT gCurrentThreadInfo.m_pAppDomain = ad; } BOOL Thread::Alert () { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; BOOL fRetVal = FALSE; { HANDLE handle = GetThreadHandle(); if (handle != INVALID_HANDLE_VALUE) { fRetVal = ::QueueUserAPC(UserInterruptAPC, handle, APC_Code); } } return fRetVal; } DWORD Thread::Join(DWORD timeout, BOOL alertable) { WRAPPER_NO_CONTRACT; return JoinEx(timeout,alertable?WaitMode_Alertable:WaitMode_None); } DWORD Thread::JoinEx(DWORD timeout, WaitMode mode) { CONTRACTL { THROWS; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; BOOL alertable = (mode & WaitMode_Alertable)?TRUE:FALSE; Thread *pCurThread = GetThreadNULLOk(); _ASSERTE(pCurThread || dbgOnly_IsSpecialEEThread()); { // We're not hosted, so WaitMode_InDeadlock is irrelevant. Clear it, so that this wait can be // forwarded to a SynchronizationContext if needed. 
mode = (WaitMode)(mode & ~WaitMode_InDeadlock); HANDLE handle = GetThreadHandle(); if (handle == INVALID_HANDLE_VALUE) { return WAIT_FAILED; } if (pCurThread) { return pCurThread->DoAppropriateWait(1, &handle, FALSE, timeout, mode); } else { return WaitForSingleObjectEx(handle,timeout,alertable); } } } extern INT32 MapFromNTPriority(INT32 NTPriority); BOOL Thread::SetThreadPriority( int nPriority // thread priority level ) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; BOOL fRet; { if (GetThreadHandle() == INVALID_HANDLE_VALUE) { // When the thread starts running, we will set the thread priority. fRet = TRUE; } else fRet = ::SetThreadPriority(GetThreadHandle(), nPriority); } if (fRet) { GCX_COOP(); THREADBASEREF pObject = (THREADBASEREF)ObjectFromHandle(m_ExposedObject); if (pObject != NULL) { // TODO: managed ThreadPriority only supports up to 4. pObject->SetPriority (MapFromNTPriority(nPriority)); } } return fRet; } int Thread::GetThreadPriority() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; int nRetVal = -1; if (GetThreadHandle() == INVALID_HANDLE_VALUE) { nRetVal = FALSE; } else nRetVal = ::GetThreadPriority(GetThreadHandle()); return nRetVal; } void Thread::ChooseThreadCPUGroupAffinity() { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; #ifndef TARGET_UNIX if (!CPUGroupInfo::CanEnableGCCPUGroups() || !CPUGroupInfo::CanEnableThreadUseAllCpuGroups() || !CPUGroupInfo::CanAssignCpuGroupsToThreads()) { return; } //Borrow the ThreadStore Lock here: Lock ThreadStore before distributing threads ThreadStoreLockHolder TSLockHolder(TRUE); // this thread already has CPU group affinity set if (m_pAffinityMask != 0) return; if (GetThreadHandle() == INVALID_HANDLE_VALUE) return; GROUP_AFFINITY groupAffinity; CPUGroupInfo::ChooseCPUGroupAffinity(&groupAffinity); CPUGroupInfo::SetThreadGroupAffinity(GetThreadHandle(), &groupAffinity, NULL); m_wCPUGroup = groupAffinity.Group; m_pAffinityMask = groupAffinity.Mask; #endif // !TARGET_UNIX } void Thread::ClearThreadCPUGroupAffinity() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; #ifndef TARGET_UNIX if (!CPUGroupInfo::CanEnableGCCPUGroups() || !CPUGroupInfo::CanEnableThreadUseAllCpuGroups() || !CPUGroupInfo::CanAssignCpuGroupsToThreads()) { return; } ThreadStoreLockHolder TSLockHolder(TRUE); // this thread does not have CPU group affinity set if (m_pAffinityMask == 0) return; GROUP_AFFINITY groupAffinity; groupAffinity.Group = m_wCPUGroup; groupAffinity.Mask = m_pAffinityMask; CPUGroupInfo::ClearCPUGroupAffinity(&groupAffinity); m_wCPUGroup = 0; m_pAffinityMask = 0; #endif // !TARGET_UNIX } DWORD Thread::StartThread() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; #ifdef _DEBUG _ASSERTE (m_Creator.IsCurrentThread()); m_Creator.Clear(); #endif _ASSERTE (GetThreadHandle() != INVALID_HANDLE_VALUE); DWORD dwRetVal = ::ResumeThread(GetThreadHandle()); return dwRetVal; } // Class static data: LONG Thread::m_DebugWillSyncCount = -1; LONG Thread::m_DetachCount = 0; LONG Thread::m_ActiveDetachCount = 0; static void DeleteThread(Thread* pThread) { CONTRACTL { NOTHROW; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; //_ASSERTE (pThread == GetThread()); SetThread(NULL); SetAppDomain(NULL); if (pThread->HasThreadStateNC(Thread::TSNC_ExistInThreadStore)) { pThread->DetachThread(FALSE); } else { #ifdef FEATURE_COMINTEROP pThread->RevokeApartmentSpy(); #endif // FEATURE_COMINTEROP FastInterlockOr((ULONG *)&pThread->m_State, Thread::TS_Dead); // ~Thread() calls SafeSetThrowables which has a 
conditional contract // which says that if you call it with a NULL throwable then it is // MODE_ANY, otherwise MODE_COOPERATIVE. Scan doesn't understand that // and assumes that we're violating the MODE_COOPERATIVE. CONTRACT_VIOLATION(ModeViolation); delete pThread; } } static void EnsurePreemptive() { WRAPPER_NO_CONTRACT; Thread *pThread = GetThreadNULLOk(); if (pThread && pThread->PreemptiveGCDisabled()) { pThread->EnablePreemptiveGC(); } } typedef StateHolder<DoNothing, EnsurePreemptive> EnsurePreemptiveModeIfException; Thread* SetupThread() { CONTRACTL { THROWS; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; Thread* pThread; if ((pThread = GetThreadNULLOk()) != NULL) return pThread; // For interop debugging, we must mark that we're in a can't-stop region // b/c we may take Crsts here that may block the helper thread. // We're especially fragile here b/c we don't have a Thread object yet CantStopHolder hCantStop; EnsurePreemptiveModeIfException ensurePreemptive; #ifdef _DEBUG CHECK chk; if (g_pConfig->SuppressChecks()) { // EnterAssert will suppress any checks chk.EnterAssert(); } #endif // Normally, HasStarted is called from the thread's entrypoint to introduce it to // the runtime. But sometimes that thread is used for DLL_THREAD_ATTACH notifications // that call into managed code. In that case, a call to SetupThread here must // find the correct Thread object and install it into TLS. if (ThreadStore::s_pThreadStore->GetPendingThreadCount() != 0) { DWORD ourOSThreadId = ::GetCurrentThreadId(); { ThreadStoreLockHolder TSLockHolder; _ASSERTE(pThread == NULL); while ((pThread = ThreadStore::s_pThreadStore->GetAllThreadList(pThread, Thread::TS_Unstarted | Thread::TS_FailStarted, Thread::TS_Unstarted)) != NULL) { if (pThread->GetOSThreadId() == ourOSThreadId) { break; } } if (pThread != NULL) { STRESS_LOG2(LF_SYNC, LL_INFO1000, "T::ST - recycling thread 0x%p (state: 0x%x)\n", pThread, pThread->m_State.Load()); } } // It's perfectly reasonable to not find the thread. It's just an unrelated // thread spinning up. if (pThread) { if (IsThreadPoolWorkerSpecialThread()) { FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread); pThread->SetBackground(TRUE); } else if (IsThreadPoolIOCompletionSpecialThread()) { FastInterlockOr ((ULONG *) &pThread->m_State, Thread::TS_CompletionPortThread); pThread->SetBackground(TRUE); } else if (IsTimerSpecialThread() || IsWaitSpecialThread()) { FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread); pThread->SetBackground(TRUE); } BOOL fStatus = pThread->HasStarted(); ensurePreemptive.SuppressRelease(); return fStatus ? pThread : NULL; } } // First time we've seen this thread in the runtime: pThread = new Thread(); // What state are we in here? COOP??? Holder<Thread*,DoNothing<Thread*>,DeleteThread> threadHolder(pThread); SetupTLSForThread(); pThread->InitThread(); pThread->PrepareApartmentAndContext(); // reset any unstarted bits on the thread object FastInterlockAnd((ULONG *) &pThread->m_State, ~Thread::TS_Unstarted); FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_LegalToJoin); ThreadStore::AddThread(pThread); SetThread(pThread); SetAppDomain(pThread->GetDomain()); #ifdef FEATURE_INTEROP_DEBUGGING // Ensure that debugger word slot is allocated TlsSetValue(g_debuggerWordTLSIndex, 0); #endif // We now have a Thread object visible to the RS. Unmark special status. 
hCantStop.Release(); threadHolder.SuppressRelease(); FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_FullyInitialized); #ifdef DEBUGGING_SUPPORTED // // If we're debugging, let the debugger know that this // thread is up and running now. // if (CORDebuggerAttached()) { g_pDebugInterface->ThreadCreated(pThread); } else { LOG((LF_CORDB, LL_INFO10000, "ThreadCreated() not called due to CORDebuggerAttached() being FALSE for thread 0x%x\n", pThread->GetThreadId())); } #endif // DEBUGGING_SUPPORTED #ifdef PROFILING_SUPPORTED // If a profiler is present, then notify the profiler that a // thread has been created. if (!IsGCSpecialThread()) { BEGIN_PROFILER_CALLBACK(CORProfilerTrackThreads()); { GCX_PREEMP(); (&g_profControlBlock)->ThreadCreated( (ThreadID)pThread); } DWORD osThreadId = ::GetCurrentThreadId(); (&g_profControlBlock)->ThreadAssignedToOSThread( (ThreadID)pThread, osThreadId); END_PROFILER_CALLBACK(); } #endif // PROFILING_SUPPORTED _ASSERTE(!pThread->IsBackground()); // doesn't matter, but worth checking pThread->SetBackground(TRUE); ensurePreemptive.SuppressRelease(); if (IsThreadPoolWorkerSpecialThread()) { FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread); } else if (IsThreadPoolIOCompletionSpecialThread()) { FastInterlockOr ((ULONG *) &pThread->m_State, Thread::TS_CompletionPortThread); } else if (IsTimerSpecialThread() || IsWaitSpecialThread()) { FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread); } #ifdef FEATURE_EVENT_TRACE ETW::ThreadLog::FireThreadCreated(pThread); #endif // FEATURE_EVENT_TRACE return pThread; } //------------------------------------------------------------------------- // Public function: SetupThreadNoThrow() // Creates Thread for current thread if not previously created. // Returns NULL for failure (usually due to out-of-memory.) //------------------------------------------------------------------------- Thread* SetupThreadNoThrow(HRESULT *pHR) { CONTRACTL { NOTHROW; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; HRESULT hr = S_OK; Thread *pThread = GetThreadNULLOk(); if (pThread != NULL) { return pThread; } EX_TRY { pThread = SetupThread(); } EX_CATCH { // We failed SetupThread. GET_EXCEPTION() may depend on Thread object. if (__pException == NULL) { hr = E_OUTOFMEMORY; } else { hr = GET_EXCEPTION()->GetHR(); } } EX_END_CATCH(SwallowAllExceptions); if (pHR) { *pHR = hr; } return pThread; } //------------------------------------------------------------------------- // Public function: SetupUnstartedThread() // This sets up a Thread object for an exposed System.Thread that // has not been started yet. This allows us to properly enumerate all threads // in the ThreadStore, so we can report on even unstarted threads. Clearly // there is no physical thread to match, yet. 
// // When there is, complete the setup with code:Thread::HasStarted() //------------------------------------------------------------------------- Thread* SetupUnstartedThread(SetupUnstartedThreadFlags flags) { CONTRACTL { THROWS; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; Thread* pThread = new Thread(); if (flags & SUTF_ThreadStoreLockAlreadyTaken) { _ASSERTE(ThreadStore::HoldingThreadStore()); pThread->SetThreadStateNC(Thread::TSNC_TSLTakenForStartup); } FastInterlockOr((ULONG *) &pThread->m_State, (Thread::TS_Unstarted | Thread::TS_WeOwn)); ThreadStore::AddThread(pThread); return pThread; } //------------------------------------------------------------------------- // Public function: DestroyThread() // Destroys the specified Thread object, for a thread which is about to die. //------------------------------------------------------------------------- void DestroyThread(Thread *th) { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; _ASSERTE (th == GetThread()); GCX_PREEMP_NO_DTOR(); if (th->IsAbortRequested()) { // Reset trapping count. th->UnmarkThreadForAbort(); } // Clear any outstanding stale EH state that maybe still active on the thread. #ifdef FEATURE_EH_FUNCLETS ExceptionTracker::PopTrackers((void*)-1); #else // !FEATURE_EH_FUNCLETS #ifdef TARGET_X86 PTR_ThreadExceptionState pExState = th->GetExceptionState(); if (pExState->IsExceptionInProgress()) { GCX_COOP(); pExState->GetCurrentExceptionTracker()->UnwindExInfo((void *)-1); } #else // !TARGET_X86 #error Unsupported platform #endif // TARGET_X86 #endif // FEATURE_EH_FUNCLETS if (g_fEEShutDown == 0) { th->SetThreadState(Thread::TS_ReportDead); th->OnThreadTerminate(FALSE); } } //------------------------------------------------------------------------- // Public function: DetachThread() // Marks the thread as needing to be destroyed, but doesn't destroy it yet. //------------------------------------------------------------------------- HRESULT Thread::DetachThread(BOOL fDLLThreadDetach) { // !!! Can not use contract here. // !!! Contract depends on Thread object for GC_TRIGGERS. // !!! At the end of this function, we call InternalSwitchOut, // !!! and then GetThread()=NULL, and dtor of contract does not work any more. STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; // Clear any outstanding stale EH state that maybe still active on the thread. #ifdef FEATURE_EH_FUNCLETS ExceptionTracker::PopTrackers((void*)-1); #else // !FEATURE_EH_FUNCLETS #ifdef TARGET_X86 PTR_ThreadExceptionState pExState = GetExceptionState(); if (pExState->IsExceptionInProgress()) { GCX_COOP(); pExState->GetCurrentExceptionTracker()->UnwindExInfo((void *)-1); } #else // !TARGET_X86 #error Unsupported platform #endif // TARGET_X86 #endif // FEATURE_EH_FUNCLETS #ifdef FEATURE_COMINTEROP IErrorInfo *pErrorInfo; // Avoid calling GetErrorInfo() if ole32 has already executed the DLL_THREAD_DETACH, // otherwise we'll cause ole32 to re-allocate and leak its TLS data (SOleTlsData). 
if (ClrTeb::GetOleReservedPtr() != NULL && GetErrorInfo(0, &pErrorInfo) == S_OK) { // if this is our IErrorInfo, release it now - we don't want ole32 to do it later as // part of its DLL_THREAD_DETACH as we won't be able to handle the call at that point if (!ComInterfaceSlotIs(pErrorInfo, 2, Unknown_ReleaseSpecial_IErrorInfo)) { // if it's not our IErrorInfo, put it back SetErrorInfo(0, pErrorInfo); } pErrorInfo->Release(); } // Revoke our IInitializeSpy registration only if we are not in DLL_THREAD_DETACH // (COM will do it or may have already done it automatically in that case). if (!fDLLThreadDetach) { RevokeApartmentSpy(); } #endif // FEATURE_COMINTEROP _ASSERTE(!PreemptiveGCDisabled()); _ASSERTE ((m_State & Thread::TS_Detached) == 0); _ASSERTE (this == GetThread()); FastInterlockIncrement(&Thread::m_DetachCount); if (IsAbortRequested()) { // Reset trapping count. UnmarkThreadForAbort(); } if (!IsBackground()) { FastInterlockIncrement(&Thread::m_ActiveDetachCount); ThreadStore::CheckForEEShutdown(); } HANDLE hThread = GetThreadHandle(); SetThreadHandle (INVALID_HANDLE_VALUE); while (m_dwThreadHandleBeingUsed > 0) { // Another thread is using the handle now. #undef Sleep // We can not call __SwitchToThread since we can not go back to host. ::Sleep(10); #define Sleep(a) Dont_Use_Sleep(a) } if (m_WeOwnThreadHandle && m_ThreadHandleForClose == INVALID_HANDLE_VALUE) { m_ThreadHandleForClose = hThread; } // We need to make sure that TLS are touched last here. SetThread(NULL); SetAppDomain(NULL); FastInterlockOr((ULONG*)&m_State, (int) (Thread::TS_Detached | Thread::TS_ReportDead)); // Do not touch Thread object any more. It may be destroyed. // These detached threads will be cleaned up by finalizer thread. But if the process uses // little managed heap, it will be a while before GC happens, and finalizer thread starts // working on detached thread. So we wake up finalizer thread to clean up resources. // // (It's possible that this is the startup thread, and startup failed, and so the finalization // machinery isn't fully initialized. Hence this check.) if (g_fEEStarted) FinalizerThread::EnableFinalization(); return S_OK; } DWORD GetRuntimeId() { LIMITED_METHOD_CONTRACT; #ifdef HOST_WINDOWS return _tls_index; #else return 0; #endif } //--------------------------------------------------------------------------- // Creates new Thread for reverse p-invoke calls. //--------------------------------------------------------------------------- Thread* WINAPI CreateThreadBlockThrow() { WRAPPER_NO_CONTRACT; // This is a workaround to disable our check for throwing exception in SetupThread. // We want to throw an exception for reverse p-invoke, and our assertion may fire if // a unmanaged caller does not setup an exception handler. CONTRACT_VIOLATION(ThrowsViolation); // WON'T FIX - This enables catastrophic failure exception in reverse P/Invoke - the only way we can communicate an error to legacy code. Thread* pThread = NULL; BEGIN_ENTRYPOINT_THROWS; HRESULT hr = S_OK; pThread = SetupThreadNoThrow(&hr); if (pThread == NULL) { // Creating Thread failed, and we need to throw an exception to report status. // It is misleading to use our COM+ exception code, since this is not a managed exception. 
ULONG_PTR arg = hr; RaiseException(EXCEPTION_EXX, 0, 1, &arg); } END_ENTRYPOINT_THROWS; return pThread; } #ifdef _DEBUG DWORD_PTR Thread::OBJREF_HASH = OBJREF_TABSIZE; #endif extern "C" void STDCALL JIT_PatchedCodeStart(); extern "C" void STDCALL JIT_PatchedCodeLast(); static void* s_barrierCopy = NULL; BYTE* GetWriteBarrierCodeLocation(VOID* barrier) { if (IsWriteBarrierCopyEnabled()) { return (BYTE*)PINSTRToPCODE((TADDR)s_barrierCopy + ((TADDR)barrier - (TADDR)JIT_PatchedCodeStart)); } else { return (BYTE*)barrier; } } BOOL IsIPInWriteBarrierCodeCopy(PCODE controlPc) { if (IsWriteBarrierCopyEnabled()) { return (s_barrierCopy <= (void*)controlPc && (void*)controlPc < ((BYTE*)s_barrierCopy + ((BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart))); } else { return FALSE; } } PCODE AdjustWriteBarrierIP(PCODE controlPc) { _ASSERTE(IsIPInWriteBarrierCodeCopy(controlPc)); // Pretend we were executing the barrier function at its original location so that the unwinder can unwind the frame return (PCODE)JIT_PatchedCodeStart + (controlPc - (PCODE)s_barrierCopy); } #ifdef TARGET_X86 extern "C" void *JIT_WriteBarrierEAX_Loc; #else extern "C" void *JIT_WriteBarrier_Loc; #endif #ifdef TARGET_ARM64 extern "C" void (*JIT_WriteBarrier_Table)(); extern "C" void *JIT_WriteBarrier_Loc; void *JIT_WriteBarrier_Loc = 0; extern "C" void *JIT_WriteBarrier_Table_Loc; void *JIT_WriteBarrier_Table_Loc = 0; #endif // TARGET_ARM64 #ifdef TARGET_ARM extern "C" void *JIT_WriteBarrier_Loc = 0; #endif // TARGET_ARM #ifndef TARGET_UNIX // g_TlsIndex is only used by the DAC. Disable optimizations around it to prevent it from getting optimized out. #pragma optimize("", off) static void SetIlsIndex(DWORD tlsIndex) { g_TlsIndex = tlsIndex; } #pragma optimize("", on) #endif //--------------------------------------------------------------------------- // One-time initialization. Called during Dll initialization. So // be careful what you do in here! //--------------------------------------------------------------------------- void InitThreadManager() { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; // All patched helpers should fit into one page. // If you hit this assert on retail build, there is most likely problem with BBT script. _ASSERTE_ALL_BUILDS("clr/src/VM/threads.cpp", (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart > (ptrdiff_t)0); _ASSERTE_ALL_BUILDS("clr/src/VM/threads.cpp", (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart < (ptrdiff_t)GetOsPageSize()); if (IsWriteBarrierCopyEnabled()) { s_barrierCopy = ExecutableAllocator::Instance()->Reserve(g_SystemInfo.dwAllocationGranularity); ExecutableAllocator::Instance()->Commit(s_barrierCopy, g_SystemInfo.dwAllocationGranularity, true); if (s_barrierCopy == NULL) { _ASSERTE(!"Allocation of GC barrier code page failed"); COMPlusThrowWin32(); } { size_t writeBarrierSize = (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart; ExecutableWriterHolder<void> barrierWriterHolder(s_barrierCopy, writeBarrierSize); memcpy(barrierWriterHolder.GetRW(), (BYTE*)JIT_PatchedCodeStart, writeBarrierSize); } // Store the JIT_WriteBarrier copy location to a global variable so that helpers // can jump to it. 
#ifdef TARGET_X86 JIT_WriteBarrierEAX_Loc = GetWriteBarrierCodeLocation((void*)JIT_WriteBarrierEAX); #define X86_WRITE_BARRIER_REGISTER(reg) \ SetJitHelperFunction(CORINFO_HELP_ASSIGN_REF_##reg, GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier##reg)); \ ETW::MethodLog::StubInitialized((ULONGLONG)GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier##reg), W("@WriteBarrier" #reg)); ENUM_X86_WRITE_BARRIER_REGISTERS() #undef X86_WRITE_BARRIER_REGISTER #else // TARGET_X86 JIT_WriteBarrier_Loc = GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier); #endif // TARGET_X86 SetJitHelperFunction(CORINFO_HELP_ASSIGN_REF, GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier)); ETW::MethodLog::StubInitialized((ULONGLONG)GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier), W("@WriteBarrier")); #ifdef TARGET_ARM64 // Store the JIT_WriteBarrier_Table copy location to a global variable so that it can be updated. JIT_WriteBarrier_Table_Loc = GetWriteBarrierCodeLocation((void*)&JIT_WriteBarrier_Table); #endif // TARGET_ARM64 #if defined(TARGET_ARM64) || defined(TARGET_ARM) SetJitHelperFunction(CORINFO_HELP_CHECKED_ASSIGN_REF, GetWriteBarrierCodeLocation((void*)JIT_CheckedWriteBarrier)); ETW::MethodLog::StubInitialized((ULONGLONG)GetWriteBarrierCodeLocation((void*)JIT_CheckedWriteBarrier), W("@CheckedWriteBarrier")); SetJitHelperFunction(CORINFO_HELP_ASSIGN_BYREF, GetWriteBarrierCodeLocation((void*)JIT_ByRefWriteBarrier)); ETW::MethodLog::StubInitialized((ULONGLONG)GetWriteBarrierCodeLocation((void*)JIT_ByRefWriteBarrier), W("@ByRefWriteBarrier")); #endif // TARGET_ARM64 || TARGET_ARM } else { // I am using virtual protect to cover the entire range that this code falls in. // // We could reset it to non-writeable inbetween GCs and such, but then we'd have to keep on re-writing back and forth, // so instead we'll leave it writable from here forward. DWORD oldProt; if (!ClrVirtualProtect((void *)JIT_PatchedCodeStart, (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart, PAGE_EXECUTE_READWRITE, &oldProt)) { _ASSERTE(!"ClrVirtualProtect of code page failed"); COMPlusThrowWin32(); } #ifdef TARGET_X86 JIT_WriteBarrierEAX_Loc = (void*)JIT_WriteBarrierEAX; #else JIT_WriteBarrier_Loc = (void*)JIT_WriteBarrier; #endif #ifdef TARGET_ARM64 // Store the JIT_WriteBarrier_Table copy location to a global variable so that it can be updated. JIT_WriteBarrier_Table_Loc = (void*)&JIT_WriteBarrier_Table; #endif // TARGET_ARM64 } #ifndef TARGET_UNIX _ASSERTE(GetThreadNULLOk() == NULL); size_t offsetOfCurrentThreadInfo = Thread::GetOffsetOfThreadStatic(&gCurrentThreadInfo); _ASSERTE(offsetOfCurrentThreadInfo < 0x8000); _ASSERTE(_tls_index < 0x10000); // Save gCurrentThreadInfo location for debugger SetIlsIndex((DWORD)(_tls_index + (offsetOfCurrentThreadInfo << 16) + 0x80000000)); _ASSERTE(g_TrapReturningThreads == 0); #endif // !TARGET_UNIX #ifdef FEATURE_INTEROP_DEBUGGING g_debuggerWordTLSIndex = TlsAlloc(); if (g_debuggerWordTLSIndex == TLS_OUT_OF_INDEXES) COMPlusThrowWin32(); #endif IfFailThrow(Thread::CLRSetThreadStackGuarantee(Thread::STSGuarantee_Force)); ThreadStore::InitThreadStore(); // NOTE: CRST_UNSAFE_ANYMODE prevents a GC mode switch when entering this crst. // If you remove this flag, we will switch to preemptive mode when entering // g_DeadlockAwareCrst, which means all functions that enter it will become // GC_TRIGGERS. (This includes all uses of CrstHolder.) So be sure // to update the contracts if you remove this flag. 
g_DeadlockAwareCrst.Init(CrstDeadlockDetection, CRST_UNSAFE_ANYMODE); #ifdef _DEBUG // Randomize OBJREF_HASH to handle hash collision. Thread::OBJREF_HASH = OBJREF_TABSIZE - (DbgGetEXETimeStamp()%10); #endif // _DEBUG ThreadSuspend::Initialize(); } //************************************************************************ // Thread members //************************************************************************ #if defined(_DEBUG) && defined(TRACK_SYNC) // One outstanding synchronization held by this thread: struct Dbg_TrackSyncEntry { UINT_PTR m_caller; AwareLock *m_pAwareLock; BOOL Equiv (UINT_PTR caller, void *pAwareLock) { LIMITED_METHOD_CONTRACT; return (m_caller == caller) && (m_pAwareLock == pAwareLock); } BOOL Equiv (void *pAwareLock) { LIMITED_METHOD_CONTRACT; return (m_pAwareLock == pAwareLock); } }; // Each thread has a stack that tracks all enter and leave requests struct Dbg_TrackSyncStack : public Dbg_TrackSync { enum { MAX_TRACK_SYNC = 20, // adjust stack depth as necessary }; void EnterSync (UINT_PTR caller, void *pAwareLock); void LeaveSync (UINT_PTR caller, void *pAwareLock); Dbg_TrackSyncEntry m_Stack [MAX_TRACK_SYNC]; UINT_PTR m_StackPointer; BOOL m_Active; Dbg_TrackSyncStack() : m_StackPointer(0), m_Active(TRUE) { LIMITED_METHOD_CONTRACT; } }; void Dbg_TrackSyncStack::EnterSync(UINT_PTR caller, void *pAwareLock) { LIMITED_METHOD_CONTRACT; STRESS_LOG4(LF_SYNC, LL_INFO100, "Dbg_TrackSyncStack::EnterSync, IP=%p, Recursion=%u, LockState=%x, HoldingThread=%p.\n", caller, ((AwareLock*)pAwareLock)->GetRecursionLevel(), ((AwareLock*)pAwareLock)->GetLockState(), ((AwareLock*)pAwareLock)->GetHoldingThread()); if (m_Active) { if (m_StackPointer >= MAX_TRACK_SYNC) { _ASSERTE(!"Overflowed synchronization stack checking. Disabling"); m_Active = FALSE; return; } } m_Stack[m_StackPointer].m_caller = caller; m_Stack[m_StackPointer].m_pAwareLock = (AwareLock *) pAwareLock; m_StackPointer++; } void Dbg_TrackSyncStack::LeaveSync(UINT_PTR caller, void *pAwareLock) { WRAPPER_NO_CONTRACT; STRESS_LOG4(LF_SYNC, LL_INFO100, "Dbg_TrackSyncStack::LeaveSync, IP=%p, Recursion=%u, LockState=%x, HoldingThread=%p.\n", caller, ((AwareLock*)pAwareLock)->GetRecursionLevel(), ((AwareLock*)pAwareLock)->GetLockState(), ((AwareLock*)pAwareLock)->GetHoldingThread()); if (m_Active) { if (m_StackPointer == 0) _ASSERTE(!"Underflow in leaving synchronization"); else if (m_Stack[m_StackPointer - 1].Equiv(pAwareLock)) { m_StackPointer--; } else { for (int i=m_StackPointer - 2; i>=0; i--) { if (m_Stack[i].Equiv(pAwareLock)) { _ASSERTE(!"Locks are released out of order. 
This might be okay..."); memcpy(&m_Stack[i], &m_Stack[i+1], sizeof(m_Stack[0]) * (m_StackPointer - i - 1)); return; } } _ASSERTE(!"Trying to release a synchronization lock which isn't held"); } } } #endif // TRACK_SYNC static DWORD dwHashCodeSeed = 123456789; //-------------------------------------------------------------------- // Thread construction //-------------------------------------------------------------------- Thread::Thread() { CONTRACTL { THROWS; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; m_pFrame = FRAME_TOP; m_pGCFrame = NULL; m_fPreemptiveGCDisabled = 0; #ifdef _DEBUG m_ulForbidTypeLoad = 0; m_GCOnTransitionsOK = TRUE; #endif #ifdef ENABLE_CONTRACTS m_ulEnablePreemptiveGCCount = 0; #endif #ifdef _DEBUG dbg_m_cSuspendedThreads = 0; dbg_m_cSuspendedThreadsWithoutOSLock = 0; m_Creator.Clear(); m_dwUnbreakableLockCount = 0; #endif m_dwForbidSuspendThread = 0; // Initialize lock state m_pHead = &m_embeddedEntry; m_embeddedEntry.pNext = m_pHead; m_embeddedEntry.pPrev = m_pHead; m_embeddedEntry.dwLLockID = 0; m_embeddedEntry.dwULockID = 0; m_embeddedEntry.wReaderLevel = 0; m_pBlockingLock = NULL; m_alloc_context.init(); m_thAllocContextObj = 0; m_UserInterrupt = 0; m_WaitEventLink.m_Next = NULL; m_WaitEventLink.m_LinkSB.m_pNext = NULL; m_ThreadHandle = INVALID_HANDLE_VALUE; m_ThreadHandleForClose = INVALID_HANDLE_VALUE; m_ThreadHandleForResume = INVALID_HANDLE_VALUE; m_WeOwnThreadHandle = FALSE; #ifdef _DEBUG m_ThreadId = UNINITIALIZED_THREADID; #endif //_DEBUG // Initialize this variable to a very different start value for each thread // Using linear congruential generator from Knuth Vol. 2, p. 102, line 24 dwHashCodeSeed = dwHashCodeSeed * 1566083941 + 1; m_dwHashCodeSeed = dwHashCodeSeed; m_hijackLock = FALSE; m_OSThreadId = 0; m_Priority = INVALID_THREAD_PRIORITY; m_ExternalRefCount = 1; m_State = TS_Unstarted; m_StateNC = TSNC_Unknown; // It can't be a LongWeakHandle because we zero stuff out of the exposed // object as it is finalized. At that point, calls to GetCurrentThread() // had better get a new one,! 
m_ExposedObject = CreateGlobalShortWeakHandle(NULL); GlobalShortWeakHandleHolder exposedObjectHolder(m_ExposedObject); m_StrongHndToExposedObject = CreateGlobalStrongHandle(NULL); GlobalStrongHandleHolder strongHndToExposedObjectHolder(m_StrongHndToExposedObject); m_LastThrownObjectHandle = NULL; m_ltoIsUnhandled = FALSE; m_debuggerFilterContext = NULL; m_fInteropDebuggingHijacked = FALSE; m_profilerCallbackState = 0; for (int i = 0; i < MAX_NOTIFICATION_PROFILERS + 1; ++i) { m_dwProfilerEvacuationCounters[i] = 0; } m_pProfilerFilterContext = NULL; m_CacheStackBase = 0; m_CacheStackLimit = 0; m_CacheStackSufficientExecutionLimit = 0; m_CacheStackStackAllocNonRiskyExecutionLimit = 0; #ifdef _DEBUG m_pCleanedStackBase = NULL; #endif #ifdef STACK_GUARDS_DEBUG m_pCurrentStackGuard = NULL; #endif #ifdef FEATURE_HIJACK m_ppvHJRetAddrPtr = (VOID**) 0xCCCCCCCCCCCCCCCC; m_pvHJRetAddr = (VOID*) 0xCCCCCCCCCCCCCCCC; #ifndef TARGET_UNIX X86_ONLY(m_LastRedirectIP = 0); X86_ONLY(m_SpinCount = 0); #endif // TARGET_UNIX #endif // FEATURE_HIJACK #if defined(_DEBUG) && defined(TRACK_SYNC) m_pTrackSync = new Dbg_TrackSyncStack; NewHolder<Dbg_TrackSyncStack> trackSyncHolder(static_cast<Dbg_TrackSyncStack*>(m_pTrackSync)); #endif // TRACK_SYNC m_PreventAsync = 0; #ifdef FEATURE_COMINTEROP m_fDisableComObjectEagerCleanup = false; #endif //FEATURE_COMINTEROP m_fHasDeadThreadBeenConsideredForGCTrigger = false; m_TraceCallCount = 0; m_ThrewControlForThread = 0; m_ThreadTasks = (ThreadTasks)0; m_pLoadLimiter= NULL; // The state and the tasks must be 32-bit aligned for atomicity to be guaranteed. _ASSERTE((((size_t) &m_State) & 3) == 0); _ASSERTE((((size_t) &m_ThreadTasks) & 3) == 0); // On all callbacks, call the trap code, which we now have // wired to cause a GC. Thus we will do a GC on all Transition Frame Transitions (and more). 
if (GCStress<cfg_transition>::IsEnabled()) { m_State = (ThreadState) (m_State | TS_GCOnTransitions); } m_AbortType = EEPolicy::TA_None; m_AbortEndTime = MAXULONGLONG; m_RudeAbortEndTime = MAXULONGLONG; m_AbortController = 0; m_AbortRequestLock = 0; m_fRudeAbortInitiated = FALSE; m_pIOCompletionContext = NULL; #ifdef _DEBUG m_fRudeAborted = FALSE; m_dwAbortPoint = 0; #endif m_OSContext = new CONTEXT(); NewHolder<CONTEXT> contextHolder(m_OSContext); m_pSavedRedirectContext = NULL; m_pOSContextBuffer = NULL; #ifdef _DEBUG m_RedirectContextInUse = false; #endif #ifdef FEATURE_COMINTEROP m_pRCWStack = new RCWStackHeader(); #endif #ifdef _DEBUG m_bGCStressing = FALSE; m_bUniqueStacking = FALSE; #endif m_pPendingTypeLoad = NULL; m_pIBCInfo = NULL; m_dwAVInRuntimeImplOkayCount = 0; #if defined(HAVE_GCCOVER) && defined(USE_REDIRECT_FOR_GCSTRESS) && !defined(TARGET_UNIX) // GCCOVER m_fPreemptiveGCDisabledForGCStress = false; #endif #ifdef _DEBUG m_pHelperMethodFrameCallerList = (HelperMethodFrameCallerList*)-1; #endif m_pExceptionDuringStartup = NULL; #ifdef HAVE_GCCOVER m_pbDestCode = NULL; m_pbSrcCode = NULL; #if defined(GCCOVER_TOLERATE_SPURIOUS_AV) m_pLastAVAddress = NULL; #endif // defined(GCCOVER_TOLERATE_SPURIOUS_AV) #endif // HAVE_GCCOVER m_debuggerActivePatchSkipper = NULL; m_dwThreadHandleBeingUsed = 0; SetProfilerCallbacksAllowed(TRUE); m_pCreatingThrowableForException = NULL; #ifdef FEATURE_EH_FUNCLETS m_dwIndexClauseForCatch = 0; m_sfEstablisherOfActualHandlerFrame.Clear(); #endif // FEATURE_EH_FUNCLETS m_workerThreadPoolCompletionCount = 0; m_ioThreadPoolCompletionCount = 0; m_monitorLockContentionCount = 0; m_pDomain = SystemDomain::System()->DefaultDomain(); // Do not expose thread until it is fully constructed g_pThinLockThreadIdDispenser->NewId(this, this->m_ThreadId); // // DO NOT ADD ADDITIONAL CONSTRUCTION AFTER THIS POINT. // NewId() allows this Thread instance to be accessed via a Thread Id. Do not // add additional construction after this point to prevent the race condition // of accessing a partially constructed Thread via Thread Id lookup. // exposedObjectHolder.SuppressRelease(); strongHndToExposedObjectHolder.SuppressRelease(); #if defined(_DEBUG) && defined(TRACK_SYNC) trackSyncHolder.SuppressRelease(); #endif contextHolder.SuppressRelease(); #ifdef FEATURE_COMINTEROP m_uliInitializeSpyCookie.QuadPart = 0ul; m_fInitializeSpyRegistered = false; m_pLastSTACtxCookie = NULL; #endif // FEATURE_COMINTEROP m_fGCSpecial = FALSE; #ifndef TARGET_UNIX m_wCPUGroup = 0; m_pAffinityMask = 0; #endif // !TARGET_UNIX m_pAllLoggedTypes = NULL; #ifdef FEATURE_PERFTRACING memset(&m_activityId, 0, sizeof(m_activityId)); #endif // FEATURE_PERFTRACING m_HijackReturnKind = RT_Illegal; m_currentPrepareCodeConfig = nullptr; m_isInForbidSuspendForDebuggerRegion = false; m_hasPendingActivation = false; #ifdef _DEBUG memset(dangerousObjRefs, 0, sizeof(dangerousObjRefs)); #endif // _DEBUG } //-------------------------------------------------------------------- // Failable initialization occurs here. 
//-------------------------------------------------------------------- void Thread::InitThread() { CONTRACTL { THROWS; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; HANDLE hDup = INVALID_HANDLE_VALUE; BOOL ret = TRUE; // This message actually serves a purpose (which is why it is always run) // The Stress log is run during hijacking, when other threads can be suspended // at arbitrary locations (including when holding a lock that NT uses to serialize // all memory allocations). By sending a message now, we ensure that the stress // log will not allocate memory at these critical times and avoid deadlock. STRESS_LOG2(LF_ALWAYS, LL_ALWAYS, "SetupThread managed Thread %p Thread Id = %x\n", this, GetThreadId()); #ifndef TARGET_UNIX // workaround: Remove this when we flow impersonation token to host. BOOL reverted = FALSE; HANDLE threadToken = INVALID_HANDLE_VALUE; #endif // !TARGET_UNIX if (m_ThreadHandle == INVALID_HANDLE_VALUE) { // For WinCE, all clients have the same handle for a thread. Duplication is // not possible. We make sure we never close this handle unless we created // the thread (TS_WeOwn). // // For Win32, each client has its own handle. This is achieved by duplicating // the pseudo-handle from ::GetCurrentThread(). Unlike WinCE, this service // returns a pseudo-handle which is only useful for duplication. In this case // each client is responsible for closing its own (duplicated) handle. // // We don't bother duplicating if WeOwn, because we created the handle in the // first place. // Thread is created when or after the physical thread started running HANDLE curProcess = ::GetCurrentProcess(); #ifndef TARGET_UNIX // If we're impersonating on NT, then DuplicateHandle(GetCurrentThread()) is going to give us a handle with only // THREAD_TERMINATE, THREAD_QUERY_INFORMATION, and THREAD_SET_INFORMATION. This doesn't include // THREAD_SUSPEND_RESUME nor THREAD_GET_CONTEXT. We need to be able to suspend the thread, and we need to be // able to get its context. Therefore, if we're impersonating, we revert to self, dup the handle, then // re-impersonate before we leave this routine. if (!RevertIfImpersonated(&reverted, &threadToken)) { COMPlusThrowWin32(); } class EnsureResetThreadToken { private: BOOL m_NeedReset; HANDLE m_threadToken; public: EnsureResetThreadToken(HANDLE threadToken, BOOL reverted) { m_threadToken = threadToken; m_NeedReset = reverted; } ~EnsureResetThreadToken() { UndoRevert(m_NeedReset, m_threadToken); if (m_threadToken != INVALID_HANDLE_VALUE) { CloseHandle(m_threadToken); } } }; EnsureResetThreadToken resetToken(threadToken, reverted); #endif // !TARGET_UNIX if (::DuplicateHandle(curProcess, ::GetCurrentThread(), curProcess, &hDup, 0 /*ignored*/, FALSE /*inherit*/, DUPLICATE_SAME_ACCESS)) { _ASSERTE(hDup != INVALID_HANDLE_VALUE); SetThreadHandle(hDup); m_WeOwnThreadHandle = TRUE; } else { COMPlusThrowWin32(); } } if ((m_State & TS_WeOwn) == 0) { if (!AllocHandles()) { ThrowOutOfMemory(); } } _ASSERTE(HasValidThreadHandle()); m_random.Init(); // Set floating point mode to round to nearest #ifndef TARGET_UNIX (void) _controlfp_s( NULL, _RC_NEAR, _RC_CHOP|_RC_UP|_RC_DOWN|_RC_NEAR ); m_pTEB = (struct _NT_TIB*)NtCurrentTeb(); #endif // !TARGET_UNIX if (m_CacheStackBase == 0) { _ASSERTE(m_CacheStackLimit == 0); ret = SetStackLimits(fAll); if (ret == FALSE) { ThrowOutOfMemory(); } } ret = Thread::AllocateIOCompletionContext(); if (!ret) { ThrowOutOfMemory(); } } // Allocate all the handles. 
When we are kicking of a new thread, we can call // here before the thread starts running. BOOL Thread::AllocHandles() { WRAPPER_NO_CONTRACT; _ASSERTE(!m_DebugSuspendEvent.IsValid()); _ASSERTE(!m_EventWait.IsValid()); BOOL fOK = TRUE; EX_TRY { // create a manual reset event for getting the thread to a safe point m_DebugSuspendEvent.CreateManualEvent(FALSE); m_EventWait.CreateManualEvent(TRUE); } EX_CATCH { fOK = FALSE; if (!m_DebugSuspendEvent.IsValid()) { m_DebugSuspendEvent.CloseEvent(); } if (!m_EventWait.IsValid()) { m_EventWait.CloseEvent(); } } EX_END_CATCH(RethrowTerminalExceptions); return fOK; } //-------------------------------------------------------------------- // This is the alternate path to SetupThread/InitThread. If we created // an unstarted thread, we have SetupUnstartedThread/HasStarted. //-------------------------------------------------------------------- BOOL Thread::HasStarted() { CONTRACTL { NOTHROW; DISABLED(GC_NOTRIGGER); } CONTRACTL_END; _ASSERTE(!m_fPreemptiveGCDisabled); // can't use PreemptiveGCDisabled() here // This is cheating a little. There is a pathway here from SetupThread, but only // via IJW SystemDomain::RunDllMain. Normally SetupThread returns a thread in // preemptive mode, ready for a transition. But in the IJW case, it can return a // cooperative mode thread. RunDllMain handles this "surprise" correctly. m_fPreemptiveGCDisabled = TRUE; // Normally, HasStarted is called from the thread's entrypoint to introduce it to // the runtime. But sometimes that thread is used for DLL_THREAD_ATTACH notifications // that call into managed code. In that case, the second HasStarted call is // redundant and should be ignored. if (GetThreadNULLOk() == this) return TRUE; _ASSERTE(GetThreadNULLOk() == 0); _ASSERTE(HasValidThreadHandle()); BOOL fCanCleanupCOMState = FALSE; BOOL res = TRUE; res = SetStackLimits(fAll); if (res == FALSE) { m_pExceptionDuringStartup = Exception::GetOOMException(); goto FAILURE; } // If any exception happens during HasStarted, we will cache the exception in Thread::m_pExceptionDuringStartup // which will be thrown in Thread.Start as an internal exception EX_TRY { SetupTLSForThread(); InitThread(); fCanCleanupCOMState = TRUE; // Preparing the COM apartment and context may attempt // to transition to Preemptive mode. At this point in // the thread's lifetime this can be a bad thing if a GC // is triggered (e.g. GCStress). Do the preparation prior // to the thread being set so the Preemptive mode transition // is a no-op. PrepareApartmentAndContext(); SetThread(this); SetAppDomain(m_pDomain); ThreadStore::TransferStartedThread(this); #ifdef FEATURE_EVENT_TRACE ETW::ThreadLog::FireThreadCreated(this); #endif // FEATURE_EVENT_TRACE } EX_CATCH { if (__pException != NULL) { __pException.SuppressRelease(); m_pExceptionDuringStartup = __pException; } res = FALSE; } EX_END_CATCH(SwallowAllExceptions); if (res == FALSE) goto FAILURE; FastInterlockOr((ULONG *) &m_State, TS_FullyInitialized); #ifdef DEBUGGING_SUPPORTED // // If we're debugging, let the debugger know that this // thread is up and running now. // if (CORDebuggerAttached()) { g_pDebugInterface->ThreadCreated(this); } else { LOG((LF_CORDB, LL_INFO10000, "ThreadCreated() not called due to CORDebuggerAttached() being FALSE for thread 0x%x\n", GetThreadId())); } #endif // DEBUGGING_SUPPORTED #ifdef PROFILING_SUPPORTED // If a profiler is running, let them know about the new thread. // // The call to IsGCSpecial is crucial to avoid a deadlock. 
See code:Thread::m_fGCSpecial for more // information if (!IsGCSpecial()) { BEGIN_PROFILER_CALLBACK(CORProfilerTrackThreads()); BOOL gcOnTransition = GC_ON_TRANSITIONS(FALSE); // disable GCStress 2 to avoid the profiler receiving a RuntimeThreadSuspended notification even before the ThreadCreated notification { GCX_PREEMP(); (&g_profControlBlock)->ThreadCreated((ThreadID) this); } GC_ON_TRANSITIONS(gcOnTransition); DWORD osThreadId = ::GetCurrentThreadId(); (&g_profControlBlock)->ThreadAssignedToOSThread( (ThreadID) this, osThreadId); END_PROFILER_CALLBACK(); } #endif // PROFILING_SUPPORTED // Reset the ThreadStoreLock state flag since the thread // has now been started. ResetThreadStateNC(Thread::TSNC_TSLTakenForStartup); return TRUE; FAILURE: if (m_fPreemptiveGCDisabled) { m_fPreemptiveGCDisabled = FALSE; } _ASSERTE (HasThreadState(TS_Unstarted)); SetThreadState(TS_FailStarted); if (GetThreadNULLOk() != NULL && IsAbortRequested()) UnmarkThreadForAbort(); #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT // // Undo the platform context initialization, so we don't leak a CoInitialize. // if (fCanCleanupCOMState) { // The thread pointer in TLS may not be set yet, if we had a failure before we set it. // So we'll set it up here (we'll unset it a few lines down). SetThread(this); CleanupCOMState(); } #endif FastInterlockDecrement(&ThreadStore::s_pThreadStore->m_PendingThreadCount); // One of the components of OtherThreadsComplete() has changed, so check whether // we should now exit the EE. ThreadStore::CheckForEEShutdown(); DecExternalCount(/*holdingLock*/ HasThreadStateNC(Thread::TSNC_TSLTakenForStartup)); SetThread(NULL); SetAppDomain(NULL); return FALSE; } BOOL Thread::AllocateIOCompletionContext() { WRAPPER_NO_CONTRACT; PIOCompletionContext pIOC = new (nothrow) IOCompletionContext; if(pIOC != NULL) { pIOC->lpOverlapped = NULL; m_pIOCompletionContext = pIOC; return TRUE; } else { return FALSE; } } VOID Thread::FreeIOCompletionContext() { WRAPPER_NO_CONTRACT; if (m_pIOCompletionContext != NULL) { PIOCompletionContext pIOC = (PIOCompletionContext) m_pIOCompletionContext; delete pIOC; m_pIOCompletionContext = NULL; } } void Thread::HandleThreadStartupFailure() { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; _ASSERTE(GetThreadNULLOk() != NULL); struct ProtectArgs { OBJECTREF pThrowable; OBJECTREF pReason; } args; memset(&args, 0, sizeof(ProtectArgs)); GCPROTECT_BEGIN(args); MethodTable *pMT = CoreLibBinder::GetException(kThreadStartException); args.pThrowable = AllocateObject(pMT); MethodDescCallSite exceptionCtor(METHOD__THREAD_START_EXCEPTION__EX_CTOR); if (m_pExceptionDuringStartup) { args.pReason = CLRException::GetThrowableFromException(m_pExceptionDuringStartup); Exception::Delete(m_pExceptionDuringStartup); m_pExceptionDuringStartup = NULL; } ARG_SLOT args1[] = { ObjToArgSlot(args.pThrowable), ObjToArgSlot(args.pReason), }; exceptionCtor.Call(args1); GCPROTECT_END(); //Prot RaiseTheExceptionInternalOnly(args.pThrowable, FALSE); } #ifndef TARGET_UNIX BOOL RevertIfImpersonated(BOOL *bReverted, HANDLE *phToken) { WRAPPER_NO_CONTRACT; BOOL bImpersonated = OpenThreadToken(GetCurrentThread(), // we are assuming that if this call fails, TOKEN_IMPERSONATE, // we are not impersonating. There is no win32 TRUE, // api to figure this out. The only alternative phToken); // is to use NtCurrentTeb->IsImpersonating(). 
if (bImpersonated) { *bReverted = RevertToSelf(); return *bReverted; } return TRUE; } void UndoRevert(BOOL bReverted, HANDLE hToken) { if (bReverted) { if (!SetThreadToken(NULL, hToken)) { _ASSERT("Undo Revert -> SetThreadToken failed"); STRESS_LOG1(LF_EH, LL_INFO100, "UndoRevert/SetThreadToken failed for hToken = %d\n",hToken); EEPOLICY_HANDLE_FATAL_ERROR(COR_E_SECURITY); } } return; } #endif // !TARGET_UNIX // We don't want ::CreateThread() calls scattered throughout the source. So gather // them all here. BOOL Thread::CreateNewThread(SIZE_T stackSize, LPTHREAD_START_ROUTINE start, void *args, LPCWSTR pName) { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; BOOL bRet; //This assert is here to prevent a bug in the future // CreateTask currently takes a DWORD and we will downcast // if that interface changes to take a SIZE_T this Assert needs to be removed. // _ASSERTE(stackSize <= 0xFFFFFFFF); #ifndef TARGET_UNIX HandleHolder token; BOOL bReverted = FALSE; bRet = RevertIfImpersonated(&bReverted, &token); if (bRet != TRUE) return bRet; #endif // !TARGET_UNIX m_StateNC = (ThreadStateNoConcurrency)((ULONG)m_StateNC | TSNC_CLRCreatedThread); bRet = CreateNewOSThread(stackSize, start, args); #ifndef TARGET_UNIX UndoRevert(bReverted, token); #endif // !TARGET_UNIX if (pName != NULL) SetThreadName(m_ThreadHandle, pName); return bRet; } void Thread::InitializationForManagedThreadInNative(_In_ Thread* pThread) { CONTRACTL { NOTHROW; MODE_ANY; GC_TRIGGERS; PRECONDITION(pThread != NULL); } CONTRACTL_END; #ifdef FEATURE_OBJCMARSHAL { GCX_COOP_THREAD_EXISTS(pThread); PREPARE_NONVIRTUAL_CALLSITE(METHOD__AUTORELEASEPOOL__CREATEAUTORELEASEPOOL); DECLARE_ARGHOLDER_ARRAY(args, 0); CALL_MANAGED_METHOD_NORET(args); } #endif // FEATURE_OBJCMARSHAL } void Thread::CleanUpForManagedThreadInNative(_In_ Thread* pThread) { CONTRACTL { NOTHROW; MODE_ANY; GC_TRIGGERS; PRECONDITION(pThread != NULL); } CONTRACTL_END; #ifdef FEATURE_OBJCMARSHAL { GCX_COOP_THREAD_EXISTS(pThread); PREPARE_NONVIRTUAL_CALLSITE(METHOD__AUTORELEASEPOOL__DRAINAUTORELEASEPOOL); DECLARE_ARGHOLDER_ARRAY(args, 0); CALL_MANAGED_METHOD_NORET(args); } #endif // FEATURE_OBJCMARSHAL } HANDLE Thread::CreateUtilityThread(Thread::StackSizeBucket stackSizeBucket, LPTHREAD_START_ROUTINE start, void *args, LPCWSTR pName, DWORD flags, DWORD* pThreadId) { LIMITED_METHOD_CONTRACT; // TODO: we should always use small stacks for most of these threads. For CLR 4, we're being conservative // here because this is a last-minute fix. 
SIZE_T stackSize; switch (stackSizeBucket) { case StackSize_Small: stackSize = 256 * 1024; break; case StackSize_Medium: stackSize = 512 * 1024; break; default: _ASSERTE(!"Bad stack size bucket"); break; case StackSize_Large: stackSize = 1024 * 1024; break; } flags |= STACK_SIZE_PARAM_IS_A_RESERVATION; DWORD threadId; HANDLE hThread = CreateThread(NULL, stackSize, start, args, flags, &threadId); SetThreadName(hThread, pName); if (pThreadId) *pThreadId = threadId; return hThread; } // Represent the value of DEFAULT_STACK_SIZE as passed in the property bag to the host during construction static unsigned long s_defaultStackSizeProperty = 0; void ParseDefaultStackSize(LPCWSTR valueStr) { if (valueStr) { LPWSTR end; errno = 0; unsigned long value = wcstoul(valueStr, &end, 16); // Base 16 without a prefix if ((errno == ERANGE) // Parsed value doesn't fit in an unsigned long || (valueStr == end) // No characters parsed || (end == nullptr) // Unexpected condition (should never happen) || (end[0] != 0)) // Unprocessed terminal characters { ThrowHR(E_INVALIDARG); } else { s_defaultStackSizeProperty = value; } } } SIZE_T GetDefaultStackSizeSetting() { static DWORD s_defaultStackSizeEnv = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DefaultStackSize); uint64_t value = s_defaultStackSizeEnv ? s_defaultStackSizeEnv : s_defaultStackSizeProperty; SIZE_T minStack = 0x10000; // 64K - Somewhat arbitrary minimum thread stack size SIZE_T maxStack = 0x80000000; // 2G - Somewhat arbitrary maximum thread stack size if ((value >= maxStack) || ((value != 0) && (value < minStack))) { ThrowHR(E_INVALIDARG); } return (SIZE_T) value; } BOOL Thread::GetProcessDefaultStackSize(SIZE_T* reserveSize, SIZE_T* commitSize) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; // // Let's get the stack sizes from the PE file that started process. // static SIZE_T ExeSizeOfStackReserve = 0; static SIZE_T ExeSizeOfStackCommit = 0; static BOOL fSizesGot = FALSE; if (!fSizesGot) { SIZE_T defaultStackSizeSetting = GetDefaultStackSizeSetting(); if (defaultStackSizeSetting != 0) { ExeSizeOfStackReserve = defaultStackSizeSetting; ExeSizeOfStackCommit = defaultStackSizeSetting; fSizesGot = TRUE; } } #ifndef TARGET_UNIX if (!fSizesGot) { HINSTANCE hInst = WszGetModuleHandle(NULL); _ASSERTE(hInst); // WszGetModuleHandle should never fail on the module that started the process. 
EX_TRY { PEDecoder pe(hInst); pe.GetEXEStackSizes(&ExeSizeOfStackReserve, &ExeSizeOfStackCommit); fSizesGot = TRUE; } EX_CATCH { fSizesGot = FALSE; } EX_END_CATCH(SwallowAllExceptions); } #endif // !TARGET_UNIX if (!fSizesGot) { //return some somewhat-reasonable numbers if (NULL != reserveSize) *reserveSize = 256*1024; if (NULL != commitSize) *commitSize = 256*1024; return FALSE; } if (NULL != reserveSize) *reserveSize = ExeSizeOfStackReserve; if (NULL != commitSize) *commitSize = ExeSizeOfStackCommit; return TRUE; } BOOL Thread::CreateNewOSThread(SIZE_T sizeToCommitOrReserve, LPTHREAD_START_ROUTINE start, void *args) { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; #ifdef TARGET_UNIX SIZE_T ourId = 0; #else DWORD ourId = 0; #endif HANDLE h = NULL; DWORD dwCreationFlags = CREATE_SUSPENDED; dwCreationFlags |= STACK_SIZE_PARAM_IS_A_RESERVATION; if (sizeToCommitOrReserve == 0) { sizeToCommitOrReserve = GetDefaultStackSizeSetting(); } #ifndef TARGET_UNIX // the PAL does its own adjustments as necessary if (sizeToCommitOrReserve != 0 && sizeToCommitOrReserve <= GetOsPageSize()) { // On Windows, passing a value that is <= one page size bizarrely causes the OS to use the default stack size instead of // a minimum, which is undesirable. This adjustment fixes that issue to use a minimum stack size (typically 64 KB). sizeToCommitOrReserve = GetOsPageSize() + 1; } #endif // !TARGET_UNIX // Make sure we have all our handles, in case someone tries to suspend us // as we are starting up. if (!AllocHandles()) { // OS is out of handles/memory? return FALSE; } #ifdef TARGET_UNIX h = ::PAL_CreateThread64(NULL /*=SECURITY_ATTRIBUTES*/, #else h = ::CreateThread( NULL /*=SECURITY_ATTRIBUTES*/, #endif sizeToCommitOrReserve, start, args, dwCreationFlags, &ourId); if (h == NULL) return FALSE; _ASSERTE(!m_fPreemptiveGCDisabled); // leave in preemptive until HasStarted. SetThreadHandle(h); m_WeOwnThreadHandle = TRUE; // Before we do the resume, we need to take note of the new ThreadId. This // is necessary because -- before the thread starts executing at KickofThread -- // it may perform some DllMain DLL_THREAD_ATTACH notifications. These could // call into managed code. During the consequent SetupThread, we need to // perform the Thread::HasStarted call instead of going through the normal // 'new thread' pathway. _ASSERTE(GetOSThreadId() == 0); _ASSERTE(ourId != 0); m_OSThreadId = ourId; FastInterlockIncrement(&ThreadStore::s_pThreadStore->m_PendingThreadCount); #ifdef _DEBUG m_Creator.SetToCurrentThread(); #endif return TRUE; } // // #threadDestruction // // General comments on thread destruction. // // The C++ Thread object can survive beyond the time when the Win32 thread has died. // This is important if an exposed object has been created for this thread. The // exposed object will survive until it is GC'ed. // // A client like an exposed object can place an external reference count on that // object. We also place a reference count on it when we construct it, and we lose // that count when the thread finishes doing useful work (OnThreadTerminate). // // One way OnThreadTerminate() is called is when the thread finishes doing useful // work. This case always happens on the correct thread. // // The other way OnThreadTerminate() is called is during product shutdown. We do // a "best effort" to eliminate all threads except the Main thread before shutdown // happens. But there may be some background threads or external threads still // running. // // When the final reference count disappears, we destruct. 
Until then, the thread // remains in the ThreadStore, but is marked as "Dead". //<TODO> // @TODO cwb: for a typical shutdown, only background threads are still around. // Should we interrupt them? What about the non-typical shutdown?</TODO> int Thread::IncExternalCount() { CONTRACTL { NOTHROW; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; Thread *pCurThread = GetThreadNULLOk(); _ASSERTE(m_ExternalRefCount > 0); int retVal = FastInterlockIncrement((LONG*)&m_ExternalRefCount); // If we have an exposed object and the refcount is greater than one // we must make sure to keep a strong handle to the exposed object // so that we keep it alive even if nobody has a reference to it. if (pCurThread && ((*((void**)m_ExposedObject)) != NULL)) { // The exposed object exists and needs a strong handle so check // to see if it has one. // Only a managed thread can setup StrongHnd. if ((*((void**)m_StrongHndToExposedObject)) == NULL) { GCX_COOP(); // Store the object in the strong handle. StoreObjectInHandle(m_StrongHndToExposedObject, ObjectFromHandle(m_ExposedObject)); } } return retVal; } int Thread::DecExternalCount(BOOL holdingLock) { CONTRACTL { NOTHROW; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; // Note that it's possible to get here with a NULL current thread (during // shutdown of the thread manager). Thread *pCurThread = GetThreadNULLOk(); _ASSERTE (pCurThread == NULL || IsAtProcessExit() || (!holdingLock && !ThreadStore::HoldingThreadStore(pCurThread)) || (holdingLock && ThreadStore::HoldingThreadStore(pCurThread))); BOOL ToggleGC = FALSE; BOOL SelfDelete = FALSE; int retVal; // Must synchronize count and exposed object handle manipulation. We use the // thread lock for this, which implies that we must be in pre-emptive mode // to begin with and avoid any activity that would invoke a GC (this // acquires the thread store lock). if (pCurThread) { // TODO: we would prefer to use a GC Holder here, however it is hard // to get the case where we're deleting this thread correct given // the current macros. We want to supress the release of the holder // here which puts us in Preemptive mode, and also the switch to // Cooperative mode below, but since both holders will be named // the same thing (due to the generic nature of the macro) we can // not use GCX_*_SUPRESS_RELEASE() for 2 holders in the same scope // b/c they will both apply simply to the most narrowly scoped // holder. ToggleGC = pCurThread->PreemptiveGCDisabled(); if (ToggleGC) pCurThread->EnablePreemptiveGC(); } GCX_ASSERT_PREEMP(); ThreadStoreLockHolder tsLock(!holdingLock); _ASSERTE(m_ExternalRefCount >= 1); _ASSERTE(!holdingLock || ThreadStore::s_pThreadStore->m_Crst.GetEnterCount() > 0 || IsAtProcessExit()); retVal = FastInterlockDecrement((LONG*)&m_ExternalRefCount); if (retVal == 0) { HANDLE h = GetThreadHandle(); if (h == INVALID_HANDLE_VALUE) { h = m_ThreadHandleForClose; m_ThreadHandleForClose = INVALID_HANDLE_VALUE; } // Can not assert like this. We have already removed the Unstarted bit. //_ASSERTE (IsUnstarted() || h != INVALID_HANDLE_VALUE); if (h != INVALID_HANDLE_VALUE && m_WeOwnThreadHandle) { ::CloseHandle(h); SetThreadHandle(INVALID_HANDLE_VALUE); } // Switch back to cooperative mode to manipulate the thread. if (pCurThread) { // TODO: we would prefer to use GCX_COOP here, see comment above. 
pCurThread->DisablePreemptiveGC(); } GCX_ASSERT_COOP(); // during process detach the thread might still be in the thread list // if it hasn't seen its DLL_THREAD_DETACH yet. Use the following // tweak to decide if the thread has terminated yet. if (!HasValidThreadHandle()) { SelfDelete = this == pCurThread; m_ExceptionState.FreeAllStackTraces(); if (SelfDelete) { SetThread(NULL); } delete this; } tsLock.Release(); // It only makes sense to restore the GC mode if we didn't just destroy // our own thread object. if (pCurThread && !SelfDelete && !ToggleGC) { pCurThread->EnablePreemptiveGC(); } // Cannot use this here b/c it creates a holder named the same as GCX_ASSERT_COOP // in the same scope above... // // GCX_ASSERT_PREEMP() return retVal; } else if (pCurThread == NULL) { // We're in shutdown, too late to be worrying about having a strong // handle to the exposed thread object, we've already performed our // final GC. tsLock.Release(); return retVal; } else { // Check to see if the external ref count reaches exactly one. If this // is the case and we have an exposed object then it is that exposed object // that is holding a reference to us. To make sure that we are not the // ones keeping the exposed object alive we need to remove the strong // reference we have to it. if ((retVal == 1) && ((*((void**)m_StrongHndToExposedObject)) != NULL)) { // Switch back to cooperative mode to manipulate the object. // Don't want to switch back to COOP until we let go of the lock // however we are allowed to call StoreObjectInHandle here in preemptive // mode because we are setting the value to NULL. CONTRACT_VIOLATION(ModeViolation); // Clear the handle and leave the lock. // We do not have to to DisablePreemptiveGC here, because // we just want to put NULL into a handle. StoreObjectInHandle(m_StrongHndToExposedObject, NULL); tsLock.Release(); // Switch back to the initial GC mode. if (ToggleGC) { pCurThread->DisablePreemptiveGC(); } GCX_ASSERT_COOP(); return retVal; } } tsLock.Release(); // Switch back to the initial GC mode. if (ToggleGC) { pCurThread->DisablePreemptiveGC(); } return retVal; } //-------------------------------------------------------------------- // Destruction. This occurs after the associated native thread // has died. //-------------------------------------------------------------------- Thread::~Thread() { CONTRACTL { NOTHROW; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; // TODO: enable this //_ASSERTE(GetThread() != this); _ASSERTE(m_ThrewControlForThread == 0); // AbortRequest is coupled with TrapReturningThread. // We should have unmarked the thread for abort. // !!! Can not assert here. If a thread has no managed code on stack // !!! we leave the g_TrapReturningThread set so that the thread will be // !!! aborted if it enters managed code. //_ASSERTE(!IsAbortRequested()); // We should not have the Thread marked for abort. But if we have // we need to unmark it so that g_TrapReturningThreads is decremented. 
if (IsAbortRequested()) { UnmarkThreadForAbort(); } #if defined(_DEBUG) && defined(TRACK_SYNC) _ASSERTE(IsAtProcessExit() || ((Dbg_TrackSyncStack *) m_pTrackSync)->m_StackPointer == 0); delete m_pTrackSync; #endif // TRACK_SYNC _ASSERTE(IsDead() || IsUnstarted() || IsAtProcessExit()); if (m_WaitEventLink.m_Next != NULL && !IsAtProcessExit()) { WaitEventLink *walk = &m_WaitEventLink; while (walk->m_Next) { ThreadQueue::RemoveThread(this, (SyncBlock*)((DWORD_PTR)walk->m_Next->m_WaitSB & ~1)); StoreEventToEventStore (walk->m_Next->m_EventWait); } m_WaitEventLink.m_Next = NULL; } if (m_StateNC & TSNC_ExistInThreadStore) { BOOL ret; ret = ThreadStore::RemoveThread(this); _ASSERTE(ret); } #ifdef _DEBUG m_pFrame = (Frame *)POISONC; #endif // Normally we shouldn't get here with a valid thread handle; however if SetupThread // failed (due to an OOM for example) then we need to CloseHandle the thread // handle if we own it. if (m_WeOwnThreadHandle && (GetThreadHandle() != INVALID_HANDLE_VALUE)) { CloseHandle(GetThreadHandle()); } if (m_DebugSuspendEvent.IsValid()) { m_DebugSuspendEvent.CloseEvent(); } if (m_EventWait.IsValid()) { m_EventWait.CloseEvent(); } FreeIOCompletionContext(); if (m_OSContext) delete m_OSContext; if (m_pOSContextBuffer) { delete[] m_pOSContextBuffer; m_pOSContextBuffer = NULL; } else if (m_pSavedRedirectContext) { delete m_pSavedRedirectContext; } MarkRedirectContextInUse(m_pSavedRedirectContext); m_pSavedRedirectContext = NULL; #ifdef FEATURE_COMINTEROP if (m_pRCWStack) delete m_pRCWStack; #endif if (m_pExceptionDuringStartup) { Exception::Delete (m_pExceptionDuringStartup); } ClearContext(); if (!IsAtProcessExit()) { // Destroy any handles that we're using to hold onto exception objects SafeSetThrowables(NULL); DestroyShortWeakHandle(m_ExposedObject); DestroyStrongHandle(m_StrongHndToExposedObject); } g_pThinLockThreadIdDispenser->DisposeId(GetThreadId()); if (m_pIBCInfo) { delete m_pIBCInfo; } m_tailCallTls.FreeArgBuffer(); #ifdef FEATURE_EVENT_TRACE // Destruct the thread local type cache for allocation sampling if(m_pAllLoggedTypes) { ETW::TypeSystemLog::DeleteTypeHashNoLock(&m_pAllLoggedTypes); } #endif // FEATURE_EVENT_TRACE // Wait for another thread to leave its loop in DeadlockAwareLock::TryBeginEnterLock CrstHolder lock(&g_DeadlockAwareCrst); } #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT void Thread::BaseCoUninitialize() { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_MODE_PREEMPTIVE; _ASSERTE(GetThread() == this); ::CoUninitialize(); }// BaseCoUninitialize #ifdef FEATURE_COMINTEROP void Thread::BaseWinRTUninitialize() { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_MODE_PREEMPTIVE; _ASSERTE(WinRTSupported()); _ASSERTE(GetThread() == this); _ASSERTE(IsWinRTInitialized()); RoUninitialize(); } #endif // FEATURE_COMINTEROP void Thread::CoUninitialize() { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; // Running threads might have performed a CoInitialize which must // now be balanced. 
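// Illustrative aside (a minimal sketch, not part of the runtime's own implementation): the
// balancing rule referred to in the comment above is the usual COM one, i.e. only a thread that
// successfully initialized COM may call CoUninitialize, and RPC_E_CHANGED_MODE must not be
// balanced at all. The small RAII wrapper below shows that rule against the public COM API; the
// class name is invented for the example.
#if 0 // sketch only, excluded from compilation
// Requires <objbase.h>.
class CoInitScope
{
    bool m_needsUninit = false;
public:
    explicit CoInitScope(DWORD model) // COINIT_APARTMENTTHREADED or COINIT_MULTITHREADED
    {
        HRESULT hr = CoInitializeEx(NULL, model);
        // S_OK: first initialization on this thread. S_FALSE: already initialized, the internal
        // ref count was bumped. Both must be balanced with CoUninitialize.
        // RPC_E_CHANGED_MODE: a different apartment model is already active; do not balance.
        m_needsUninit = (hr == S_OK || hr == S_FALSE);
    }
    ~CoInitScope()
    {
        if (m_needsUninit)
            CoUninitialize();
    }
};
#endif
// The checks that follow (IsCoInitialized and friends) record the same "did we initialize it" fact
// as a thread state bit, so the balancing call is only made when it is actually owed.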
BOOL needsUninitialize = IsCoInitialized() #ifdef FEATURE_COMINTEROP || IsWinRTInitialized() #endif // FEATURE_COMINTEROP ; if (!IsAtProcessExit() && needsUninitialize) { GCX_PREEMP(); CONTRACT_VIOLATION(ThrowsViolation); if (IsCoInitialized()) { BaseCoUninitialize(); FastInterlockAnd((ULONG *)&m_State, ~TS_CoInitialized); } #ifdef FEATURE_COMINTEROP if (IsWinRTInitialized()) { _ASSERTE(WinRTSupported()); BaseWinRTUninitialize(); ResetWinRTInitialized(); } #endif // FEATURE_COMNITEROP } } #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT void Thread::CleanupDetachedThreads() { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; _ASSERTE(!ThreadStore::HoldingThreadStore()); ThreadStoreLockHolder threadStoreLockHolder; Thread *thread = ThreadStore::GetAllThreadList(NULL, 0, 0); STRESS_LOG0(LF_SYNC, LL_INFO1000, "T::CDT called\n"); while (thread != NULL) { Thread *next = ThreadStore::GetAllThreadList(thread, 0, 0); if (thread->IsDetached()) { STRESS_LOG1(LF_SYNC, LL_INFO1000, "T::CDT - detaching thread 0x%p\n", thread); // Unmark that the thread is detached while we have the // thread store lock. This will ensure that no other // thread will race in here and try to delete it, too. FastInterlockAnd((ULONG*)&(thread->m_State), ~TS_Detached); FastInterlockDecrement(&m_DetachCount); if (!thread->IsBackground()) FastInterlockDecrement(&m_ActiveDetachCount); // If the debugger is attached, then we need to unlock the // thread store before calling OnThreadTerminate. That // way, we won't be holding the thread store lock if we // need to block sending a detach thread event. BOOL debuggerAttached = #ifdef DEBUGGING_SUPPORTED CORDebuggerAttached(); #else // !DEBUGGING_SUPPORTED FALSE; #endif // !DEBUGGING_SUPPORTED if (debuggerAttached) ThreadStore::UnlockThreadStore(); thread->OnThreadTerminate(debuggerAttached ? FALSE : TRUE); #ifdef DEBUGGING_SUPPORTED if (debuggerAttached) { ThreadSuspend::LockThreadStore(ThreadSuspend::SUSPEND_OTHER); // We remember the next Thread in the thread store // list before deleting the current one. But we can't // use that Thread pointer now that we release the // thread store lock in the middle of the loop. We // have to start from the beginning of the list every // time. If two threads T1 and T2 race into // CleanupDetachedThreads, then T1 will grab the first // Thread on the list marked for deletion and release // the lock. T2 will grab the second one on the // list. T2 may complete destruction of its Thread, // then T1 might re-acquire the thread store lock and // try to use the next Thread in the thread store. But // T2 just deleted that next Thread. thread = ThreadStore::GetAllThreadList(NULL, 0, 0); } else #endif // DEBUGGING_SUPPORTED { thread = next; } } else if (thread->HasThreadState(TS_Finalized)) { STRESS_LOG1(LF_SYNC, LL_INFO1000, "T::CDT - finalized thread 0x%p\n", thread); thread->ResetThreadState(TS_Finalized); // We have finalized the managed Thread object. Now it is time to clean up the unmanaged part thread->DecExternalCount(TRUE); thread = next; } else { thread = next; } } s_fCleanFinalizedThread = FALSE; } #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT void Thread::CleanupCOMState() { CONTRACTL { NOTHROW; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; #ifdef FEATURE_COMINTEROP if (GetFinalApartment() == Thread::AS_InSTA) ReleaseRCWsInCachesNoThrow(GetCurrentCtxCookie()); #endif // FEATURE_COMINTEROP // Running threads might have performed a CoInitialize which must // now be balanced. 
However only the thread that called COInitialize can // call CoUninitialize. BOOL needsUninitialize = IsCoInitialized() #ifdef FEATURE_COMINTEROP || IsWinRTInitialized() #endif // FEATURE_COMINTEROP ; if (needsUninitialize) { GCX_PREEMP(); CONTRACT_VIOLATION(ThrowsViolation); if (IsCoInitialized()) { BaseCoUninitialize(); ResetCoInitialized(); } #ifdef FEATURE_COMINTEROP if (IsWinRTInitialized()) { _ASSERTE(WinRTSupported()); BaseWinRTUninitialize(); ResetWinRTInitialized(); } #endif // FEATURE_COMINTEROP } } #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT // See general comments on thread destruction (code:#threadDestruction) above. void Thread::OnThreadTerminate(BOOL holdingLock) { CONTRACTL { NOTHROW; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; // #ReportDeadOnThreadTerminate // Caller should have put the TS_ReportDead bit on by now. // We don't want any windows after the exit event but before the thread is marked dead. // If a debugger attached during such a window (or even took a dump at the exit event), // then it may not realize the thread is dead. // So ensure we mark the thread as dead before we send the tool notifications. // The TS_ReportDead bit will cause the debugger to view this as TS_Dead. _ASSERTE(HasThreadState(TS_ReportDead)); // Should not use OSThreadId: // OSThreadId may change for the current thread is the thread is blocked and rescheduled // by host. Thread *pCurrentThread = GetThreadNULLOk(); DWORD CurrentThreadID = pCurrentThread?pCurrentThread->GetThreadId():0; DWORD ThisThreadID = GetThreadId(); #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT // If the currently running thread is the thread that died and it is an STA thread, then we // need to release all the RCW's in the current context. However, we cannot do this if we // are in the middle of process detach. if (!IsAtProcessExit() && this == GetThreadNULLOk()) { CleanupCOMState(); } #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT if (g_fEEShutDown != 0) { // We have started shutdown. Not safe to touch CLR state. return; } // We took a count during construction, and we rely on the count being // non-zero as we terminate the thread here. _ASSERTE(m_ExternalRefCount > 0); // The thread is no longer running. It's important that we zero any general OBJECTHANDLE's // on this Thread object. That's because we need the managed Thread object to be subject to // GC and yet any HANDLE is opaque to the GC when it comes to collecting cycles. When the // thread is executing, nothing can be collected anyway. But now that we stop running the // cycle concerns us. // // It's important that we only use OBJECTHANDLE's that are retrievable while the thread is // still running. That's what allows us to zero them here with impunity: { // No handles to clean up in the m_ExceptionState _ASSERTE(!m_ExceptionState.IsExceptionInProgress()); GCX_COOP(); // Destroy the LastThrown handle (and anything that violates the above assert). SafeSetThrowables(NULL); // Free all structures related to thread statics for this thread DeleteThreadStaticData(); } if (GCHeapUtilities::IsGCHeapInitialized()) { // Guaranteed to NOT be a shutdown case, because we tear down the heap before // we tear down any threads during shutdown. if (ThisThreadID == CurrentThreadID) { GCX_COOP(); // GetTotalAllocatedBytes reads dead_threads_non_alloc_bytes, but will suspend EE, being in COOP mode we cannot race with that // however, there could be other threads terminating and doing the same Add. 
FastInterlockExchangeAddLong((LONG64*)&dead_threads_non_alloc_bytes, m_alloc_context.alloc_limit - m_alloc_context.alloc_ptr); GCHeapUtilities::GetGCHeap()->FixAllocContext(&m_alloc_context, NULL, NULL); m_alloc_context.init(); } } // We switch a thread to dead when it has finished doing useful work. But it // remains in the thread store so long as someone keeps it alive. An exposed // object will do this (it releases the refcount in its finalizer). If the // thread is never released, we have another look during product shutdown and // account for the unreleased refcount of the uncollected exposed object: if (IsDead()) { GCX_COOP(); _ASSERTE(IsAtProcessExit()); ClearContext(); if (m_ExposedObject != NULL) DecExternalCount(holdingLock); // may destruct now } else { #ifdef DEBUGGING_SUPPORTED // // If we're debugging, let the debugger know that this thread is // gone. // // There is a race here where the debugger could have attached after // we checked (and thus didn't release the lock). In this case, // we can't call out to the debugger or we risk a deadlock. // if (!holdingLock && CORDebuggerAttached()) { g_pDebugInterface->DetachThread(this); } #endif // DEBUGGING_SUPPORTED #ifdef PROFILING_SUPPORTED // If a profiler is present, then notify the profiler of thread destroy { BEGIN_PROFILER_CALLBACK(CORProfilerTrackThreads()); GCX_PREEMP(); (&g_profControlBlock)->ThreadDestroyed((ThreadID) this); END_PROFILER_CALLBACK(); } #endif // PROFILING_SUPPORTED if (!holdingLock) { LOG((LF_SYNC, INFO3, "OnThreadTerminate obtain lock\n")); ThreadSuspend::LockThreadStore(ThreadSuspend::SUSPEND_OTHER); } if (GCHeapUtilities::IsGCHeapInitialized() && ThisThreadID != CurrentThreadID) { // We must be holding the ThreadStore lock in order to clean up alloc context. // We should never call FixAllocContext during GC. dead_threads_non_alloc_bytes += m_alloc_context.alloc_limit - m_alloc_context.alloc_ptr; GCHeapUtilities::GetGCHeap()->FixAllocContext(&m_alloc_context, NULL, NULL); m_alloc_context.init(); } FastInterlockOr((ULONG *) &m_State, TS_Dead); ThreadStore::s_pThreadStore->m_DeadThreadCount++; ThreadStore::s_pThreadStore->IncrementDeadThreadCountForGCTrigger(); if (IsUnstarted()) ThreadStore::s_pThreadStore->m_UnstartedThreadCount--; else { if (IsBackground()) ThreadStore::s_pThreadStore->m_BackgroundThreadCount--; } FastInterlockAnd((ULONG *) &m_State, ~(TS_Unstarted | TS_Background)); // // If this thread was told to trip for debugging between the // sending of the detach event above and the locking of the // thread store lock, then remove the flag and decrement the // global trap returning threads count. // if (!IsAtProcessExit()) { // A thread can't die during a GCPending, because the thread store's // lock is held by the GC thread. if (m_State & TS_DebugSuspendPending) UnmarkForSuspension(~TS_DebugSuspendPending); if (CurrentThreadID == ThisThreadID && IsAbortRequested()) { UnmarkThreadForAbort(); } } if (GetThreadHandle() != INVALID_HANDLE_VALUE) { if (m_ThreadHandleForClose == INVALID_HANDLE_VALUE) { m_ThreadHandleForClose = GetThreadHandle(); } SetThreadHandle (INVALID_HANDLE_VALUE); } m_OSThreadId = 0; // If nobody else is holding onto the thread, we may destruct it here: ULONG oldCount = DecExternalCount(TRUE); // If we are shutting down the process, we only have one thread active in the // system. So we can disregard all the reasons that hold this thread alive -- // TLS is about to be reclaimed anyway. 
if (IsAtProcessExit()) while (oldCount > 0) { oldCount = DecExternalCount(TRUE); } // ASSUME THAT THE THREAD IS DELETED, FROM HERE ON _ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >= 0); _ASSERTE(ThreadStore::s_pThreadStore->m_BackgroundThreadCount >= 0); _ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >= ThreadStore::s_pThreadStore->m_BackgroundThreadCount); _ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >= ThreadStore::s_pThreadStore->m_UnstartedThreadCount); _ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >= ThreadStore::s_pThreadStore->m_DeadThreadCount); // One of the components of OtherThreadsComplete() has changed, so check whether // we should now exit the EE. ThreadStore::CheckForEEShutdown(); if (ThisThreadID == CurrentThreadID) { // NULL out the thread block in the TLS. We can't do this if we aren't on the // right thread. But this will only happen during a shutdown. And we've made // a "best effort" to reduce to a single thread before we begin the shutdown. SetThread(NULL); SetAppDomain(NULL); } if (!holdingLock) { LOG((LF_SYNC, INFO3, "OnThreadTerminate releasing lock\n")); ThreadSuspend::UnlockThreadStore(ThisThreadID == CurrentThreadID); } } } // Helper functions to check for duplicate handles. We only do this check if // a wait-for-multiple fails. int __cdecl compareHandles( const void *arg1, const void *arg2 ) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; HANDLE h1 = *(HANDLE*)arg1; HANDLE h2 = *(HANDLE*)arg2; return (h1 == h2) ? 0 : ((h1 < h2) ? -1 : 1); } BOOL CheckForDuplicateHandles(int countHandles, HANDLE *handles) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; qsort(handles,countHandles,sizeof(HANDLE),compareHandles); for (int i=1; i < countHandles; i++) { if (handles[i-1] == handles[i]) return TRUE; } return FALSE; } //-------------------------------------------------------------------- // Based on whether this thread has a message pump, do the appropriate // style of Wait.
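// Illustrative aside (a minimal sketch, not part of the runtime's own implementation): the
// sort-then-scan idea used by CheckForDuplicateHandles above can be written with <algorithm>
// instead of qsort; the helper name is invented for the example. Like the original, it reorders
// the caller's array, which is acceptable here because it only runs on the failure path of a
// wait-for-multiple call.
#if 0 // sketch only, excluded from compilation
// Requires <algorithm> and <windows.h> (for HANDLE).
static bool HasDuplicateHandles(HANDLE* handles, int count)
{
    std::sort(handles, handles + count);
    return std::adjacent_find(handles, handles + count) != (handles + count);
}
#endif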
//-------------------------------------------------------------------- DWORD Thread::DoAppropriateWait(int countHandles, HANDLE *handles, BOOL waitAll, DWORD millis, WaitMode mode, PendingSync *syncState) { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; INDEBUG(BOOL alertable = (mode & WaitMode_Alertable) != 0;); _ASSERTE(alertable || syncState == 0); struct Param { Thread *pThis; int countHandles; HANDLE *handles; BOOL waitAll; DWORD millis; WaitMode mode; DWORD dwRet; } param; param.pThis = this; param.countHandles = countHandles; param.handles = handles; param.waitAll = waitAll; param.millis = millis; param.mode = mode; param.dwRet = (DWORD) -1; EE_TRY_FOR_FINALLY(Param *, pParam, &param) { pParam->dwRet = pParam->pThis->DoAppropriateWaitWorker(pParam->countHandles, pParam->handles, pParam->waitAll, pParam->millis, pParam->mode); } EE_FINALLY { if (syncState) { if (!GOT_EXCEPTION() && param.dwRet >= WAIT_OBJECT_0 && param.dwRet < (DWORD)(WAIT_OBJECT_0 + countHandles)) { // This thread has been removed from syncblk waiting list by the signalling thread syncState->Restore(FALSE); } else syncState->Restore(TRUE); } _ASSERTE (param.dwRet != WAIT_IO_COMPLETION); } EE_END_FINALLY; return(param.dwRet); } DWORD Thread::DoAppropriateWait(AppropriateWaitFunc func, void *args, DWORD millis, WaitMode mode, PendingSync *syncState) { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; INDEBUG(BOOL alertable = (mode & WaitMode_Alertable) != 0;); _ASSERTE(alertable || syncState == 0); struct Param { Thread *pThis; AppropriateWaitFunc func; void *args; DWORD millis; WaitMode mode; DWORD dwRet; } param; param.pThis = this; param.func = func; param.args = args; param.millis = millis; param.mode = mode; param.dwRet = (DWORD) -1; EE_TRY_FOR_FINALLY(Param *, pParam, &param) { pParam->dwRet = pParam->pThis->DoAppropriateWaitWorker(pParam->func, pParam->args, pParam->millis, pParam->mode); } EE_FINALLY { if (syncState) { if (!GOT_EXCEPTION() && WAIT_OBJECT_0 == param.dwRet) { // This thread has been removed from syncblk waiting list by the signalling thread syncState->Restore(FALSE); } else syncState->Restore(TRUE); } _ASSERTE (WAIT_IO_COMPLETION != param.dwRet); } EE_END_FINALLY; return(param.dwRet); } #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT //-------------------------------------------------------------------- // helper to do message wait //-------------------------------------------------------------------- DWORD MsgWaitHelper(int numWaiters, HANDLE* phEvent, BOOL bWaitAll, DWORD millis, BOOL bAlertable) { STANDARD_VM_CONTRACT; DWORD flags = 0; DWORD dwReturn=WAIT_ABANDONED; // If we're going to pump, we cannot use WAIT_ALL. That's because the wait would // only be satisfied if a message arrives while the handles are signalled. If we // want true WAIT_ALL, we need to fire up a different thread in the MTA and wait // on its result. This isn't implemented yet. // // A change was added to WaitHandleNative::CorWaitMultipleNative to disable WaitAll // in an STA with more than one handle. if (bWaitAll) { if (numWaiters == 1) bWaitAll = FALSE; // The check that's supposed to prevent this condition from occuring, in WaitHandleNative::CorWaitMultipleNative, // is unfortunately behind FEATURE_COMINTEROP instead of FEATURE_COMINTEROP_APARTMENT_SUPPORT. // So on CoreCLR (where FEATURE_COMINTEROP is not currently defined) we can actually reach this point. // We can't fix this, because it's a breaking change, so we just won't assert here. 
// The result is that WaitAll on an STA thread in CoreCLR will behave stragely, as described above. } if (bWaitAll) flags |= COWAIT_WAITALL; if (bAlertable) flags |= COWAIT_ALERTABLE; // CoWaitForMultipleHandles does not support more than 63 handles. It returns RPC_S_CALLPENDING for more than 63 handles // that is impossible to differentiate from timeout. if (numWaiters > 63) COMPlusThrow(kNotSupportedException, W("NotSupported_MaxWaitHandles_STA")); HRESULT hr = CoWaitForMultipleHandles(flags, millis, numWaiters, phEvent, &dwReturn); if (hr == RPC_S_CALLPENDING) { dwReturn = WAIT_TIMEOUT; } else if (FAILED(hr)) { // The service behaves differently on an STA vs. MTA in how much // error information it propagates back, and in which form. We currently // only get here in the STA case, so bias this logic that way. dwReturn = WAIT_FAILED; } else { dwReturn += WAIT_OBJECT_0; // success -- bias back } return dwReturn; } #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT //-------------------------------------------------------------------- // Do appropriate wait based on apartment state (STA or MTA) DWORD Thread::DoAppropriateAptStateWait(int numWaiters, HANDLE* pHandles, BOOL bWaitAll, DWORD timeout, WaitMode mode) { STANDARD_VM_CONTRACT; BOOL alertable = (mode & WaitMode_Alertable) != 0; #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT if (alertable && !GetDomain()->MustForceTrivialWaitOperations()) { ApartmentState as = GetFinalApartment(); if (AS_InMTA != as) { return MsgWaitHelper(numWaiters, pHandles, bWaitAll, timeout, alertable); } } #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT return WaitForMultipleObjectsEx(numWaiters, pHandles, bWaitAll, timeout, alertable); } // A helper called by our two flavors of DoAppropriateWaitWorker void Thread::DoAppropriateWaitWorkerAlertableHelper(WaitMode mode) { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; // A word about ordering for Interrupt. If someone tries to interrupt a thread // that's in the interruptible state, we queue an APC. But if they try to interrupt // a thread that's not in the interruptible state, we just record that fact. So // we have to set TS_Interruptible before we test to see whether someone wants to // interrupt us or else we have a race condition that causes us to skip the APC. FastInterlockOr((ULONG *) &m_State, TS_Interruptible); if (HasThreadStateNC(TSNC_InRestoringSyncBlock)) { // The thread is restoring SyncBlock for Object.Wait. ResetThreadStateNC(TSNC_InRestoringSyncBlock); } else { HandleThreadInterrupt(); // Safe to clear the interrupted state, no APC could have fired since we // reset m_UserInterrupt (which inhibits our APC callback from doing // anything). FastInterlockAnd((ULONG *) &m_State, ~TS_Interrupted); } } void MarkOSAlertableWait() { LIMITED_METHOD_CONTRACT; GetThread()->SetThreadStateNC (Thread::TSNC_OSAlertableWait); } void UnMarkOSAlertableWait() { LIMITED_METHOD_CONTRACT; GetThread()->ResetThreadStateNC (Thread::TSNC_OSAlertableWait); } //-------------------------------------------------------------------- // Based on whether this thread has a message pump, do the appropriate // style of Wait. //-------------------------------------------------------------------- DWORD Thread::DoAppropriateWaitWorker(int countHandles, HANDLE *handles, BOOL waitAll, DWORD millis, WaitMode mode) { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; DWORD ret = 0; BOOL alertable = (mode & WaitMode_Alertable) != 0; // Waits from SynchronizationContext.WaitHelper are always just WaitMode_IgnoreSyncCtx. 
// So if we defer to a sync ctx, we will lose any extra bits. We must therefore not // defer to a sync ctx if doing any non-default wait. // If you're doing a default wait, but want to ignore sync ctx, specify WaitMode_IgnoreSyncCtx // which will make mode != WaitMode_Alertable. BOOL ignoreSyncCtx = (mode != WaitMode_Alertable); if (GetDomain()->MustForceTrivialWaitOperations()) ignoreSyncCtx = TRUE; // Unless the ignoreSyncCtx flag is set, first check to see if there is a synchronization // context on the current thread and if there is, dispatch to it to do the wait. // If the wait is non alertable we cannot forward the call to the sync context // since fundamental parts of the system (such as the GC) rely on non alertable // waits not running any managed code. Also if we are past the point in shutdown were we // are allowed to run managed code then we can't forward the call to the sync context. if (!ignoreSyncCtx && alertable && !HasThreadStateNC(Thread::TSNC_BlockedForShutdown)) { GCX_COOP(); BOOL fSyncCtxPresent = FALSE; OBJECTREF SyncCtxObj = NULL; GCPROTECT_BEGIN(SyncCtxObj) { GetSynchronizationContext(&SyncCtxObj); if (SyncCtxObj != NULL) { SYNCHRONIZATIONCONTEXTREF syncRef = (SYNCHRONIZATIONCONTEXTREF)SyncCtxObj; if (syncRef->IsWaitNotificationRequired()) { fSyncCtxPresent = TRUE; ret = DoSyncContextWait(&SyncCtxObj, countHandles, handles, waitAll, millis); } } } GCPROTECT_END(); if (fSyncCtxPresent) return ret; } // Before going to pre-emptive mode the thread needs to be flagged as waiting for // the debugger. This used to be accomplished by the TS_Interruptible flag but that // doesn't work reliably, see DevDiv Bugs 699245. Some methods call in here already in // COOP mode so we set the bit before the transition. For the calls that are already // in pre-emptive mode those are still buggy. This is only a partial fix. BOOL isCoop = PreemptiveGCDisabled(); ThreadStateNCStackHolder tsNC(isCoop && alertable, TSNC_DebuggerSleepWaitJoin); GCX_PREEMP(); if (alertable) { DoAppropriateWaitWorkerAlertableHelper(mode); } StateHolder<MarkOSAlertableWait,UnMarkOSAlertableWait> OSAlertableWait(alertable); ThreadStateHolder tsh(alertable, TS_Interruptible | TS_Interrupted); ULONGLONG dwStart = 0, dwEnd; retry: if (millis != INFINITE) { dwStart = CLRGetTickCount64(); } ret = DoAppropriateAptStateWait(countHandles, handles, waitAll, millis, mode); if (ret == WAIT_IO_COMPLETION) { _ASSERTE (alertable); if (m_State & TS_Interrupted) { HandleThreadInterrupt(); } // We could be woken by some spurious APC or an EE APC queued to // interrupt us. In the latter case the TS_Interrupted bit will be set // in the thread state bits. Otherwise we just go back to sleep again. if (millis != INFINITE) { dwEnd = CLRGetTickCount64(); if (dwEnd >= dwStart + millis) { ret = WAIT_TIMEOUT; goto WaitCompleted; } else { millis -= (DWORD)(dwEnd - dwStart); } } goto retry; } _ASSERTE((ret >= WAIT_OBJECT_0 && ret < (WAIT_OBJECT_0 + (DWORD)countHandles)) || (ret >= WAIT_ABANDONED && ret < (WAIT_ABANDONED + (DWORD)countHandles)) || (ret == WAIT_TIMEOUT) || (ret == WAIT_FAILED)); // countHandles is used as an unsigned -- it should never be negative. _ASSERTE(countHandles >= 0); // We support precisely one WAIT_FAILED case, where we attempt to wait on a // thread handle and the thread is in the process of dying we might get a // invalid handle substatus. Turn this into a successful wait. // There are three cases to consider: // 1) Only waiting on one handle: return success right away. 
// 2) Waiting for all handles to be signalled: retry the wait without the // affected handle. // 3) Waiting for one of multiple handles to be signalled: return with the // first handle that is either signalled or has become invalid. if (ret == WAIT_FAILED) { DWORD errorCode = ::GetLastError(); if (errorCode == ERROR_INVALID_PARAMETER) { if (CheckForDuplicateHandles(countHandles, handles)) COMPlusThrow(kDuplicateWaitObjectException); else COMPlusThrowHR(HRESULT_FROM_WIN32(errorCode)); } else if (errorCode == ERROR_ACCESS_DENIED) { // A Win32 ACL could prevent us from waiting on the handle. COMPlusThrow(kUnauthorizedAccessException); } else if (errorCode == ERROR_NOT_ENOUGH_MEMORY) { ThrowOutOfMemory(); } #ifdef TARGET_UNIX else if (errorCode == ERROR_NOT_SUPPORTED) { // "Wait for any" and "wait for all" operations on multiple wait handles are not supported when a cross-process sync // object is included in the array COMPlusThrow(kPlatformNotSupportedException, W("PlatformNotSupported_NamedSyncObjectWaitAnyWaitAll")); } #endif else if (errorCode != ERROR_INVALID_HANDLE) { ThrowWin32(errorCode); } if (countHandles == 1) ret = WAIT_OBJECT_0; else if (waitAll) { // Probe all handles with a timeout of zero. When we find one that's // invalid, move it out of the list and retry the wait. for (int i = 0; i < countHandles; i++) { // WaitForSingleObject won't pump memssage; we already probe enough space // before calling this function and we don't want to fail here, so we don't // do a transition to tolerant code here DWORD subRet = WaitForSingleObject (handles[i], 0); if (subRet != WAIT_FAILED) continue; _ASSERTE(::GetLastError() == ERROR_INVALID_HANDLE); if ((countHandles - i - 1) > 0) memmove(&handles[i], &handles[i+1], (countHandles - i - 1) * sizeof(HANDLE)); countHandles--; break; } // Compute the new timeout value by assume that the timeout // is not large enough for more than one wrap dwEnd = CLRGetTickCount64(); if (millis != INFINITE) { if (dwEnd >= dwStart + millis) { ret = WAIT_TIMEOUT; goto WaitCompleted; } else { millis -= (DWORD)(dwEnd - dwStart); } } goto retry; } else { // Probe all handles with a timeout as zero, succeed with the first // handle that doesn't timeout. ret = WAIT_OBJECT_0; int i; for (i = 0; i < countHandles; i++) { TryAgain: // WaitForSingleObject won't pump memssage; we already probe enough space // before calling this function and we don't want to fail here, so we don't // do a transition to tolerant code here DWORD subRet = WaitForSingleObject (handles[i], 0); if ((subRet == WAIT_OBJECT_0) || (subRet == WAIT_FAILED)) break; if (subRet == WAIT_ABANDONED) { ret = (ret - WAIT_OBJECT_0) + WAIT_ABANDONED; break; } // If we get alerted it just masks the real state of the current // handle, so retry the wait. if (subRet == WAIT_IO_COMPLETION) goto TryAgain; _ASSERTE(subRet == WAIT_TIMEOUT); ret++; } } } WaitCompleted: _ASSERTE((ret != WAIT_TIMEOUT) || (millis != INFINITE)); return ret; } DWORD Thread::DoAppropriateWaitWorker(AppropriateWaitFunc func, void *args, DWORD millis, WaitMode mode) { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; BOOL alertable = (mode & WaitMode_Alertable)!=0; // Before going to pre-emptive mode the thread needs to be flagged as waiting for // the debugger. This used to be accomplished by the TS_Interruptible flag but that // doesn't work reliably, see DevDiv Bugs 699245. Some methods call in here already in // COOP mode so we set the bit before the transition. For the calls that are already // in pre-emptive mode those are still buggy. 
This is only a partial fix. BOOL isCoop = PreemptiveGCDisabled(); ThreadStateNCStackHolder tsNC(isCoop && alertable, TSNC_DebuggerSleepWaitJoin); GCX_PREEMP(); // <TODO> // @TODO cwb: we don't know whether a thread has a message pump or // how to pump its messages, currently. // @TODO cwb: WinCE isn't going to support Thread.Interrupt() correctly until // we get alertable waits on that platform.</TODO> DWORD ret; if(alertable) { DoAppropriateWaitWorkerAlertableHelper(mode); } DWORD option; if (alertable) { option = WAIT_ALERTABLE; #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT ApartmentState as = GetFinalApartment(); if ((AS_InMTA != as) && !GetDomain()->MustForceTrivialWaitOperations()) { option |= WAIT_MSGPUMP; } #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT } else { option = 0; } ThreadStateHolder tsh(alertable, TS_Interruptible | TS_Interrupted); ULONGLONG dwStart = 0; ULONGLONG dwEnd; retry: if (millis != INFINITE) { dwStart = CLRGetTickCount64(); } ret = func(args, millis, option); if (ret == WAIT_IO_COMPLETION) { _ASSERTE (alertable); if ((m_State & TS_Interrupted)) { HandleThreadInterrupt(); } if (millis != INFINITE) { dwEnd = CLRGetTickCount64(); if (dwEnd >= dwStart + millis) { ret = WAIT_TIMEOUT; goto WaitCompleted; } else { millis -= (DWORD)(dwEnd - dwStart); } } goto retry; } WaitCompleted: _ASSERTE(ret == WAIT_OBJECT_0 || ret == WAIT_ABANDONED || ret == WAIT_TIMEOUT || ret == WAIT_FAILED); _ASSERTE((ret != WAIT_TIMEOUT) || (millis != INFINITE)); return ret; } //-------------------------------------------------------------------- // Only one style of wait for DoSignalAndWait since we don't support this on STA Threads //-------------------------------------------------------------------- DWORD Thread::DoSignalAndWait(HANDLE *handles, DWORD millis, BOOL alertable, PendingSync *syncState) { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; _ASSERTE(alertable || syncState == 0); struct Param { Thread *pThis; HANDLE *handles; DWORD millis; BOOL alertable; DWORD dwRet; } param; param.pThis = this; param.handles = handles; param.millis = millis; param.alertable = alertable; param.dwRet = (DWORD) -1; EE_TRY_FOR_FINALLY(Param *, pParam, &param) { pParam->dwRet = pParam->pThis->DoSignalAndWaitWorker(pParam->handles, pParam->millis, pParam->alertable); } EE_FINALLY { if (syncState) { if (!GOT_EXCEPTION() && WAIT_OBJECT_0 == param.dwRet) { // This thread has been removed from syncblk waiting list by the signalling thread syncState->Restore(FALSE); } else syncState->Restore(TRUE); } _ASSERTE (WAIT_IO_COMPLETION != param.dwRet); } EE_END_FINALLY; return(param.dwRet); } DWORD Thread::DoSignalAndWaitWorker(HANDLE* pHandles, DWORD millis,BOOL alertable) { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; DWORD ret = 0; GCX_PREEMP(); if(alertable) { DoAppropriateWaitWorkerAlertableHelper(WaitMode_None); } StateHolder<MarkOSAlertableWait,UnMarkOSAlertableWait> OSAlertableWait(alertable); ThreadStateHolder tsh(alertable, TS_Interruptible | TS_Interrupted); ULONGLONG dwStart = 0, dwEnd; if (INFINITE != millis) { dwStart = CLRGetTickCount64(); } ret = SignalObjectAndWait(pHandles[0], pHandles[1], millis, alertable); retry: if (WAIT_IO_COMPLETION == ret) { _ASSERTE (alertable); // We could be woken by some spurious APC or an EE APC queued to // interrupt us. In the latter case the TS_Interrupted bit will be set // in the thread state bits. Otherwise we just go back to sleep again. 
if ((m_State & TS_Interrupted)) { HandleThreadInterrupt(); } if (INFINITE != millis) { dwEnd = CLRGetTickCount64(); if (dwStart + millis <= dwEnd) { ret = WAIT_TIMEOUT; goto WaitCompleted; } else { millis -= (DWORD)(dwEnd - dwStart); } dwStart = CLRGetTickCount64(); } //Retry case we don't want to signal again so only do the wait... ret = WaitForSingleObjectEx(pHandles[1],millis,TRUE); goto retry; } if (WAIT_FAILED == ret) { DWORD errorCode = ::GetLastError(); //If the handle to signal is a mutex and // the calling thread is not the owner, errorCode is ERROR_NOT_OWNER switch(errorCode) { case ERROR_INVALID_HANDLE: case ERROR_NOT_OWNER: case ERROR_ACCESS_DENIED: COMPlusThrowWin32(); break; case ERROR_TOO_MANY_POSTS: ret = ERROR_TOO_MANY_POSTS; break; default: CONSISTENCY_CHECK_MSGF(0, ("This errorCode is not understood '(%d)''\n", errorCode)); COMPlusThrowWin32(); break; } } WaitCompleted: //Check that the return state is valid _ASSERTE(WAIT_OBJECT_0 == ret || WAIT_ABANDONED == ret || WAIT_TIMEOUT == ret || WAIT_FAILED == ret || ERROR_TOO_MANY_POSTS == ret); //Wrong to time out if the wait was infinite _ASSERTE((WAIT_TIMEOUT != ret) || (INFINITE != millis)); return ret; } DWORD Thread::DoSyncContextWait(OBJECTREF *pSyncCtxObj, int countHandles, HANDLE *handles, BOOL waitAll, DWORD millis) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; PRECONDITION(CheckPointer(handles)); PRECONDITION(IsProtectedByGCFrame (pSyncCtxObj)); } CONTRACTL_END; MethodDescCallSite invokeWaitMethodHelper(METHOD__SYNCHRONIZATION_CONTEXT__INVOKE_WAIT_METHOD_HELPER); BASEARRAYREF handleArrayObj = (BASEARRAYREF)AllocatePrimitiveArray(ELEMENT_TYPE_I, countHandles); memcpyNoGCRefs(handleArrayObj->GetDataPtr(), handles, countHandles * sizeof(HANDLE)); ARG_SLOT args[6] = { ObjToArgSlot(*pSyncCtxObj), ObjToArgSlot(handleArrayObj), BoolToArgSlot(waitAll), (ARG_SLOT)millis, }; // Needed by TriggerGCForMDAInternal to avoid infinite recursion ThreadStateNCStackHolder holder(TRUE, TSNC_InsideSyncContextWait); return invokeWaitMethodHelper.Call_RetI4(args); } // Called out of SyncBlock::Wait() to block this thread until the Notify occurs. BOOL Thread::Block(INT32 timeOut, PendingSync *syncState) { WRAPPER_NO_CONTRACT; _ASSERTE(this == GetThread()); // Before calling Block, the SyncBlock queued us onto it's list of waiting threads. // However, before calling Block the SyncBlock temporarily left the synchronized // region. This allowed threads to enter the region and call Notify, in which // case we may have been signalled before we entered the Wait. So we aren't in the // m_WaitSB list any longer. Not a problem: the following Wait will return // immediately. But it means we cannot enforce the following assertion: // _ASSERTE(m_WaitSB != NULL); return (Wait(syncState->m_WaitEventLink->m_Next->m_EventWait, timeOut, syncState) != WAIT_OBJECT_0); } // Return whether or not a timeout occurred. TRUE=>we waited successfully DWORD Thread::Wait(HANDLE *objs, int cntObjs, INT32 timeOut, PendingSync *syncInfo) { WRAPPER_NO_CONTRACT; DWORD dwResult; DWORD dwTimeOut32; _ASSERTE(timeOut >= 0 || timeOut == INFINITE_TIMEOUT); dwTimeOut32 = (timeOut == INFINITE_TIMEOUT ? 
INFINITE : (DWORD) timeOut); dwResult = DoAppropriateWait(cntObjs, objs, FALSE /*=waitAll*/, dwTimeOut32, WaitMode_Alertable /*alertable*/, syncInfo); // Either we succeeded in the wait, or we timed out _ASSERTE((dwResult >= WAIT_OBJECT_0 && dwResult < (DWORD)(WAIT_OBJECT_0 + cntObjs)) || (dwResult == WAIT_TIMEOUT)); return dwResult; } // Return whether or not a timeout occurred. TRUE=>we waited successfully DWORD Thread::Wait(CLREvent *pEvent, INT32 timeOut, PendingSync *syncInfo) { WRAPPER_NO_CONTRACT; DWORD dwResult; DWORD dwTimeOut32; _ASSERTE(timeOut >= 0 || timeOut == INFINITE_TIMEOUT); dwTimeOut32 = (timeOut == INFINITE_TIMEOUT ? INFINITE : (DWORD) timeOut); dwResult = pEvent->Wait(dwTimeOut32, TRUE /*alertable*/, syncInfo); // Either we succeeded in the wait, or we timed out _ASSERTE((dwResult == WAIT_OBJECT_0) || (dwResult == WAIT_TIMEOUT)); return dwResult; } void Thread::Wake(SyncBlock *psb) { WRAPPER_NO_CONTRACT; CLREvent* hEvent = NULL; WaitEventLink *walk = &m_WaitEventLink; while (walk->m_Next) { if (walk->m_Next->m_WaitSB == psb) { hEvent = walk->m_Next->m_EventWait; // We are guaranteed that only one thread can change walk->m_Next->m_WaitSB // since the thread is helding the syncblock. walk->m_Next->m_WaitSB = (SyncBlock*)((DWORD_PTR)walk->m_Next->m_WaitSB | 1); break; } #ifdef _DEBUG else if ((SyncBlock*)((DWORD_PTR)walk->m_Next & ~1) == psb) { _ASSERTE (!"Can not wake a thread on the same SyncBlock more than once"); } #endif } PREFIX_ASSUME (hEvent != NULL); hEvent->Set(); } #define WAIT_INTERRUPT_THREADABORT 0x1 #define WAIT_INTERRUPT_INTERRUPT 0x2 #define WAIT_INTERRUPT_OTHEREXCEPTION 0x4 // When we restore DWORD EnterMonitorForRestore(SyncBlock *pSB) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; DWORD state = 0; EX_TRY { pSB->EnterMonitor(); } EX_CATCH { // Assume it is a normal exception unless proven. state = WAIT_INTERRUPT_OTHEREXCEPTION; Thread *pThread = GetThread(); if (pThread->IsAbortInitiated()) { state = WAIT_INTERRUPT_THREADABORT; } else if (__pException != NULL) { if (__pException->GetHR() == COR_E_THREADINTERRUPTED) { state = WAIT_INTERRUPT_INTERRUPT; } } } EX_END_CATCH(SwallowAllExceptions); return state; } // This is the service that backs us out of a wait that we interrupted. We must // re-enter the monitor to the same extent the SyncBlock would, if we returned // through it (instead of throwing through it). And we need to cancel the wait, // if it didn't get notified away while we are processing the interrupt. void PendingSync::Restore(BOOL bRemoveFromSB) { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; _ASSERTE(m_EnterCount); Thread *pCurThread = GetThread(); _ASSERTE (pCurThread == m_OwnerThread); WaitEventLink *pRealWaitEventLink = m_WaitEventLink->m_Next; pRealWaitEventLink->m_RefCount --; if (pRealWaitEventLink->m_RefCount == 0) { if (bRemoveFromSB) { ThreadQueue::RemoveThread(pCurThread, pRealWaitEventLink->m_WaitSB); } if (pRealWaitEventLink->m_EventWait != &pCurThread->m_EventWait) { // Put the event back to the pool. StoreEventToEventStore(pRealWaitEventLink->m_EventWait); } // Remove from the link. m_WaitEventLink->m_Next = m_WaitEventLink->m_Next->m_Next; } // Someone up the stack is responsible for keeping the syncblock alive by protecting // the object that owns it. But this relies on assertions that EnterMonitor is only // called in cooperative mode. Even though we are safe in preemptive, do the // switch. GCX_COOP_THREAD_EXISTS(pCurThread); // We need to make sure that EnterMonitor succeeds. 
We may have code like // lock (a) // { // a.Wait // } // We need to make sure that the finally from lock is excuted with the lock owned. DWORD state = 0; SyncBlock *psb = (SyncBlock*)((DWORD_PTR)pRealWaitEventLink->m_WaitSB & ~1); for (LONG i=0; i < m_EnterCount;) { if ((state & (WAIT_INTERRUPT_THREADABORT | WAIT_INTERRUPT_INTERRUPT)) != 0) { // If the thread has been interrupted by Thread.Interrupt or Thread.Abort, // disable the check at the beginning of DoAppropriateWait pCurThread->SetThreadStateNC(Thread::TSNC_InRestoringSyncBlock); } DWORD result = EnterMonitorForRestore(psb); if (result == 0) { i++; } else { // We block the thread until the thread acquires the lock. // This is to make sure that when catch/finally is executed, the thread has the lock. // We do not want thread to run its catch/finally if the lock is not taken. state |= result; // If the thread is being rudely aborted, and the thread has // no Cer on stack, we will not run managed code to release the // lock, so we can terminate the loop. if (pCurThread->IsRudeAbortInitiated() && !pCurThread->IsExecutingWithinCer()) { break; } } } pCurThread->ResetThreadStateNC(Thread::TSNC_InRestoringSyncBlock); if ((state & WAIT_INTERRUPT_THREADABORT) != 0) { pCurThread->HandleThreadAbort(); } else if ((state & WAIT_INTERRUPT_INTERRUPT) != 0) { COMPlusThrow(kThreadInterruptedException); } } // This is the callback from the OS, when we queue an APC to interrupt a waiting thread. // The callback occurs on the thread we wish to interrupt. It is a STATIC method. void WINAPI Thread::UserInterruptAPC(ULONG_PTR data) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; _ASSERTE(data == APC_Code); Thread *pCurThread = GetThreadNULLOk(); if (pCurThread) { // We should only take action if an interrupt is currently being // requested (our synchronization does not guarantee that we won't fire // spuriously). It's safe to check the m_UserInterrupt field and then // set TS_Interrupted in a non-atomic fashion because m_UserInterrupt is // only cleared in this thread's context (though it may be set from any // context). if (pCurThread->IsUserInterrupted()) { // Set bit to indicate this routine was called (as opposed to other // generic APCs). FastInterlockOr((ULONG *) &pCurThread->m_State, TS_Interrupted); } } } // This is the workhorse for Thread.Interrupt(). void Thread::UserInterrupt(ThreadInterruptMode mode) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; FastInterlockOr((DWORD*)&m_UserInterrupt, mode); if (HasValidThreadHandle() && HasThreadState (TS_Interruptible)) { Alert(); } } // Implementation of Thread.Sleep(). void Thread::UserSleep(INT32 time) { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; INCONTRACT(_ASSERTE(!GetThread()->GCNoTrigger())); DWORD res; // Before going to pre-emptive mode the thread needs to be flagged as waiting for // the debugger. This used to be accomplished by the TS_Interruptible flag but that // doesn't work reliably, see DevDiv Bugs 699245. ThreadStateNCStackHolder tsNC(TRUE, TSNC_DebuggerSleepWaitJoin); GCX_PREEMP(); // A word about ordering for Interrupt. If someone tries to interrupt a thread // that's in the interruptible state, we queue an APC. But if they try to interrupt // a thread that's not in the interruptible state, we just record that fact. So // we have to set TS_Interruptible before we test to see whether someone wants to // interrupt us or else we have a race condition that causes us to skip the APC. 
FastInterlockOr((ULONG *) &m_State, TS_Interruptible); // If someone has interrupted us, we should not enter the wait. if (IsUserInterrupted()) { HandleThreadInterrupt(); } ThreadStateHolder tsh(TRUE, TS_Interruptible | TS_Interrupted); FastInterlockAnd((ULONG *) &m_State, ~TS_Interrupted); DWORD dwTime = (DWORD)time; retry: ULONGLONG start = CLRGetTickCount64(); res = ClrSleepEx (dwTime, TRUE); if (res == WAIT_IO_COMPLETION) { // We could be woken by some spurious APC or an EE APC queued to // interrupt us. In the latter case the TS_Interrupted bit will be set // in the thread state bits. Otherwise we just go back to sleep again. if ((m_State & TS_Interrupted)) { HandleThreadInterrupt(); } if (dwTime == INFINITE) { goto retry; } else { ULONGLONG actDuration = CLRGetTickCount64() - start; if (dwTime > actDuration) { dwTime -= (DWORD)actDuration; goto retry; } else { res = WAIT_TIMEOUT; } } } _ASSERTE(res == WAIT_TIMEOUT || res == WAIT_OBJECT_0); } // Correspondence between an EE Thread and an exposed System.Thread: OBJECTREF Thread::GetExposedObject() { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; TRIGGERSGC(); Thread *pCurThread = GetThreadNULLOk(); _ASSERTE (!(pCurThread == NULL || IsAtProcessExit())); _ASSERTE(pCurThread->PreemptiveGCDisabled()); if (ObjectFromHandle(m_ExposedObject) == NULL) { // Allocate the exposed thread object. THREADBASEREF attempt = (THREADBASEREF) AllocateObject(g_pThreadClass); GCPROTECT_BEGIN(attempt); // The exposed object keeps us alive until it is GC'ed. This // doesn't mean the physical thread continues to run, of course. // We have to set this outside of the ThreadStore lock, because this might trigger a GC. attempt->SetInternal(this); BOOL fNeedThreadStore = (! ThreadStore::HoldingThreadStore(pCurThread)); // Take a lock to make sure that only one thread creates the object. ThreadStoreLockHolder tsHolder(fNeedThreadStore); // Check to see if another thread has not already created the exposed object. if (ObjectFromHandle(m_ExposedObject) == NULL) { // Keep a weak reference to the exposed object. StoreObjectInHandle(m_ExposedObject, (OBJECTREF) attempt); ObjectInHandleHolder exposedHolder(m_ExposedObject); // Increase the external ref count. We can't call IncExternalCount because we // already hold the thread lock and IncExternalCount won't be able to take it. ULONG retVal = FastInterlockIncrement ((LONG*)&m_ExternalRefCount); // Check to see if we need to store a strong pointer to the object. if (retVal > 1) StoreObjectInHandle(m_StrongHndToExposedObject, (OBJECTREF) attempt); ObjectInHandleHolder strongHolder(m_StrongHndToExposedObject); attempt->SetManagedThreadId(GetThreadId()); // Note that we are NOT calling the constructor on the Thread. That's // because this is an internal create where we don't want a Start // address. And we don't want to expose such a constructor for our // customers to accidentally call. The following is in lieu of a true // constructor: attempt->InitExisting(); exposedHolder.SuppressRelease(); strongHolder.SuppressRelease(); } else { attempt->ClearInternal(); } GCPROTECT_END(); } return ObjectFromHandle(m_ExposedObject); } // We only set non NULL exposed objects for unstarted threads that haven't exited // their constructor yet. So there are no race conditions. 
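// Illustrative aside (a minimal sketch, not part of the runtime's own implementation): UserSleep
// above is an instance of the classic alertable-wait retry loop, i.e. SleepEx(..., TRUE) can
// return WAIT_IO_COMPLETION for an APC that has nothing to do with us, in which case the sleep is
// resumed for whatever time remains. The standalone version below shows the same shape with public
// Win32 APIs only; the helper name and the cancel flag are invented for the example, and it
// assumes a finite timeout (the runtime also handles INFINITE).
#if 0 // sketch only, excluded from compilation
// Requires <windows.h>.
static DWORD SleepAlertableWithRetry(DWORD millis, volatile LONG* pCancelRequested)
{
    ULONGLONG deadline = GetTickCount64() + millis;
    for (;;)
    {
        DWORD res = SleepEx(millis, TRUE /* alertable */);
        if (res != WAIT_IO_COMPLETION)
            return 0;                                   // the full sleep elapsed

        // Read the flag without modifying it; an interrupter on another thread would set it
        // before queueing an APC with QueueUserAPC(SomeApcFn, hTargetThread, 0), where SomeApcFn
        // and hTargetThread are placeholders for this example.
        if (InterlockedCompareExchange(pCancelRequested, 0, 0) != 0)
            return WAIT_IO_COMPLETION;                  // the APC was meant for us: stop early

        // Spurious wake-up: go back to sleep for the remaining time.
        ULONGLONG now = GetTickCount64();
        if (now >= deadline)
            return 0;
        millis = (DWORD)(deadline - now);
    }
}
#endif
// This mirrors the split in UserInterrupt/UserInterruptAPC above: the interrupter publishes its
// request first and only then alerts the sleeper, so the request cannot be missed.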
void Thread::SetExposedObject(OBJECTREF exposed) { CONTRACTL { NOTHROW; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; if (exposed != NULL) { _ASSERTE (GetThreadNULLOk() != this); _ASSERTE(IsUnstarted()); _ASSERTE(ObjectFromHandle(m_ExposedObject) == NULL); // The exposed object keeps us alive until it is GC'ed. This doesn't mean the // physical thread continues to run, of course. StoreObjectInHandle(m_ExposedObject, exposed); // This makes sure the contexts on the backing thread // and the managed thread start off in sync with each other. // BEWARE: the IncExternalCount call below may cause GC to happen. // IncExternalCount will store exposed in m_StrongHndToExposedObject which is in default domain. // If the creating thread is killed before the target thread is killed in Thread.Start, Thread object // will be kept alive forever. // Instead, IncExternalCount should be called after the target thread has been started in Thread.Start. // IncExternalCount(); } else { // Simply set both of the handles to NULL. The GC of the old exposed thread // object will take care of decrementing the external ref count. StoreObjectInHandle(m_ExposedObject, NULL); StoreObjectInHandle(m_StrongHndToExposedObject, NULL); } } void Thread::SetLastThrownObject(OBJECTREF throwable, BOOL isUnhandled) { CONTRACTL { if ((throwable == NULL) || CLRException::IsPreallocatedExceptionObject(throwable)) NOTHROW; else THROWS; // From CreateHandle GC_NOTRIGGER; if (throwable == NULL) MODE_ANY; else MODE_COOPERATIVE; } CONTRACTL_END; STRESS_LOG_COND1(LF_EH, LL_INFO100, OBJECTREFToObject(throwable) != NULL, "in Thread::SetLastThrownObject: obj = %p\n", OBJECTREFToObject(throwable)); // you can't have a NULL unhandled exception _ASSERTE(!(throwable == NULL && isUnhandled)); if (m_LastThrownObjectHandle != NULL) { // We'll somtimes use a handle for a preallocated exception object. We should never, ever destroy one of // these handles... they'll be destroyed when the Runtime shuts down. if (!CLRException::IsPreallocatedExceptionHandle(m_LastThrownObjectHandle)) { DestroyHandle(m_LastThrownObjectHandle); } m_LastThrownObjectHandle = NULL; // Make sure to set this to NULL here just in case we throw trying to make // a new handle below. } if (throwable != NULL) { _ASSERTE(this == GetThread()); // Non-compliant exceptions are always wrapped. // The use of the ExceptionNative:: helper here (rather than the global ::IsException helper) // is hokey, but we need a GC_NOTRIGGER version and it's only for an ASSERT. _ASSERTE(IsException(throwable->GetMethodTable())); // If we're tracking one of the preallocated exception objects, then just use the global handle that // matches it rather than creating a new one. if (CLRException::IsPreallocatedExceptionObject(throwable)) { m_LastThrownObjectHandle = CLRException::GetPreallocatedHandleForObject(throwable); } else { m_LastThrownObjectHandle = GetDomain()->CreateHandle(throwable); } _ASSERTE(m_LastThrownObjectHandle != NULL); m_ltoIsUnhandled = isUnhandled; } else { m_ltoIsUnhandled = FALSE; } } void Thread::SetSOForLastThrownObject() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_COOPERATIVE; CANNOT_TAKE_LOCK; } CONTRACTL_END; // If we are saving stack overflow exception, we can just null out the current handle. // The current domain is going to be unloaded or the process is going to be killed, so // we will not leak a handle. 
m_LastThrownObjectHandle = CLRException::GetPreallocatedStackOverflowExceptionHandle(); } // // This is a nice wrapper for SetLastThrownObject which catches any exceptions caused by not being able to create // the handle for the throwable, and setting the last thrown object to the preallocated out of memory exception // instead. // OBJECTREF Thread::SafeSetLastThrownObject(OBJECTREF throwable) { CONTRACTL { NOTHROW; GC_NOTRIGGER; if (throwable == NULL) MODE_ANY; else MODE_COOPERATIVE; } CONTRACTL_END; // We return the original throwable if nothing goes wrong. OBJECTREF ret = throwable; EX_TRY { // Try to set the throwable. SetLastThrownObject(throwable); } EX_CATCH { // If it didn't work, then set the last thrown object to the preallocated OOM exception, and return that // object instead of the original throwable. ret = CLRException::GetPreallocatedOutOfMemoryException(); SetLastThrownObject(ret); } EX_END_CATCH(SwallowAllExceptions); return ret; } // // This is a nice wrapper for SetThrowable and SetLastThrownObject, which catches any exceptions caused by not // being able to create the handle for the throwable, and sets the throwable to the preallocated out of memory // exception instead. It also updates the last thrown object, which is always updated when the throwable is // updated. // OBJECTREF Thread::SafeSetThrowables(OBJECTREF throwable DEBUG_ARG(ThreadExceptionState::SetThrowableErrorChecking stecFlags), BOOL isUnhandled) { CONTRACTL { NOTHROW; GC_NOTRIGGER; if (throwable == NULL) MODE_ANY; else MODE_COOPERATIVE; } CONTRACTL_END; // We return the original throwable if nothing goes wrong. OBJECTREF ret = throwable; EX_TRY { // Try to set the throwable. SetThrowable(throwable DEBUG_ARG(stecFlags)); // Now, if the last thrown object is different, go ahead and update it. This makes sure that we re-throw // the right object when we rethrow. if (LastThrownObject() != throwable) { SetLastThrownObject(throwable); } if (isUnhandled) { MarkLastThrownObjectUnhandled(); } } EX_CATCH { // If either set didn't work, then set both throwables to the preallocated OOM exception, and return that // object instead of the original throwable. ret = CLRException::GetPreallocatedOutOfMemoryException(); // Neither of these will throw because we're setting with a preallocated exception. SetThrowable(ret DEBUG_ARG(stecFlags)); SetLastThrownObject(ret, isUnhandled); } EX_END_CATCH(SwallowAllExceptions); return ret; } // This method will sync the managed exception state to be in sync with the topmost active exception // for a given thread void Thread::SyncManagedExceptionState(bool fIsDebuggerThread) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; { GCX_COOP(); // Syncup the LastThrownObject on the managed thread SafeUpdateLastThrownObject(); } } void Thread::SetLastThrownObjectHandle(OBJECTHANDLE h) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_COOPERATIVE; } CONTRACTL_END; if (m_LastThrownObjectHandle != NULL && !CLRException::IsPreallocatedExceptionHandle(m_LastThrownObjectHandle)) { DestroyHandle(m_LastThrownObjectHandle); } m_LastThrownObjectHandle = h; } // // Create a duplicate handle of the current throwable and set the last thrown object to that. This ensures that the // last thrown object and the current throwable have handles that are in the same app domain. 
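// ---------------------------------------------------------------------------
// [Illustrative sketch, compiled out] SafeSetLastThrownObject and
// SafeSetThrowables above share one pattern: attempt the fallible store, and on
// any failure fall back to a preallocated object whose storage is a plain
// assignment that cannot fail, returning whichever object was actually stored.
// The standalone code below is a hypothetical model of that shape (Slot,
// StoreMayThrow and the fallback string are stand-ins, not runtime types).
#if 0
#include <new>

struct Slot { const char* value = nullptr; };

static const char* const g_preallocatedFallback = "preallocated OOM";

// May throw, like GetDomain()->CreateHandle() in SetLastThrownObject above.
static void StoreMayThrow(Slot& slot, const char* v)
{
    if (v == nullptr)
        throw std::bad_alloc();
    slot.value = v;
}

static const char* SafeStore(Slot& slot, const char* v)
{
    try
    {
        StoreMayThrow(slot, v);
        return v;                              // success: caller keeps the original
    }
    catch (...)
    {
        slot.value = g_preallocatedFallback;   // plain assignment, cannot fail
        return g_preallocatedFallback;         // caller must use the fallback
    }
}
#endif
// ---------------------------------------------------------------------------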
// void Thread::SafeUpdateLastThrownObject(void) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_COOPERATIVE; } CONTRACTL_END; OBJECTHANDLE hThrowable = GetThrowableAsHandle(); if (hThrowable != NULL) { EX_TRY { IGCHandleManager *pHandleTable = GCHandleUtilities::GetGCHandleManager(); // Creating a duplicate handle here ensures that the AD of the last thrown object // matches the domain of the current throwable. OBJECTHANDLE duplicateHandle = pHandleTable->CreateDuplicateHandle(hThrowable); SetLastThrownObjectHandle(duplicateHandle); } EX_CATCH { // If we can't create a duplicate handle, we set both throwables to the preallocated OOM exception. SafeSetThrowables(CLRException::GetPreallocatedOutOfMemoryException()); } EX_END_CATCH(SwallowAllExceptions); } } // Background threads must be counted, because the EE should shut down when the // last non-background thread terminates. But we only count running ones. void Thread::SetBackground(BOOL isBack) { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; // booleanize IsBackground() which just returns bits if (isBack == !!IsBackground()) return; BOOL lockHeld = HasThreadStateNC(Thread::TSNC_TSLTakenForStartup); _ASSERTE(!lockHeld || (lockHeld && ThreadStore::HoldingThreadStore())); LOG((LF_SYNC, INFO3, "SetBackground obtain lock\n")); ThreadStoreLockHolder TSLockHolder(!lockHeld); if (IsDead()) { // This can only happen in a race condition, where the correct thing to do // is ignore it. If it happens without the race condition, we throw an // exception. } else if (isBack) { if (!IsBackground()) { FastInterlockOr((ULONG *) &m_State, TS_Background); // unstarted threads don't contribute to the background count if (!IsUnstarted()) ThreadStore::s_pThreadStore->m_BackgroundThreadCount++; // If we put the main thread into a wait, until only background threads exist, // then we make that // main thread a background thread. This cleanly handles the case where it // may or may not be one as it enters the wait. // One of the components of OtherThreadsComplete() has changed, so check whether // we should now exit the EE. ThreadStore::CheckForEEShutdown(); } } else { if (IsBackground()) { FastInterlockAnd((ULONG *) &m_State, ~TS_Background); // unstarted threads don't contribute to the background count if (!IsUnstarted()) ThreadStore::s_pThreadStore->m_BackgroundThreadCount--; _ASSERTE(ThreadStore::s_pThreadStore->m_BackgroundThreadCount >= 0); _ASSERTE(ThreadStore::s_pThreadStore->m_BackgroundThreadCount <= ThreadStore::s_pThreadStore->m_ThreadCount); } } } #ifdef FEATURE_COMINTEROP class ApartmentSpyImpl : public IUnknownCommon<IInitializeSpy, IID_IInitializeSpy> { public: HRESULT STDMETHODCALLTYPE PreInitialize(DWORD dwCoInit, DWORD dwCurThreadAptRefs) { LIMITED_METHOD_CONTRACT; return S_OK; } HRESULT STDMETHODCALLTYPE PostInitialize(HRESULT hrCoInit, DWORD dwCoInit, DWORD dwNewThreadAptRefs) { LIMITED_METHOD_CONTRACT; return hrCoInit; // this HRESULT will be returned from CoInitialize(Ex) } HRESULT STDMETHODCALLTYPE PreUninitialize(DWORD dwCurThreadAptRefs) { // Don't assume that Thread exists and do not create it. STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_MODE_PREEMPTIVE; HRESULT hr = S_OK; if (dwCurThreadAptRefs == 1 && !g_fEEShutDown) { // This is the last CoUninitialize on this thread and the CLR is still running. If it's an STA // we take the opportunity to perform COM/WinRT cleanup now, when the apartment is still alive. 
Thread *pThread = GetThreadNULLOk(); if (pThread != NULL) { BEGIN_EXTERNAL_ENTRYPOINT(&hr) { if (pThread->GetFinalApartment() == Thread::AS_InSTA) { // This will release RCWs and purge the WinRT factory cache on all AppDomains. It // will also synchronize with the finalizer thread which ensures that the RCWs // that were already in the global RCW cleanup list will be cleaned up as well. // ReleaseRCWsInCachesNoThrow(GetCurrentCtxCookie()); } } END_EXTERNAL_ENTRYPOINT; } } return hr; } HRESULT STDMETHODCALLTYPE PostUninitialize(DWORD dwNewThreadAptRefs) { LIMITED_METHOD_CONTRACT; return S_OK; } }; #endif // FEATURE_COMINTEROP void Thread::PrepareApartmentAndContext() { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; #ifdef TARGET_UNIX m_OSThreadId = ::PAL_GetCurrentOSThreadId(); #else m_OSThreadId = ::GetCurrentThreadId(); #endif #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT // Be very careful in here because we haven't set up e.g. TLS yet. if (m_State & (TS_InSTA | TS_InMTA)) { // Make sure TS_InSTA and TS_InMTA aren't both set. _ASSERTE(!((m_State & TS_InSTA) && (m_State & TS_InMTA))); // Determine the apartment state to set based on the requested state. ApartmentState aState = m_State & TS_InSTA ? AS_InSTA : AS_InMTA; // Clear the requested apartment state from the thread. This is requested since // the thread might actually be a fiber that has already been initialized to // a different apartment state than the requested one. If we didn't clear // the requested apartment state, then we could end up with both TS_InSTA and // TS_InMTA set at the same time. FastInterlockAnd ((ULONG *) &m_State, ~TS_InSTA & ~TS_InMTA); // Attempt to set the requested apartment state. SetApartment(aState); } // In the case where we own the thread and we have switched it to a different // starting context, it is the responsibility of the caller (KickOffThread()) // to notice that the context changed, and to adjust the delegate that it will // dispatch on, as appropriate. #endif //FEATURE_COMINTEROP_APARTMENT_SUPPORT #ifdef FEATURE_COMINTEROP // Our IInitializeSpy will be registered in classic processes // only if the internal config switch is on. if (g_pConfig->EnableRCWCleanupOnSTAShutdown()) { NewHolder<ApartmentSpyImpl> pSpyImpl = new ApartmentSpyImpl(); IfFailThrow(CoRegisterInitializeSpy(pSpyImpl, &m_uliInitializeSpyCookie)); pSpyImpl.SuppressRelease(); m_fInitializeSpyRegistered = true; } #endif // FEATURE_COMINTEROP } #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT // TS_InSTA (0x00004000) -> AS_InSTA (0) // TS_InMTA (0x00008000) -> AS_InMTA (1) #define TS_TO_AS(ts) \ (Thread::ApartmentState)((((DWORD)ts) >> 14) - 1) \ // Retrieve the apartment state of the current thread. There are three possible // states: thread hosts an STA, thread is part of the MTA or thread state is // undecided. The last state may indicate that the apartment has not been set at // all (nobody has called CoInitializeEx) or that the EE does not know the // current state (EE has not called CoInitializeEx). 
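// ---------------------------------------------------------------------------
// [Illustrative sketch, compiled out] The TS_TO_AS macro above relies on
// TS_InSTA and TS_InMTA being the single adjacent bits 0x00004000 and
// 0x00008000, so shifting right by 14 and subtracting one yields 0 (STA) and
// 1 (MTA) respectively.  A standalone verification of that arithmetic, using
// hypothetical mini enums rather than the real ThreadState values:
#if 0
#include <cstdint>

enum MiniThreadState : std::uint32_t { kInSTA = 0x00004000, kInMTA = 0x00008000 };
enum MiniApartment   : std::uint32_t { kAS_InSTA = 0, kAS_InMTA = 1 };

constexpr std::uint32_t ToApartment(std::uint32_t ts) { return (ts >> 14) - 1; }

static_assert(ToApartment(kInSTA) == kAS_InSTA, "0x4000 >> 14 == 1, minus 1 == 0");
static_assert(ToApartment(kInMTA) == kAS_InMTA, "0x8000 >> 14 == 2, minus 1 == 1");
#endif
// ---------------------------------------------------------------------------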
Thread::ApartmentState Thread::GetApartment() { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; ApartmentState as = AS_Unknown; ThreadState maskedTs = (ThreadState)(((DWORD)m_State) & (TS_InSTA|TS_InMTA)); if (maskedTs) { _ASSERTE((maskedTs == TS_InSTA) || (maskedTs == TS_InMTA)); static_assert_no_msg(TS_TO_AS(TS_InSTA) == AS_InSTA); static_assert_no_msg(TS_TO_AS(TS_InMTA) == AS_InMTA); as = TS_TO_AS(maskedTs); } if (as != AS_Unknown) { return as; } return GetApartmentRare(as); } Thread::ApartmentState Thread::GetApartmentRare(Thread::ApartmentState as) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; if (this == GetThreadNULLOk()) { THDTYPE type; HRESULT hr = S_OK; if (as == AS_Unknown) { hr = GetCurrentThreadTypeNT5(&type); if (hr == S_OK) { as = (type == THDTYPE_PROCESSMESSAGES) ? AS_InSTA : AS_InMTA; // If we get back THDTYPE_PROCESSMESSAGES, we are guaranteed to // be an STA thread. If not, we are an MTA thread, however // we can't know if the thread has been explicitly set to MTA // (via a call to CoInitializeEx) or if it has been implicitly // made MTA (if it hasn't been CoInitializeEx'd but CoInitialize // has already been called on some other thread in the process. if (as == AS_InSTA) FastInterlockOr((ULONG *) &m_State, AS_InSTA); } } } return as; } // Retrieve the explicit apartment state of the current thread. There are three possible // states: thread hosts an STA, thread is part of the MTA or thread state is // undecided. The last state may indicate that the apartment has not been set at // all (nobody has called CoInitializeEx), the EE does not know the // current state (EE has not called CoInitializeEx), or the thread is implicitly in // the MTA. Thread::ApartmentState Thread::GetExplicitApartment() { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; _ASSERTE(!((m_State & TS_InSTA) && (m_State & TS_InMTA))); // Initialize m_State by calling GetApartment. GetApartment(); ApartmentState as = (m_State & TS_InSTA) ? AS_InSTA : (m_State & TS_InMTA) ? AS_InMTA : AS_Unknown; return as; } Thread::ApartmentState Thread::GetFinalApartment() { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; _ASSERTE(this == GetThread()); ApartmentState as = AS_Unknown; if (g_fEEShutDown) { // On shutdown, do not use cached value. Someone might have called // CoUninitialize. FastInterlockAnd ((ULONG *) &m_State, ~TS_InSTA & ~TS_InMTA); } as = GetApartment(); if (as == AS_Unknown) { // On Win2k and above, GetApartment will only return AS_Unknown if CoInitialize // hasn't been called in the process. In that case we can simply assume MTA. However we // cannot cache this value in the Thread because if a CoInitialize does occur, then the // thread state might change. as = AS_InMTA; } return as; } // when we get apartment tear-down notification, // we want reset the apartment state we cache on the thread VOID Thread::ResetApartment() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; // reset the TS_InSTA bit and TS_InMTA bit ThreadState t_State = (ThreadState)(~(TS_InSTA | TS_InMTA)); FastInterlockAnd((ULONG *) &m_State, t_State); } // Attempt to set current thread's apartment state. The actual apartment state // achieved is returned and may differ from the input state if someone managed // to call CoInitializeEx on this thread first (note that calls to SetApartment // made before the thread has started are guaranteed to succeed). 
Thread::ApartmentState Thread::SetApartment(ApartmentState state) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(COMPlusThrowOM();); } CONTRACTL_END; // Reset any bits that request for CoInitialize ResetRequiresCoInitialize(); // Setting the state to AS_Unknown indicates we should CoUninitialize // the thread. if (state == AS_Unknown) { BOOL needUninitialize = (m_State & TS_CoInitialized) #ifdef FEATURE_COMINTEROP || IsWinRTInitialized() #endif // FEATURE_COMINTEROP ; if (needUninitialize) { GCX_PREEMP(); // If we haven't CoInitialized the thread, then we don't have anything to do. if (m_State & TS_CoInitialized) { // We should never be attempting to CoUninitialize another thread than // the currently running thread. #ifdef TARGET_UNIX _ASSERTE(m_OSThreadId == ::PAL_GetCurrentOSThreadId()); #else _ASSERTE(m_OSThreadId == ::GetCurrentThreadId()); #endif // CoUninitialize the thread and reset the STA/MTA/CoInitialized state bits. ::CoUninitialize(); ThreadState uninitialized = static_cast<ThreadState>(TS_InSTA | TS_InMTA | TS_CoInitialized); FastInterlockAnd((ULONG *) &m_State, ~uninitialized); } #ifdef FEATURE_COMINTEROP if (IsWinRTInitialized()) { _ASSERTE(WinRTSupported()); BaseWinRTUninitialize(); ResetWinRTInitialized(); } #endif // FEATURE_COMINTEROP } return GetApartment(); } // Call GetApartment to initialize the current apartment state. // // Important note: For Win2k and above this can return AS_InMTA even if the current // thread has never been CoInitialized. Because of this we MUST NOT look at the // return value of GetApartment here. We can however look at the m_State flags // since these will only be set to TS_InMTA if we know for a fact the the // current thread has explicitly been made MTA (via a call to CoInitializeEx). GetApartment(); // If the current thread is STA, then it is impossible to change it to // MTA. if (m_State & TS_InSTA) { return AS_InSTA; } // If the current thread is EXPLICITLY MTA, then it is impossible to change it to // STA. if (m_State & TS_InMTA) { return AS_InMTA; } // If the thread isn't even started yet, we mark the state bits without // calling CoInitializeEx (since we're obviously not in the correct thread // context yet). We'll retry this call when the thread is started. // Don't use the TS_Unstarted state bit to check for this, it's cleared far // too late in the day for us. Instead check whether we're in the correct // thread context. #ifdef TARGET_UNIX if (m_OSThreadId != ::PAL_GetCurrentOSThreadId()) #else if (m_OSThreadId != ::GetCurrentThreadId()) #endif { FastInterlockOr((ULONG *) &m_State, (state == AS_InSTA) ? TS_InSTA : TS_InMTA); return state; } HRESULT hr; { GCX_PREEMP(); // Attempt to set apartment by calling CoInitializeEx. This may fail if // another caller (outside EE) beat us to it. // // Important note: When calling CoInitializeEx(COINIT_MULTITHREADED) on a // thread that has never been CoInitialized, the return value will always // be S_OK, even if another thread in the process has already been // CoInitialized to MTA. However if the current thread has already been // CoInitialized to MTA, then S_FALSE will be returned. hr = ::CoInitializeEx(NULL, (state == AS_InSTA) ? COINIT_APARTMENTTHREADED : COINIT_MULTITHREADED); } if (SUCCEEDED(hr)) { ThreadState t_State = (state == AS_InSTA) ? TS_InSTA : TS_InMTA; if (hr == S_OK) { // The thread has never been CoInitialized. 
t_State = (ThreadState)(t_State | TS_CoInitialized); } else { _ASSERTE(hr == S_FALSE); // If the thread has already been CoInitialized to the proper mode, then // we don't want to leave an outstanding CoInit so we CoUninit. { GCX_PREEMP(); ::CoUninitialize(); } } // We succeeded in setting the apartment state to the requested state. FastInterlockOr((ULONG *) &m_State, t_State); } else if (hr == RPC_E_CHANGED_MODE) { // We didn't manage to enforce the requested apartment state, but at least // we can work out what the state is now. No need to actually do the CoInit -- // obviously someone else already took care of that. FastInterlockOr((ULONG *) &m_State, ((state == AS_InSTA) ? TS_InMTA : TS_InSTA)); } else if (hr == E_OUTOFMEMORY) { COMPlusThrowOM(); } else if (hr == E_NOTIMPL) { COMPlusThrow(kPlatformNotSupportedException, IDS_EE_THREAD_APARTMENT_NOT_SUPPORTED, (state == AS_InSTA) ? W("STA") : W("MTA")); } else { _ASSERTE(!"Unexpected HRESULT returned from CoInitializeEx!"); } // If WinRT is supported on this OS, also initialize it at the same time. Since WinRT sits on top of COM // we need to make sure that it is initialized in the same threading mode as we just started COM itself // with (or that we detected COM had already been started with). if (WinRTSupported() && !IsWinRTInitialized()) { GCX_PREEMP(); BOOL isSTA = m_State & TS_InSTA; _ASSERTE(isSTA || (m_State & TS_InMTA)); HRESULT hrWinRT = RoInitialize(isSTA ? RO_INIT_SINGLETHREADED : RO_INIT_MULTITHREADED); if (SUCCEEDED(hrWinRT)) { if (hrWinRT == S_OK) { SetThreadStateNC(TSNC_WinRTInitialized); } else { _ASSERTE(hrWinRT == S_FALSE); // If the thread has already been initialized, back it out. We may not // always be able to call RoUninitialize on shutdown so if there's // a way to avoid having to, we should take advantage of that. RoUninitialize(); } } else if (hrWinRT == E_OUTOFMEMORY) { COMPlusThrowOM(); } else { // We don't check for RPC_E_CHANGEDMODE, since we're using the mode that was read in by // initializing COM above. COM and WinRT need to always be in the same mode, so we should never // see that return code at this point. _ASSERTE(!"Unexpected HRESULT From RoInitialize"); } } // Since we've just called CoInitialize, COM has effectively been started up. // To ensure the CLR is aware of this, we need to call EnsureComStarted. 
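// ---------------------------------------------------------------------------
// [Illustrative sketch, compiled out, Windows-only] The branches above
// distinguish three interesting CoInitializeEx() outcomes:
//   S_OK               -- this call initialized COM; the thread now owns a reference.
//   S_FALSE            -- COM was already initialized in the requested mode;
//                         balance the extra reference with CoUninitialize().
//   RPC_E_CHANGED_MODE -- the thread is already in the *other* mode; just record it.
// The hypothetical standalone helper below shows the same decision table; it is
// not the runtime's SetApartment implementation.
#if 0
#include <objbase.h>

enum class AptResult { BecameSTA, BecameMTA, AlreadyOther, Failed };

AptResult TryEnterApartment(bool wantSTA)
{
    HRESULT hr = ::CoInitializeEx(nullptr,
                                  wantSTA ? COINIT_APARTMENTTHREADED
                                          : COINIT_MULTITHREADED);
    if (hr == S_FALSE)
        ::CoUninitialize();             // don't leave an extra reference behind
    if (SUCCEEDED(hr))
        return wantSTA ? AptResult::BecameSTA : AptResult::BecameMTA;
    if (hr == RPC_E_CHANGED_MODE)
        return AptResult::AlreadyOther; // someone else picked the other mode first
    return AptResult::Failed;
}
#endif
// ---------------------------------------------------------------------------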
EnsureComStarted(FALSE); return GetApartment(); } #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT //---------------------------------------------------------------------------- // // ThreadStore Implementation // //---------------------------------------------------------------------------- ThreadStore::ThreadStore() : m_Crst(CrstThreadStore, (CrstFlags) (CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD)), m_ThreadCount(0), m_MaxThreadCount(0), m_UnstartedThreadCount(0), m_BackgroundThreadCount(0), m_PendingThreadCount(0), m_DeadThreadCount(0), m_DeadThreadCountForGCTrigger(0), m_TriggerGCForDeadThreads(false), m_HoldingThread(0) { CONTRACTL { THROWS; GC_NOTRIGGER; } CONTRACTL_END; m_TerminationEvent.CreateManualEvent(FALSE); _ASSERTE(m_TerminationEvent.IsValid()); } void ThreadStore::InitThreadStore() { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; s_pThreadStore = new ThreadStore; g_pThinLockThreadIdDispenser = new IdDispenser(); ThreadSuspend::g_pGCSuspendEvent = new CLREvent(); ThreadSuspend::g_pGCSuspendEvent->CreateManualEvent(FALSE); s_pWaitForStackCrawlEvent = new CLREvent(); s_pWaitForStackCrawlEvent->CreateManualEvent(FALSE); s_DeadThreadCountThresholdForGCTrigger = static_cast<LONG>(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_Thread_DeadThreadCountThresholdForGCTrigger)); if (s_DeadThreadCountThresholdForGCTrigger < 0) { s_DeadThreadCountThresholdForGCTrigger = 0; } s_DeadThreadGCTriggerPeriodMilliseconds = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_Thread_DeadThreadGCTriggerPeriodMilliseconds); s_DeadThreadGenerationCounts = nullptr; } // Enter and leave the critical section around the thread store. Clients should // use LockThreadStore and UnlockThreadStore because ThreadStore lock has // additional semantics well beyond a normal lock. 
DEBUG_NOINLINE void ThreadStore::Enter() { CONTRACTL { NOTHROW; GC_NOTRIGGER; // we must be in preemptive mode while taking this lock // if suspension is in progress, the lock is taken, and there is no way to suspend us once we block MODE_PREEMPTIVE; } CONTRACTL_END; ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT; CHECK_ONE_STORE(); m_Crst.Enter(); } DEBUG_NOINLINE void ThreadStore::Leave() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT; CHECK_ONE_STORE(); m_Crst.Leave(); } void ThreadStore::LockThreadStore() { WRAPPER_NO_CONTRACT; // The actual implementation is in ThreadSuspend class since it is coupled // with thread suspension logic ThreadSuspend::LockThreadStore(ThreadSuspend::SUSPEND_OTHER); } void ThreadStore::UnlockThreadStore() { WRAPPER_NO_CONTRACT; // The actual implementation is in ThreadSuspend class since it is coupled // with thread suspension logic ThreadSuspend::UnlockThreadStore(FALSE, ThreadSuspend::SUSPEND_OTHER); } // AddThread adds 'newThread' to m_ThreadList void ThreadStore::AddThread(Thread *newThread) { CONTRACTL { NOTHROW; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; LOG((LF_SYNC, INFO3, "AddThread obtain lock\n")); BOOL lockHeld = newThread->HasThreadStateNC(Thread::TSNC_TSLTakenForStartup); _ASSERTE(!lockHeld || (lockHeld && ThreadStore::HoldingThreadStore())); ThreadStoreLockHolder TSLockHolder(!lockHeld); s_pThreadStore->m_ThreadList.InsertTail(newThread); s_pThreadStore->m_ThreadCount++; if (s_pThreadStore->m_MaxThreadCount < s_pThreadStore->m_ThreadCount) s_pThreadStore->m_MaxThreadCount = s_pThreadStore->m_ThreadCount; if (newThread->IsUnstarted()) s_pThreadStore->m_UnstartedThreadCount++; newThread->SetThreadStateNC(Thread::TSNC_ExistInThreadStore); _ASSERTE(!newThread->IsBackground()); _ASSERTE(!newThread->IsDead()); } // this function is just desgined to avoid deadlocks during abnormal process termination, and should not be used for any other purpose BOOL ThreadStore::CanAcquireLock() { WRAPPER_NO_CONTRACT; { return (s_pThreadStore->m_Crst.m_criticalsection.LockCount == -1 || (size_t)s_pThreadStore->m_Crst.m_criticalsection.OwningThread == (size_t)GetCurrentThreadId()); } } // Whenever one of the components of OtherThreadsComplete() has changed in the // correct direction, see whether we can now shutdown the EE because only background // threads are running. void ThreadStore::CheckForEEShutdown() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; if (g_fWeControlLifetime && s_pThreadStore->OtherThreadsComplete()) { BOOL bRet; bRet = s_pThreadStore->m_TerminationEvent.Set(); _ASSERTE(bRet); } } BOOL ThreadStore::RemoveThread(Thread *target) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; BOOL found; Thread *ret; #if 0 // This assert is not valid when failing to create background GC thread. // Main GC thread holds the TS lock. 
_ASSERTE (ThreadStore::HoldingThreadStore()); #endif _ASSERTE(s_pThreadStore->m_Crst.GetEnterCount() > 0 || IsAtProcessExit()); _ASSERTE(s_pThreadStore->DbgFindThread(target)); ret = s_pThreadStore->m_ThreadList.FindAndRemove(target); _ASSERTE(ret && ret == target); found = (ret != NULL); if (found) { target->ResetThreadStateNC(Thread::TSNC_ExistInThreadStore); s_pThreadStore->m_ThreadCount--; if (target->IsDead()) { s_pThreadStore->m_DeadThreadCount--; s_pThreadStore->DecrementDeadThreadCountForGCTrigger(); } // Unstarted threads are not in the Background count: if (target->IsUnstarted()) s_pThreadStore->m_UnstartedThreadCount--; else if (target->IsBackground()) s_pThreadStore->m_BackgroundThreadCount--; FastInterlockExchangeAddLong( (LONGLONG *)&Thread::s_workerThreadPoolCompletionCountOverflow, target->m_workerThreadPoolCompletionCount); FastInterlockExchangeAddLong( (LONGLONG *)&Thread::s_ioThreadPoolCompletionCountOverflow, target->m_ioThreadPoolCompletionCount); FastInterlockExchangeAddLong( (LONGLONG *)&Thread::s_monitorLockContentionCountOverflow, target->m_monitorLockContentionCount); _ASSERTE(s_pThreadStore->m_ThreadCount >= 0); _ASSERTE(s_pThreadStore->m_BackgroundThreadCount >= 0); _ASSERTE(s_pThreadStore->m_ThreadCount >= s_pThreadStore->m_BackgroundThreadCount); _ASSERTE(s_pThreadStore->m_ThreadCount >= s_pThreadStore->m_UnstartedThreadCount); _ASSERTE(s_pThreadStore->m_ThreadCount >= s_pThreadStore->m_DeadThreadCount); // One of the components of OtherThreadsComplete() has changed, so check whether // we should now exit the EE. CheckForEEShutdown(); } return found; } // When a thread is created as unstarted. Later it may get started, in which case // someone calls Thread::HasStarted() on that physical thread. This completes // the Setup and calls here. void ThreadStore::TransferStartedThread(Thread *thread) { CONTRACTL { NOTHROW; GC_TRIGGERS; PRECONDITION(thread != NULL); } CONTRACTL_END; _ASSERTE(GetThreadNULLOk() == thread); BOOL lockHeld = thread->HasThreadStateNC(Thread::TSNC_TSLTakenForStartup); // This ASSERT is correct for one of the following reasons. // - The lock is not currently held which means it will be taken below. // - The thread was created in an Unstarted state and the lock is // being held by the creator thread. The only thing we know for sure // is that the lock is held and not by this thread. _ASSERTE(!lockHeld || (lockHeld && !s_pThreadStore->m_holderthreadid.IsUnknown() && ((s_pThreadStore->m_HoldingThread != NULL) || IsGCSpecialThread()) && !ThreadStore::HoldingThreadStore())); LOG((LF_SYNC, INFO3, "TransferStartedThread obtain lock\n")); ThreadStoreLockHolder TSLockHolder(!lockHeld); _ASSERTE(s_pThreadStore->DbgFindThread(thread)); _ASSERTE(thread->HasValidThreadHandle()); _ASSERTE(thread->m_State & Thread::TS_WeOwn); _ASSERTE(thread->IsUnstarted()); _ASSERTE(!thread->IsDead()); // Of course, m_ThreadCount is already correct since it includes started and // unstarted threads. s_pThreadStore->m_UnstartedThreadCount--; // We only count background threads that have been started if (thread->IsBackground()) s_pThreadStore->m_BackgroundThreadCount++; _ASSERTE(s_pThreadStore->m_PendingThreadCount > 0); FastInterlockDecrement(&s_pThreadStore->m_PendingThreadCount); // As soon as we erase this bit, the thread becomes eligible for suspension, // stopping, interruption, etc. 
FastInterlockAnd((ULONG *) &thread->m_State, ~Thread::TS_Unstarted); FastInterlockOr((ULONG *) &thread->m_State, Thread::TS_LegalToJoin); // One of the components of OtherThreadsComplete() has changed, so check whether // we should now exit the EE. CheckForEEShutdown(); } LONG ThreadStore::s_DeadThreadCountThresholdForGCTrigger = 0; DWORD ThreadStore::s_DeadThreadGCTriggerPeriodMilliseconds = 0; SIZE_T *ThreadStore::s_DeadThreadGenerationCounts = nullptr; void ThreadStore::IncrementDeadThreadCountForGCTrigger() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; // Although all increments and decrements are usually done inside a lock, that is not sufficient to synchronize with a // background GC thread resetting this value, hence the interlocked operation. Ignore overflow; overflow would likely never // occur, the count is treated as unsigned, and nothing bad would happen if it were to overflow. SIZE_T count = static_cast<SIZE_T>(FastInterlockIncrement(&m_DeadThreadCountForGCTrigger)); SIZE_T countThreshold = static_cast<SIZE_T>(s_DeadThreadCountThresholdForGCTrigger); if (count < countThreshold || countThreshold == 0) { return; } IGCHeap *gcHeap = GCHeapUtilities::GetGCHeap(); if (gcHeap == nullptr) { return; } SIZE_T gcLastMilliseconds = gcHeap->GetLastGCStartTime(gcHeap->GetMaxGeneration()); SIZE_T gcNowMilliseconds = gcHeap->GetNow(); if (gcNowMilliseconds - gcLastMilliseconds < s_DeadThreadGCTriggerPeriodMilliseconds) { return; } if (!g_fEEStarted) // required for FinalizerThread::EnableFinalization() below { return; } // The GC is triggered on the finalizer thread since it's not safe to trigger it on DLL_THREAD_DETACH. // TriggerGCForDeadThreadsIfNecessary() will determine which generation of GC to trigger, and may not actually trigger a GC. // If a GC is triggered, since there would be a delay before the dead thread count is updated, clear the count and wait for // it to reach the threshold again. If a GC would not be triggered, the count is still cleared here to prevent waking up the // finalizer thread to do the work in TriggerGCForDeadThreadsIfNecessary() for every dead thread. m_DeadThreadCountForGCTrigger = 0; m_TriggerGCForDeadThreads = true; FinalizerThread::EnableFinalization(); } void ThreadStore::DecrementDeadThreadCountForGCTrigger() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; // Although all increments and decrements are usually done inside a lock, that is not sufficient to synchronize with a // background GC thread resetting this value, hence the interlocked operation. if (FastInterlockDecrement(&m_DeadThreadCountForGCTrigger) < 0) { m_DeadThreadCountForGCTrigger = 0; } } void ThreadStore::OnMaxGenerationGCStarted() { LIMITED_METHOD_CONTRACT; // A dead thread may contribute to triggering a GC at most once. After a max-generation GC occurs, if some dead thread // objects are still reachable due to references to the thread objects, they will not contribute to triggering a GC again. // Synchronize the store with increment/decrement operations occurring on different threads, and make the change visible to // other threads in order to prevent unnecessary GC triggers. 
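// ---------------------------------------------------------------------------
// [Illustrative sketch, compiled out] IncrementDeadThreadCountForGCTrigger above
// gates the GC request on two conditions: the interlocked dead-thread count must
// reach the configured threshold, and enough time must have passed since the
// last max-generation GC.  The standalone sketch below models that double gate
// with std::atomic and std::chrono; all names are hypothetical and the
// time-stamp handling is simplified (not fully thread-safe) for illustration.
#if 0
#include <atomic>
#include <chrono>

struct DeadThreadGate
{
    std::atomic<long> count{0};
    long threshold;                                   // e.g. read from configuration
    std::chrono::milliseconds minPeriod;              // minimum time between triggers
    std::chrono::steady_clock::time_point lastTrigger = std::chrono::steady_clock::now();

    // Returns true when a collection should be requested for this increment.
    bool OnDeadThread()
    {
        if (threshold == 0 || ++count < threshold)
            return false;                             // below the count threshold

        const auto now = std::chrono::steady_clock::now();
        if (now - lastTrigger < minPeriod)
            return false;                             // too soon after the last trigger

        count = 0;                                    // reset and wait for the next batch
        lastTrigger = now;
        return true;
    }
};
#endif
// ---------------------------------------------------------------------------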
FastInterlockExchange(&m_DeadThreadCountForGCTrigger, 0); } bool ThreadStore::ShouldTriggerGCForDeadThreads() { LIMITED_METHOD_CONTRACT; return m_TriggerGCForDeadThreads; } void ThreadStore::TriggerGCForDeadThreadsIfNecessary() { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; if (!m_TriggerGCForDeadThreads) { return; } m_TriggerGCForDeadThreads = false; if (g_fEEShutDown) { // Not safe to touch CLR state return; } unsigned gcGenerationToTrigger = 0; IGCHeap *gcHeap = GCHeapUtilities::GetGCHeap(); _ASSERTE(gcHeap != nullptr); SIZE_T generationCountThreshold = static_cast<SIZE_T>(s_DeadThreadCountThresholdForGCTrigger) / 2; unsigned maxGeneration = gcHeap->GetMaxGeneration(); if (!s_DeadThreadGenerationCounts) { // initialize this field on first use with an entry for every table. s_DeadThreadGenerationCounts = new (nothrow) SIZE_T[maxGeneration + 1]; if (!s_DeadThreadGenerationCounts) { return; } } memset(s_DeadThreadGenerationCounts, 0, sizeof(SIZE_T) * (maxGeneration + 1)); { ThreadStoreLockHolder threadStoreLockHolder; GCX_COOP(); // Determine the generation for which to trigger a GC. Iterate over all dead threads that have not yet been considered // for triggering a GC and see how many are in which generations. for (Thread *thread = ThreadStore::GetAllThreadList(NULL, Thread::TS_Dead, Thread::TS_Dead); thread != nullptr; thread = ThreadStore::GetAllThreadList(thread, Thread::TS_Dead, Thread::TS_Dead)) { if (thread->HasDeadThreadBeenConsideredForGCTrigger()) { continue; } Object *exposedObject = OBJECTREFToObject(thread->GetExposedObjectRaw()); if (exposedObject == nullptr) { continue; } unsigned exposedObjectGeneration = gcHeap->WhichGeneration(exposedObject); SIZE_T newDeadThreadGenerationCount = ++s_DeadThreadGenerationCounts[exposedObjectGeneration]; if (exposedObjectGeneration > gcGenerationToTrigger && newDeadThreadGenerationCount >= generationCountThreshold) { gcGenerationToTrigger = exposedObjectGeneration; if (gcGenerationToTrigger >= maxGeneration) { break; } } } // Make sure that enough time has elapsed since the last GC of the desired generation. We don't want to trigger GCs // based on this heuristic too often. Give it some time to let the memory pressure trigger GCs automatically, and only // if it doesn't in the given time, this heuristic may kick in to trigger a GC. SIZE_T gcLastMilliseconds = gcHeap->GetLastGCStartTime(gcGenerationToTrigger); SIZE_T gcNowMilliseconds = gcHeap->GetNow(); if (gcNowMilliseconds - gcLastMilliseconds < s_DeadThreadGCTriggerPeriodMilliseconds) { return; } // For threads whose exposed objects are in the generation of GC that will be triggered or in a lower GC generation, // mark them as having contributed to a GC trigger to prevent redundant GC triggers for (Thread *thread = ThreadStore::GetAllThreadList(NULL, Thread::TS_Dead, Thread::TS_Dead); thread != nullptr; thread = ThreadStore::GetAllThreadList(thread, Thread::TS_Dead, Thread::TS_Dead)) { if (thread->HasDeadThreadBeenConsideredForGCTrigger()) { continue; } Object *exposedObject = OBJECTREFToObject(thread->GetExposedObjectRaw()); if (exposedObject == nullptr) { continue; } if (gcGenerationToTrigger < maxGeneration && gcHeap->WhichGeneration(exposedObject) > gcGenerationToTrigger) { continue; } thread->SetHasDeadThreadBeenConsideredForGCTrigger(); } } // ThreadStoreLockHolder, GCX_COOP() GCHeapUtilities::GetGCHeap()->GarbageCollect(gcGenerationToTrigger, FALSE, collection_non_blocking); } #endif // #ifndef DACCESS_COMPILE // Access the list of threads. 
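// ---------------------------------------------------------------------------
// [Illustrative sketch, compiled out] TriggerGCForDeadThreadsIfNecessary above
// tallies dead-thread objects per GC generation and collects the highest
// generation whose tally reaches half the configured dead-thread threshold.
// The standalone function below shows just that selection rule; the names and
// the pre-tallied vector input are hypothetical simplifications.
#if 0
#include <cstddef>
#include <vector>

// generations[i] = number of not-yet-considered dead thread objects in generation i.
static unsigned PickGenerationToCollect(const std::vector<std::size_t>& generations,
                                        std::size_t generationCountThreshold)
{
    unsigned gcGenerationToTrigger = 0;               // default: collect generation 0
    for (unsigned gen = 0; gen < generations.size(); ++gen)
    {
        if (gen > gcGenerationToTrigger && generations[gen] >= generationCountThreshold)
            gcGenerationToTrigger = gen;              // prefer the highest qualifying generation
    }
    return gcGenerationToTrigger;
}
#endif
// ---------------------------------------------------------------------------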
You must be inside a critical section, otherwise // the "cursor" thread might disappear underneath you. Pass in NULL for the // cursor to begin at the start of the list. Thread *ThreadStore::GetAllThreadList(Thread *cursor, ULONG mask, ULONG bits) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; SUPPORTS_DAC; #ifndef DACCESS_COMPILE _ASSERTE((s_pThreadStore->m_Crst.GetEnterCount() > 0) || IsAtProcessExit()); #endif while (TRUE) { cursor = (cursor ? s_pThreadStore->m_ThreadList.GetNext(cursor) : s_pThreadStore->m_ThreadList.GetHead()); if (cursor == NULL) break; if ((cursor->m_State & mask) == bits) return cursor; } return NULL; } // Iterate over the threads that have been started Thread *ThreadStore::GetThreadList(Thread *cursor) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; SUPPORTS_DAC; return GetAllThreadList(cursor, (Thread::TS_Unstarted | Thread::TS_Dead), 0); } //--------------------------------------------------------------------------------------- // // Grab a consistent snapshot of the thread's state, for reporting purposes only. // // Return Value: // the current state of the thread // Thread::ThreadState Thread::GetSnapshotState() { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; ThreadState res = m_State; if (res & TS_ReportDead) { res = (ThreadState) (res | TS_Dead); } return res; } #ifndef DACCESS_COMPILE BOOL CLREventWaitWithTry(CLREventBase *pEvent, DWORD timeout, BOOL fAlertable, DWORD *pStatus) { CONTRACTL { NOTHROW; WRAPPER(GC_TRIGGERS); } CONTRACTL_END; BOOL fLoop = TRUE; EX_TRY { *pStatus = pEvent->Wait(timeout, fAlertable); fLoop = FALSE; } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions); return fLoop; } // We shut down the EE only when all the non-background threads have terminated // (unless this is an exceptional termination). So the main thread calls here to // wait before tearing down the EE. void ThreadStore::WaitForOtherThreads() { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; CHECK_ONE_STORE(); Thread *pCurThread = GetThread(); // Regardless of whether the main thread is a background thread or not, force // it to be one. This simplifies our rules for counting non-background threads. pCurThread->SetBackground(TRUE); LOG((LF_SYNC, INFO3, "WaitForOtherThreads obtain lock\n")); ThreadStoreLockHolder TSLockHolder(TRUE); if (!OtherThreadsComplete()) { TSLockHolder.Release(); FastInterlockOr((ULONG *) &pCurThread->m_State, Thread::TS_ReportDead); DWORD ret = WAIT_OBJECT_0; while (CLREventWaitWithTry(&m_TerminationEvent, INFINITE, TRUE, &ret)) { } _ASSERTE(ret == WAIT_OBJECT_0); } } #ifdef _DEBUG BOOL ThreadStore::DbgFindThread(Thread *target) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; CHECK_ONE_STORE(); // Cache the current change stamp for g_TrapReturningThreads LONG chgStamp = g_trtChgStamp; STRESS_LOG3(LF_STORE, LL_INFO100, "ThreadStore::DbgFindThread - [thread=%p]. trt=%d. chgStamp=%d\n", GetThreadNULLOk(), g_TrapReturningThreads.Load(), chgStamp); #if 0 // g_TrapReturningThreads debug code. int iRetry = 0; Retry: #endif // g_TrapReturningThreads debug code. 
BOOL found = FALSE; Thread *cur = NULL; LONG cnt = 0; LONG cntBack = 0; LONG cntUnstart = 0; LONG cntDead = 0; LONG cntReturn = 0; while ((cur = GetAllThreadList(cur, 0, 0)) != NULL) { cnt++; if (cur->IsDead()) cntDead++; // Unstarted threads do not contribute to the count of background threads if (cur->IsUnstarted()) cntUnstart++; else if (cur->IsBackground()) cntBack++; if (cur == target) found = TRUE; // Note that (DebugSuspendPending | SuspendPending) implies a count of 2. // We don't count GCPending because a single trap is held for the entire // GC, instead of counting each interesting thread. if (cur->m_State & Thread::TS_DebugSuspendPending) cntReturn++; if (cur->m_TraceCallCount > 0) cntReturn++; if (cur->IsAbortRequested()) cntReturn++; } _ASSERTE(cnt == m_ThreadCount); _ASSERTE(cntUnstart == m_UnstartedThreadCount); _ASSERTE(cntBack == m_BackgroundThreadCount); _ASSERTE(cntDead == m_DeadThreadCount); _ASSERTE(0 <= m_PendingThreadCount); #if 0 // g_TrapReturningThreads debug code. if (cntReturn != g_TrapReturningThreads /*&& !g_fEEShutDown*/) { // If count is off, try again, to account for multiple threads. if (iRetry < 4) { // printf("Retry %d. cntReturn:%d, gReturn:%d\n", iRetry, cntReturn, g_TrapReturningThreads); ++iRetry; goto Retry; } printf("cnt:%d, Un:%d, Back:%d, Dead:%d, cntReturn:%d, TrapReturn:%d, eeShutdown:%d, threadShutdown:%d\n", cnt,cntUnstart,cntBack,cntDead,cntReturn,g_TrapReturningThreads, g_fEEShutDown, Thread::IsAtProcessExit()); LOG((LF_CORDB, LL_INFO1000, "SUSPEND: cnt:%d, Un:%d, Back:%d, Dead:%d, cntReturn:%d, TrapReturn:%d, eeShutdown:%d, threadShutdown:%d\n", cnt,cntUnstart,cntBack,cntDead,cntReturn,g_TrapReturningThreads, g_fEEShutDown, Thread::IsAtProcessExit()) ); //_ASSERTE(cntReturn + 2 >= g_TrapReturningThreads); } if (iRetry > 0 && iRetry < 4) { printf("%d retries to re-sync counted TrapReturn with global TrapReturn.\n", iRetry); } #endif // g_TrapReturningThreads debug code. STRESS_LOG4(LF_STORE, LL_INFO100, "ThreadStore::DbgFindThread - [thread=%p]. trt=%d. chg=%d. cnt=%d\n", GetThreadNULLOk(), g_TrapReturningThreads.Load(), g_trtChgStamp.Load(), cntReturn); // Because of race conditions and the fact that the GC places its // own count, I can't assert this precisely. But I do want to be // sure that this count isn't wandering ever higher -- with a // nasty impact on the performance of GC mode changes and method // call chaining! // // We don't bother asserting this during process exit, because // during a shutdown we will quietly terminate threads that are // being waited on. (If we aren't shutting down, we carefully // decrement our counts and alert anyone waiting for us to // return). // // Note: we don't actually assert this if // ThreadStore::TrapReturningThreads() updated g_TrapReturningThreads // between the beginning of this function and the moment of the assert. 
// *** The order of evaluation in the if condition is important *** _ASSERTE( (g_trtChgInFlight != 0 || (cntReturn + 2 >= g_TrapReturningThreads) || chgStamp != g_trtChgStamp) || g_fEEShutDown); return found; } #endif // _DEBUG void Thread::HandleThreadInterrupt () { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; // If we're waiting for shutdown, we don't want to abort/interrupt this thread if (HasThreadStateNC(Thread::TSNC_BlockedForShutdown)) return; if ((m_UserInterrupt & TI_Abort) != 0) { HandleThreadAbort(); } if ((m_UserInterrupt & TI_Interrupt) != 0) { ResetThreadState ((ThreadState)(TS_Interrupted | TS_Interruptible)); FastInterlockAnd ((DWORD*)&m_UserInterrupt, ~TI_Interrupt); COMPlusThrow(kThreadInterruptedException); } } #ifdef _DEBUG #define MAXSTACKBYTES (2 * GetOsPageSize()) void CleanStackForFastGCStress () { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; PVOID StackLimit = ClrTeb::GetStackLimit(); size_t nBytes = (size_t)&nBytes - (size_t)StackLimit; nBytes &= ~sizeof (size_t); if (nBytes > MAXSTACKBYTES) { nBytes = MAXSTACKBYTES; } size_t* buffer = (size_t*) _alloca (nBytes); memset(buffer, 0, nBytes); GetThread()->m_pCleanedStackBase = &nBytes; } void Thread::ObjectRefFlush(Thread* thread) { // this is debug only code, so no need to validate STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_ENTRY_POINT; _ASSERTE(thread->PreemptiveGCDisabled()); // Should have been in managed code memset(thread->dangerousObjRefs, 0, sizeof(thread->dangerousObjRefs)); thread->m_allObjRefEntriesBad = FALSE; CLEANSTACKFORFASTGCSTRESS (); } #endif #if defined(STRESS_HEAP) PtrHashMap *g_pUniqueStackMap = NULL; Crst *g_pUniqueStackCrst = NULL; #define UniqueStackDepth 8 BOOL StackCompare (UPTR val1, UPTR val2) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; size_t *p1 = (size_t *)(val1 << 1); size_t *p2 = (size_t *)val2; if (p1[0] != p2[0]) { return FALSE; } size_t nElem = p1[0]; if (nElem >= UniqueStackDepth) { nElem = UniqueStackDepth; } p1 ++; p2 ++; for (size_t n = 0; n < nElem; n ++) { if (p1[n] != p2[n]) { return FALSE; } } return TRUE; } void UniqueStackSetupMap() { WRAPPER_NO_CONTRACT; if (g_pUniqueStackCrst == NULL) { Crst *Attempt = new Crst ( CrstUniqueStack, CrstFlags(CRST_REENTRANCY | CRST_UNSAFE_ANYMODE)); if (FastInterlockCompareExchangePointer(&g_pUniqueStackCrst, Attempt, NULL) != NULL) { // We lost the race delete Attempt; } } // Now we have a Crst we can use to synchronize the remainder of the init. 
if (g_pUniqueStackMap == NULL) { CrstHolder ch(g_pUniqueStackCrst); if (g_pUniqueStackMap == NULL) { PtrHashMap *map = new (SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()) PtrHashMap (); LockOwner lock = {g_pUniqueStackCrst, IsOwnerOfCrst}; map->Init (256, StackCompare, TRUE, &lock); g_pUniqueStackMap = map; } } } BOOL StartUniqueStackMapHelper() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; BOOL fOK = TRUE; EX_TRY { if (g_pUniqueStackMap == NULL) { UniqueStackSetupMap(); } } EX_CATCH { fOK = FALSE; } EX_END_CATCH(SwallowAllExceptions); return fOK; } BOOL StartUniqueStackMap () { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; return StartUniqueStackMapHelper(); } #ifndef TARGET_UNIX size_t UpdateStackHash(size_t hash, size_t retAddr) { return ((hash << 3) + hash) ^ retAddr; } /***********************************************************************/ size_t getStackHash(size_t* stackTrace, size_t* stackTop, size_t* stackStop, size_t stackBase, size_t stackLimit) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; // return a hash of every return address found between 'stackTop' (the lowest address) // and 'stackStop' (the highest address) size_t hash = 0; int idx = 0; #ifdef TARGET_X86 static size_t moduleBase = (size_t) -1; static size_t moduleTop = (size_t) -1; if (moduleTop == (size_t) -1) { MEMORY_BASIC_INFORMATION mbi; if (ClrVirtualQuery(getStackHash, &mbi, sizeof(mbi))) { moduleBase = (size_t)mbi.AllocationBase; moduleTop = (size_t)mbi.BaseAddress + mbi.RegionSize; } else { // way bad error, probably just assert and exit _ASSERTE (!"ClrVirtualQuery failed"); moduleBase = 0; moduleTop = 0; } } while (stackTop < stackStop) { // Clean out things that point to stack, as those can't be return addresses if (*stackTop > moduleBase && *stackTop < moduleTop) { TADDR dummy; if (isRetAddr((TADDR)*stackTop, &dummy)) { hash = UpdateStackHash(hash, *stackTop); // If there is no jitted code on the stack, then just use the // top 16 frames as the context. idx++; if (idx <= UniqueStackDepth) { stackTrace [idx] = *stackTop; } } } stackTop++; } #else // TARGET_X86 CONTEXT ctx; ClrCaptureContext(&ctx); UINT_PTR uControlPc = (UINT_PTR)GetIP(&ctx); UINT_PTR uImageBase; UINT_PTR uPrevControlPc = uControlPc; for (;;) { RtlLookupFunctionEntry(uControlPc, ARM_ONLY((DWORD*))(&uImageBase), NULL ); if (((UINT_PTR)GetClrModuleBase()) != uImageBase) { break; } uControlPc = Thread::VirtualUnwindCallFrame(&ctx); UINT_PTR uRetAddrForHash = uControlPc; if (uPrevControlPc == uControlPc) { // This is a special case when we fail to acquire the loader lock // in RtlLookupFunctionEntry(), which then returns false. The end // result is that we cannot go any further on the stack and // we will loop infinitely (because the owner of the loader lock // is blocked on us). hash = 0; break; } else { uPrevControlPc = uControlPc; } hash = UpdateStackHash(hash, uRetAddrForHash); // If there is no jitted code on the stack, then just use the // top 16 frames as the context. 
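// ---------------------------------------------------------------------------
// [Illustrative sketch, compiled out] UpdateStackHash above combines return
// addresses as hash = (hash * 9) ^ retAddr (the "(hash << 3) + hash" form), and
// getStackHash additionally records the first few addresses with the frame
// count in slot 0.  The hypothetical standalone helper below models that
// accumulation over an already-collected list of return addresses.
#if 0
#include <cstddef>
#include <vector>

static std::size_t HashReturnAddresses(const std::vector<std::size_t>& retAddrs,
                                       std::size_t trace[], std::size_t traceDepth)
{
    std::size_t hash = 0;
    std::size_t idx  = 0;
    for (std::size_t addr : retAddrs)
    {
        hash = ((hash << 3) + hash) ^ addr;   // hash = hash * 9 ^ addr
        ++idx;
        if (idx < traceDepth)
            trace[idx] = addr;                // slots 1..traceDepth-1 hold addresses
    }
    trace[0] = idx;                           // slot 0 holds the total frame count
    return hash;
}
#endif
// ---------------------------------------------------------------------------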
idx++; if (idx <= UniqueStackDepth) { stackTrace [idx] = uRetAddrForHash; } } #endif // TARGET_X86 stackTrace [0] = idx; return(hash); } void UniqueStackHelper(size_t stackTraceHash, size_t *stackTrace) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; EX_TRY { size_t nElem = stackTrace[0]; if (nElem >= UniqueStackDepth) { nElem = UniqueStackDepth; } AllocMemHolder<size_t> stackTraceInMap = SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(size_t *)) * (S_SIZE_T(nElem) + S_SIZE_T(1))); memcpy (stackTraceInMap, stackTrace, sizeof(size_t *) * (nElem + 1)); g_pUniqueStackMap->InsertValue(stackTraceHash, stackTraceInMap); stackTraceInMap.SuppressRelease(); } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions); } /***********************************************************************/ /* returns true if this stack has not been seen before, useful for running tests only once per stack trace. */ BOOL Thread::UniqueStack(void* stackStart) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; // If we where not told where to start, start at the caller of UniqueStack if (stackStart == 0) { stackStart = &stackStart; } if (g_pUniqueStackMap == NULL) { if (!StartUniqueStackMap ()) { // We fail to initialize unique stack map due to OOM. // Let's say the stack is unique. return TRUE; } } size_t stackTrace[UniqueStackDepth+1] = {0}; // stackTraceHash represents a hash of entire stack at the time we make the call, // We insure at least GC per unique stackTrace. What information is contained in // 'stackTrace' is somewhat arbitrary. We choose it to mean all functions live // on the stack up to the first jitted function. size_t stackTraceHash; Thread* pThread = GetThread(); void* stopPoint = pThread->m_CacheStackBase; #ifdef TARGET_X86 // Find the stop point (most jitted function) Frame* pFrame = pThread->GetFrame(); for(;;) { // skip GC frames if (pFrame == 0 || pFrame == (Frame*) -1) break; pFrame->GetFunction(); // This insures that helper frames are inited if (pFrame->GetReturnAddress() != 0) { stopPoint = pFrame; break; } pFrame = pFrame->Next(); } #endif // TARGET_X86 // Get hash of all return addresses between here an the top most jitted function stackTraceHash = getStackHash (stackTrace, (size_t*) stackStart, (size_t*) stopPoint, size_t(pThread->m_CacheStackBase), size_t(pThread->m_CacheStackLimit)); if (stackTraceHash == 0 || g_pUniqueStackMap->LookupValue (stackTraceHash, stackTrace) != (LPVOID)INVALIDENTRY) { return FALSE; } BOOL fUnique = FALSE; { CrstHolder ch(g_pUniqueStackCrst); #ifdef _DEBUG if (GetThreadNULLOk()) GetThread()->m_bUniqueStacking = TRUE; #endif if (g_pUniqueStackMap->LookupValue (stackTraceHash, stackTrace) != (LPVOID)INVALIDENTRY) { fUnique = FALSE; } else { fUnique = TRUE; FAULT_NOT_FATAL(); UniqueStackHelper(stackTraceHash, stackTrace); } #ifdef _DEBUG if (GetThreadNULLOk()) GetThread()->m_bUniqueStacking = FALSE; #endif } #ifdef _DEBUG static int fCheckStack = -1; if (fCheckStack == -1) { fCheckStack = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_FastGCCheckStack); } if (fCheckStack && pThread->m_pCleanedStackBase > stackTrace && pThread->m_pCleanedStackBase - stackTrace > (int) MAXSTACKBYTES) { _ASSERTE (!"Garbage on stack"); } #endif return fUnique; } #else // !TARGET_UNIX BOOL Thread::UniqueStack(void* stackStart) { return FALSE; } #endif // !TARGET_UNIX #endif // STRESS_HEAP /* * GetStackLowerBound * * Returns the lower bound of the stack space. 
Note -- the practical bound is some number of pages greater than * this value -- those pages are reserved for a stack overflow exception processing. * * Parameters: * None * * Returns: * address of the lower bound of the threads's stack. */ void * Thread::GetStackLowerBound() { // Called during fiber switch. Can not have non-static contract. STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; #ifndef TARGET_UNIX MEMORY_BASIC_INFORMATION lowerBoundMemInfo; SIZE_T dwRes; dwRes = ClrVirtualQuery((const void *)&lowerBoundMemInfo, &lowerBoundMemInfo, sizeof(MEMORY_BASIC_INFORMATION)); if (sizeof(MEMORY_BASIC_INFORMATION) == dwRes) { return (void *)(lowerBoundMemInfo.AllocationBase); } else { return NULL; } #else // !TARGET_UNIX return PAL_GetStackLimit(); #endif // !TARGET_UNIX } /* * GetStackUpperBound * * Return the upper bound of the thread's stack space. * * Parameters: * None * * Returns: * address of the base of the threads's stack. */ void *Thread::GetStackUpperBound() { // Called during fiber switch. Can not have non-static contract. STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; return ClrTeb::GetStackBase(); } BOOL Thread::SetStackLimits(SetStackLimitScope scope) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; if (scope == fAll) { m_CacheStackBase = GetStackUpperBound(); m_CacheStackLimit = GetStackLowerBound(); if (m_CacheStackLimit == NULL) { _ASSERTE(!"Failed to set stack limits"); return FALSE; } // Compute the limit used by EnsureSufficientExecutionStack and cache it on the thread. This minimum stack size should // be sufficient to allow a typical non-recursive call chain to execute, including potential exception handling and // garbage collection. Used for probing for available stack space through RuntimeImports.EnsureSufficientExecutionStack, // among other things. #ifdef HOST_64BIT const UINT_PTR MinExecutionStackSize = 128 * 1024; #else // !HOST_64BIT const UINT_PTR MinExecutionStackSize = 64 * 1024; #endif // HOST_64BIT _ASSERTE(m_CacheStackBase >= m_CacheStackLimit); if ((reinterpret_cast<UINT_PTR>(m_CacheStackBase) - reinterpret_cast<UINT_PTR>(m_CacheStackLimit)) > MinExecutionStackSize) { m_CacheStackSufficientExecutionLimit = reinterpret_cast<UINT_PTR>(m_CacheStackLimit) + MinExecutionStackSize; } else { m_CacheStackSufficientExecutionLimit = reinterpret_cast<UINT_PTR>(m_CacheStackBase); } // Compute the limit used by CheckCanUseStackAllocand cache it on the thread. This minimum stack size should // be sufficient to avoid all significant risk of a moderate size stack alloc interfering with application behavior const UINT_PTR StackAllocNonRiskyExecutionStackSize = 512 * 1024; _ASSERTE(m_CacheStackBase >= m_CacheStackLimit); if ((reinterpret_cast<UINT_PTR>(m_CacheStackBase) - reinterpret_cast<UINT_PTR>(m_CacheStackLimit)) > StackAllocNonRiskyExecutionStackSize) { m_CacheStackStackAllocNonRiskyExecutionLimit = reinterpret_cast<UINT_PTR>(m_CacheStackLimit) + StackAllocNonRiskyExecutionStackSize; } else { m_CacheStackStackAllocNonRiskyExecutionLimit = reinterpret_cast<UINT_PTR>(m_CacheStackBase); } } // Ensure that we've setup the stack guarantee properly before we cache the stack limits // as they depend upon the stack guarantee. if (FAILED(CLRSetThreadStackGuarantee())) return FALSE; return TRUE; } //--------------------------------------------------------------------------------------------- // Routines we use to managed a thread's stack, for fiber switching or stack overflow purposes. 
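// ---------------------------------------------------------------------------
// [Illustrative sketch, compiled out] SetStackLimits above caches
// m_CacheStackSufficientExecutionLimit as "stack limit + minimum execution
// stack size", clamped to the stack base when the stack is smaller than that
// minimum.  The standalone helper below reproduces just that clamping
// arithmetic (names are hypothetical).
#if 0
#include <cstdint>

static std::uintptr_t ComputeSufficientExecutionLimit(std::uintptr_t stackBase,
                                                      std::uintptr_t stackLimit,
                                                      std::uintptr_t minExecutionSize)
{
    // stackBase is the high address, stackLimit the low address.
    if (stackBase - stackLimit > minExecutionSize)
        return stackLimit + minExecutionSize;   // normal case: leave headroom below
    return stackBase;                           // tiny stack: never report "sufficient"
    // Example: with a 1 MB stack and a 128 KB minimum, the probe succeeds while
    // the current stack pointer stays above stackLimit + 128 KB.
}
#endif
// ---------------------------------------------------------------------------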
//--------------------------------------------------------------------------------------------- HRESULT Thread::CLRSetThreadStackGuarantee(SetThreadStackGuaranteeScope fScope) { CONTRACTL { WRAPPER(NOTHROW); GC_NOTRIGGER; } CONTRACTL_END; #ifndef TARGET_UNIX // TODO: we need to measure what the stack usage needs are at the limits in the hosted scenario for host callbacks if (Thread::IsSetThreadStackGuaranteeInUse(fScope)) { // <TODO> Tune this as needed </TODO> ULONG uGuardSize = SIZEOF_DEFAULT_STACK_GUARANTEE; int EXTRA_PAGES = 0; #if defined(HOST_64BIT) // Free Build EH Stack Stats: // -------------------------------- // currently the maximum stack usage we'll face while handling a SO includes: // 4.3k for the OS (kernel32!RaiseException, Rtl EH dispatch code, RtlUnwindEx [second pass]) // 1.2k for the CLR EH setup (NakedThrowHelper*) // 4.5k for other heavy CLR stack creations (2x CONTEXT, 1x REGDISPLAY) // ~1.0k for other misc CLR stack allocations // ----- // 11.0k --> ~2.75 pages for CLR SO EH dispatch // // -plus we might need some more for debugger EH dispatch, Watson, etc... // -also need to take into account that we can lose up to 1 page of the guard region // -additionally, we need to provide some region to hosts to allow for lock acquisition in a hosted scenario // EXTRA_PAGES = 3; INDEBUG(EXTRA_PAGES += 1); int ThreadGuardPages = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_ThreadGuardPages); if (ThreadGuardPages == 0) { uGuardSize += (EXTRA_PAGES * GetOsPageSize()); } else { uGuardSize += (ThreadGuardPages * GetOsPageSize()); } #else // HOST_64BIT #ifdef _DEBUG uGuardSize += (1 * GetOsPageSize()); // one extra page for debug infrastructure #endif // _DEBUG #endif // HOST_64BIT LOG((LF_EH, LL_INFO10000, "STACKOVERFLOW: setting thread stack guarantee to 0x%x\n", uGuardSize)); if (!::SetThreadStackGuarantee(&uGuardSize)) { return HRESULT_FROM_GetLastErrorNA(); } } #endif // !TARGET_UNIX return S_OK; } /* * GetLastNormalStackAddress * * GetLastNormalStackAddress returns the last stack address before the guard * region of a thread. This is the last address that one could write to before * a stack overflow occurs. * * Parameters: * StackLimit - the base of the stack allocation * * Returns: * Address of the first page of the guard region. */ UINT_PTR Thread::GetLastNormalStackAddress(UINT_PTR StackLimit) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; UINT_PTR cbStackGuarantee = GetStackGuarantee(); // Here we take the "hard guard region size", the "stack guarantee" and the "fault page" and add them // all together. Note that the "fault page" is the reason for the extra GetOsPageSize() below. The OS // will guarantee us a certain amount of stack remaining after a stack overflow. This is called the // "stack guarantee". But to do this, it has to fault on the page before that region as the app is // allowed to fault at the very end of that page. So, as a result, the last normal stack address is // one page sooner. 
return StackLimit + (cbStackGuarantee #ifndef TARGET_UNIX + GetOsPageSize() #endif // !TARGET_UNIX + HARD_GUARD_REGION_SIZE); } #ifdef _DEBUG static void DebugLogMBIFlags(UINT uState, UINT uProtect) { CONTRACTL { NOTHROW; GC_NOTRIGGER; CANNOT_TAKE_LOCK; } CONTRACTL_END; #ifndef TARGET_UNIX #define LOG_FLAG(flags, name) \ if (flags & name) \ { \ LOG((LF_EH, LL_INFO1000, "" #name " ")); \ } \ if (uState) { LOG((LF_EH, LL_INFO1000, "State: ")); LOG_FLAG(uState, MEM_COMMIT); LOG_FLAG(uState, MEM_RESERVE); LOG_FLAG(uState, MEM_DECOMMIT); LOG_FLAG(uState, MEM_RELEASE); LOG_FLAG(uState, MEM_FREE); LOG_FLAG(uState, MEM_PRIVATE); LOG_FLAG(uState, MEM_MAPPED); LOG_FLAG(uState, MEM_RESET); LOG_FLAG(uState, MEM_TOP_DOWN); LOG_FLAG(uState, MEM_WRITE_WATCH); LOG_FLAG(uState, MEM_PHYSICAL); LOG_FLAG(uState, MEM_LARGE_PAGES); LOG_FLAG(uState, MEM_4MB_PAGES); } if (uProtect) { LOG((LF_EH, LL_INFO1000, "Protect: ")); LOG_FLAG(uProtect, PAGE_NOACCESS); LOG_FLAG(uProtect, PAGE_READONLY); LOG_FLAG(uProtect, PAGE_READWRITE); LOG_FLAG(uProtect, PAGE_WRITECOPY); LOG_FLAG(uProtect, PAGE_EXECUTE); LOG_FLAG(uProtect, PAGE_EXECUTE_READ); LOG_FLAG(uProtect, PAGE_EXECUTE_READWRITE); LOG_FLAG(uProtect, PAGE_EXECUTE_WRITECOPY); LOG_FLAG(uProtect, PAGE_GUARD); LOG_FLAG(uProtect, PAGE_NOCACHE); LOG_FLAG(uProtect, PAGE_WRITECOMBINE); } #undef LOG_FLAG #endif // !TARGET_UNIX } static void DebugLogStackRegionMBIs(UINT_PTR uLowAddress, UINT_PTR uHighAddress) { CONTRACTL { NOTHROW; GC_NOTRIGGER; CANNOT_TAKE_LOCK; } CONTRACTL_END; MEMORY_BASIC_INFORMATION meminfo; UINT_PTR uStartOfThisRegion = uLowAddress; LOG((LF_EH, LL_INFO1000, "----------------------------------------------------------------------\n")); while (uStartOfThisRegion < uHighAddress) { SIZE_T res = ClrVirtualQuery((const void *)uStartOfThisRegion, &meminfo, sizeof(meminfo)); if (sizeof(meminfo) != res) { LOG((LF_EH, LL_INFO1000, "VirtualQuery failed on %p\n", uStartOfThisRegion)); break; } UINT_PTR uStartOfNextRegion = uStartOfThisRegion + meminfo.RegionSize; if (uStartOfNextRegion > uHighAddress) { uStartOfNextRegion = uHighAddress; } UINT_PTR uRegionSize = uStartOfNextRegion - uStartOfThisRegion; LOG((LF_EH, LL_INFO1000, "0x%p -> 0x%p (%d pg) ", uStartOfThisRegion, uStartOfNextRegion - 1, uRegionSize / GetOsPageSize())); DebugLogMBIFlags(meminfo.State, meminfo.Protect); LOG((LF_EH, LL_INFO1000, "\n")); uStartOfThisRegion = uStartOfNextRegion; } LOG((LF_EH, LL_INFO1000, "----------------------------------------------------------------------\n")); } // static void Thread::DebugLogStackMBIs() { CONTRACTL { NOTHROW; GC_NOTRIGGER; CANNOT_TAKE_LOCK; } CONTRACTL_END; Thread* pThread = GetThreadNULLOk(); // N.B. this can be NULL! 
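// ---------------------------------------------------------------------------
// [Illustrative sketch, compiled out] GetLastNormalStackAddress above adds the
// stack guarantee, one extra fault page (on non-Unix builds) and the hard guard
// region to the stack limit; anything below that address belongs to the guard
// machinery.  A standalone restatement of the arithmetic, with the sizes passed
// in as placeholder parameters rather than the runtime's constants:
#if 0
#include <cstdint>

static std::uintptr_t LastNormalStackAddress(std::uintptr_t stackLimit,
                                             std::uintptr_t stackGuarantee,
                                             std::uintptr_t faultPageSize,
                                             std::uintptr_t hardGuardRegionSize)
{
    // stackLimit is the lowest address of the allocation; the usable region
    // starts above the hard guard page(s), the fault page and the guarantee.
    return stackLimit + stackGuarantee + faultPageSize + hardGuardRegionSize;
}
#endif
// ---------------------------------------------------------------------------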
UINT_PTR uStackLimit = (UINT_PTR)GetStackLowerBound(); UINT_PTR uStackBase = (UINT_PTR)GetStackUpperBound(); if (pThread) { uStackLimit = (UINT_PTR)pThread->GetCachedStackLimit(); uStackBase = (UINT_PTR)pThread->GetCachedStackBase(); } else { uStackLimit = (UINT_PTR)GetStackLowerBound(); uStackBase = (UINT_PTR)GetStackUpperBound(); } UINT_PTR uStackSize = uStackBase - uStackLimit; LOG((LF_EH, LL_INFO1000, "----------------------------------------------------------------------\n")); LOG((LF_EH, LL_INFO1000, "Stack Snapshot 0x%p -> 0x%p (%d pg)\n", uStackLimit, uStackBase, uStackSize / GetOsPageSize())); if (pThread) { LOG((LF_EH, LL_INFO1000, "Last normal addr: 0x%p\n", pThread->GetLastNormalStackAddress())); } DebugLogStackRegionMBIs(uStackLimit, uStackBase); } #endif // _DEBUG NOINLINE void AllocateSomeStack(){ LIMITED_METHOD_CONTRACT; #ifdef TARGET_X86 const size_t size = 0x200; #else //TARGET_X86 const size_t size = 0x400; #endif //TARGET_X86 INT8* mem = (INT8*)_alloca(size); // Actually touch the memory we just allocated so the compiler can't // optimize it away completely. // NOTE: this assumes the stack grows down (towards 0). VolatileStore<INT8>(mem, 0); } #ifndef TARGET_UNIX // static // private BOOL Thread::DoesRegionContainGuardPage(UINT_PTR uLowAddress, UINT_PTR uHighAddress) { CONTRACTL { NOTHROW; GC_NOTRIGGER; CANNOT_TAKE_LOCK; } CONTRACTL_END; SIZE_T dwRes; MEMORY_BASIC_INFORMATION meminfo; UINT_PTR uStartOfCurrentRegion = uLowAddress; while (uStartOfCurrentRegion < uHighAddress) { #undef VirtualQuery // This code can run below YieldTask, which means that it must not call back into the host. // The reason is that YieldTask is invoked by the host, and the host needs not be reentrant. dwRes = VirtualQuery((const void *)uStartOfCurrentRegion, &meminfo, sizeof(meminfo)); #define VirtualQuery(lpAddress, lpBuffer, dwLength) Dont_Use_VirtualQuery(lpAddress, lpBuffer, dwLength) // If the query fails then assume we have no guard page. if (sizeof(meminfo) != dwRes) { return FALSE; } if (meminfo.Protect & PAGE_GUARD) { return TRUE; } uStartOfCurrentRegion += meminfo.RegionSize; } return FALSE; } #endif // !TARGET_UNIX /* * DetermineIfGuardPagePresent * * DetermineIfGuardPagePresent returns TRUE if the thread's stack contains a proper guard page. This function makes * a physical check of the stack, rather than relying on whether or not the CLR is currently processing a stack * overflow exception. * * It seems reasonable to want to check just the 3rd page for !MEM_COMMIT or PAGE_GUARD, but that's no good in a * world where a) one can extend the guard region arbitrarily with SetThreadStackGuarantee(), b) a thread's stack * could be pre-committed, and c) another lib might reset the guard page very high up on the stack, much as we * do. In that world, we have to do VirtualQuery from the lower bound up until we find a region with PAGE_GUARD on * it. If we've never SO'd, then that's two calls to VirtualQuery. * * Parameters: * None * * Returns: * TRUE if the thread has a guard page, FALSE otherwise. */ BOOL Thread::DetermineIfGuardPagePresent() { CONTRACTL { NOTHROW; GC_NOTRIGGER; CANNOT_TAKE_LOCK; } CONTRACTL_END; #ifndef TARGET_UNIX BOOL bStackGuarded = FALSE; UINT_PTR uStackBase = (UINT_PTR)GetCachedStackBase(); UINT_PTR uStackLimit = (UINT_PTR)GetCachedStackLimit(); // Note: we start our queries after the hard guard page (one page up from the base of the stack.) 
We know the // very last region of the stack is never the guard page (its always the uncomitted "hard" guard page) so there's // no need to waste a query on it. bStackGuarded = DoesRegionContainGuardPage(uStackLimit + HARD_GUARD_REGION_SIZE, uStackBase); LOG((LF_EH, LL_INFO10000, "Thread::DetermineIfGuardPagePresent: stack guard page: %s\n", bStackGuarded ? "PRESENT" : "MISSING")); return bStackGuarded; #else // !TARGET_UNIX return TRUE; #endif // !TARGET_UNIX } /* * GetLastNormalStackAddress * * GetLastNormalStackAddress returns the last stack address before the guard * region of this thread. This is the last address that one could write to * before a stack overflow occurs. * * Parameters: * None * * Returns: * Address of the first page of the guard region. */ UINT_PTR Thread::GetLastNormalStackAddress() { WRAPPER_NO_CONTRACT; return GetLastNormalStackAddress((UINT_PTR)m_CacheStackLimit); } /* * GetStackGuarantee * * Returns the amount of stack guaranteed after an SO but before the OS rips the process. * * Parameters: * none * * Returns: * The stack guarantee in OS pages. */ UINT_PTR Thread::GetStackGuarantee() { WRAPPER_NO_CONTRACT; #ifndef TARGET_UNIX // There is a new API available on new OS's called SetThreadStackGuarantee. It allows you to change the size of // the guard region on a per-thread basis. If we're running on an OS that supports the API, then we must query // it to see if someone has changed the size of the guard region for this thread. if (!IsSetThreadStackGuaranteeInUse()) { return SIZEOF_DEFAULT_STACK_GUARANTEE; } ULONG cbNewStackGuarantee = 0; // Passing in a value of 0 means that we're querying, and the value is changed with the new guard region // size. if (::SetThreadStackGuarantee(&cbNewStackGuarantee) && (cbNewStackGuarantee != 0)) { return cbNewStackGuarantee; } #endif // TARGET_UNIX return SIZEOF_DEFAULT_STACK_GUARANTEE; } #ifndef TARGET_UNIX // // MarkPageAsGuard // // Given a page base address, try to turn it into a guard page and then requery to determine success. // // static // private BOOL Thread::MarkPageAsGuard(UINT_PTR uGuardPageBase) { CONTRACTL { NOTHROW; GC_NOTRIGGER; CANNOT_TAKE_LOCK; } CONTRACTL_END; DWORD flOldProtect; ClrVirtualProtect((LPVOID)uGuardPageBase, 1, (PAGE_READWRITE | PAGE_GUARD), &flOldProtect); // Intentionally ignore return value -- if it failed, we'll find out below // and keep moving up the stack until we either succeed or we hit the guard // region. If we don't succeed before we hit the guard region, we'll end up // with a fatal error. // Now, make sure the guard page is really there. If its not, then VirtualProtect most likely failed // because our stack had grown onto the page we were trying to protect by the time we made it into // VirtualProtect. So try the next page down. MEMORY_BASIC_INFORMATION meminfo; SIZE_T dwRes; dwRes = ClrVirtualQuery((const void *)uGuardPageBase, &meminfo, sizeof(meminfo)); return ((sizeof(meminfo) == dwRes) && (meminfo.Protect & PAGE_GUARD)); } /* * RestoreGuardPage * * RestoreGuardPage will replace the guard page on this thread's stack. The assumption is that it was removed by * the OS due to a stack overflow exception. This function requires that you know that you have enough stack space * to restore the guard page, so make sure you know what you're doing when you decide to call this. 
* * Parameters: * None * * Returns: * Nothing */ VOID Thread::RestoreGuardPage() { CONTRACTL { NOTHROW; GC_NOTRIGGER; CANNOT_TAKE_LOCK; } CONTRACTL_END; BOOL bStackGuarded = DetermineIfGuardPagePresent(); // If the guard page is still there, then just return. if (bStackGuarded) { LOG((LF_EH, LL_INFO100, "Thread::RestoreGuardPage: no need to restore... guard page is already there.\n")); return; } UINT_PTR approxStackPointer; UINT_PTR guardPageBase; UINT_PTR guardRegionThreshold; BOOL pageMissing; if (!bStackGuarded) { // The normal guard page is the 3rd page from the base. The first page is the "hard" guard, the second one is // reserve, and the 3rd one is marked as a guard page. However, since there is now an API (on some platforms) // to change the size of the guard region, we'll just go ahead and protect the next page down from where we are // now. The guard page will get pushed forward again, just like normal, until the next stack overflow. approxStackPointer = (UINT_PTR)GetCurrentSP(); guardPageBase = (UINT_PTR)ALIGN_DOWN(approxStackPointer, GetOsPageSize()) - GetOsPageSize(); // OS uses soft guard page to update the stack info in TEB. If our guard page is not beyond the current stack, the TEB // will not be updated, and then OS's check of stack during exception will fail. if (approxStackPointer >= guardPageBase) { guardPageBase -= GetOsPageSize(); } // If we're currently "too close" to the page we want to mark as a guard then the call to VirtualProtect to set // PAGE_GUARD will fail, but it won't return an error. Therefore, we protect the page, then query it to make // sure it worked. If it didn't, we try the next page down. We'll either find a page to protect, or run into // the guard region and rip the process down with EEPOLICY_HANDLE_FATAL_ERROR below. guardRegionThreshold = GetLastNormalStackAddress(); pageMissing = TRUE; while (pageMissing) { LOG((LF_EH, LL_INFO10000, "Thread::RestoreGuardPage: restoring guard page @ 0x%p, approxStackPointer=0x%p, " "last normal stack address=0x%p\n", guardPageBase, approxStackPointer, guardRegionThreshold)); // Make sure we set the guard page above the guard region. if (guardPageBase < guardRegionThreshold) { goto lFatalError; } if (MarkPageAsGuard(guardPageBase)) { // The current GuardPage should be beyond the current SP. _ASSERTE (guardPageBase < approxStackPointer); pageMissing = FALSE; } else { guardPageBase -= GetOsPageSize(); } } } INDEBUG(DebugLogStackMBIs()); return; lFatalError: STRESS_LOG2(LF_EH, LL_ALWAYS, "Thread::RestoreGuardPage: too close to the guard region (0x%p) to restore guard page @0x%p\n", guardRegionThreshold, guardPageBase); _ASSERTE(!"Too close to the guard page to reset it!"); EEPOLICY_HANDLE_FATAL_ERROR(COR_E_STACKOVERFLOW); } #endif // !TARGET_UNIX #endif // #ifndef DACCESS_COMPILE // // InitRegDisplay: initializes a REGDISPLAY for a thread. If validContext // is false, pRD is filled from the current context of the thread. The // thread's current context is also filled in pctx. If validContext is true, // pctx should point to a valid context and pRD is filled from that. 
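// --- Illustrative aside (standalone sketch, not part of this file): the protect-then-verify idiom
// used by MarkPageAsGuard and the restore loop above, demonstrated on a private VirtualAlloc'd
// page rather than the thread's real stack.
#include <windows.h>
#include <cstdio>

int main()
{
    SYSTEM_INFO si;
    GetSystemInfo(&si);
    void *page = VirtualAlloc(nullptr, si.dwPageSize, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
    if (page == nullptr)
        return 1;

    // Ask for PAGE_GUARD, then re-query rather than trusting the return value alone,
    // mirroring the "protect the page, then query it to make sure it worked" comment above.
    DWORD oldProtect = 0;
    VirtualProtect(page, 1, PAGE_READWRITE | PAGE_GUARD, &oldProtect);

    MEMORY_BASIC_INFORMATION mbi;
    bool guarded = (VirtualQuery(page, &mbi, sizeof(mbi)) == sizeof(mbi)) &&
                   ((mbi.Protect & PAGE_GUARD) != 0);
    printf("guard page %s\n", guarded ? "armed" : "missing");

    VirtualFree(page, 0, MEM_RELEASE);
    return 0;
}
// --- End of aside.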
// bool Thread::InitRegDisplay(const PREGDISPLAY pRD, PT_CONTEXT pctx, bool validContext) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; if (!validContext) { if (GetFilterContext()!= NULL) { pctx = GetFilterContext(); } else { #ifdef DACCESS_COMPILE DacNotImpl(); #else pctx->ContextFlags = CONTEXT_FULL; _ASSERTE(this != GetThreadNULLOk()); // do not call GetThreadContext on the active thread BOOL ret = EEGetThreadContext(this, pctx); if (!ret) { SetIP(pctx, 0); #ifdef TARGET_X86 pRD->ControlPC = pctx->Eip; pRD->PCTAddr = (TADDR)&(pctx->Eip); #elif defined(TARGET_AMD64) // nothing more to do here, on Win64 setting the IP to 0 is enough. #elif defined(TARGET_ARM) // nothing more to do here, on Win64 setting the IP to 0 is enough. #else PORTABILITY_ASSERT("NYI for platform Thread::InitRegDisplay"); #endif return false; } #endif // DACCESS_COMPILE } } FillRegDisplay( pRD, pctx ); return true; } void Thread::FillRegDisplay(const PREGDISPLAY pRD, PT_CONTEXT pctx) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; ::FillRegDisplay(pRD, pctx); #if defined(DEBUG_REGDISPLAY) && !defined(TARGET_X86) CONSISTENCY_CHECK(!pRD->_pThread || pRD->_pThread == this); pRD->_pThread = this; CheckRegDisplaySP(pRD); #endif // defined(DEBUG_REGDISPLAY) && !defined(TARGET_X86) } #ifdef DEBUG_REGDISPLAY void CheckRegDisplaySP (REGDISPLAY *pRD) { if (pRD->SP && pRD->_pThread) { #ifndef NO_FIXED_STACK_LIMIT _ASSERTE(pRD->_pThread->IsExecutingOnAltStack() || PTR_VOID(pRD->SP) >= pRD->_pThread->GetCachedStackLimit()); #endif // NO_FIXED_STACK_LIMIT _ASSERTE(pRD->_pThread->IsExecutingOnAltStack() || PTR_VOID(pRD->SP) < pRD->_pThread->GetCachedStackBase()); } } #endif // DEBUG_REGDISPLAY // Trip Functions // ============== // When a thread reaches a safe place, it will rendezvous back with us, via one of // the following trip functions: void CommonTripThread() { #ifndef DACCESS_COMPILE CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; Thread *thread = GetThread(); thread->HandleThreadAbort (); if (thread->CatchAtSafePoint()) { _ASSERTE(!ThreadStore::HoldingThreadStore(thread)); #ifdef FEATURE_HIJACK thread->UnhijackThread(); #endif // FEATURE_HIJACK // Trap thread->PulseGCMode(); } #else DacNotImpl(); #endif // #ifndef DACCESS_COMPILE } #ifndef DACCESS_COMPILE void Thread::SetFilterContext(CONTEXT *pContext) { // SetFilterContext is like pushing a Frame onto the Frame chain. CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_COOPERATIVE; // Absolutely must be in coop to coordinate w/ Runtime suspension. PRECONDITION(GetThread() == this); // must be on current thread. 
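// --- Illustrative aside (standalone sketch, not part of this file): the capture pattern behind
// InitRegDisplay/EEGetThreadContext — suspend a *different* thread, snapshot its CONTEXT, then
// resume it. As the assert above notes, a running thread must not do this to itself.
// SpinWorker is a hypothetical stand-in for the thread being inspected.
#include <windows.h>
#include <cstdio>

static DWORD WINAPI SpinWorker(LPVOID)
{
    for (;;)
        Sleep(10); // give the target thread something to do
}

int main()
{
    HANDLE hWorker = CreateThread(nullptr, 0, SpinWorker, nullptr, 0, nullptr);
    if (hWorker == nullptr)
        return 1;
    Sleep(50); // let the worker start running

    CONTEXT ctx = {};
    ctx.ContextFlags = CONTEXT_FULL;
    if (SuspendThread(hWorker) != (DWORD)-1)
    {
        if (GetThreadContext(hWorker, &ctx))
        {
#if defined(_M_X64)
            printf("worker RIP = %p\n", (void *)ctx.Rip);
#else
            printf("captured worker context\n");
#endif
        }
        ResumeThread(hWorker);
    }
    return 0;
}
// --- End of aside.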
} CONTRACTL_END; m_debuggerFilterContext = pContext; } #endif // #ifndef DACCESS_COMPILE T_CONTEXT *Thread::GetFilterContext(void) { LIMITED_METHOD_DAC_CONTRACT; return m_debuggerFilterContext; } #ifndef DACCESS_COMPILE void Thread::ClearContext() { CONTRACTL { NOTHROW; if (GetThreadNULLOk()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} } CONTRACTL_END; if (!m_pDomain) return; // must set exposed context to null first otherwise object verification // checks will fail AV when m_Context is null m_pDomain = NULL; #ifdef FEATURE_COMINTEROP m_fDisableComObjectEagerCleanup = false; #endif //FEATURE_COMINTEROP } BOOL Thread::HaveExtraWorkForFinalizer() { LIMITED_METHOD_CONTRACT; return RequireSyncBlockCleanup() || ThreadpoolMgr::HaveTimerInfosToFlush() || Thread::CleanupNeededForFinalizedThread() || (m_DetachCount > 0) || SystemDomain::System()->RequireAppDomainCleanup() || YieldProcessorNormalization::IsMeasurementScheduled() || ThreadStore::s_pThreadStore->ShouldTriggerGCForDeadThreads(); } void Thread::DoExtraWorkForFinalizer() { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; _ASSERTE(GetThread() == this); _ASSERTE(this == FinalizerThread::GetFinalizerThread()); #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT if (RequiresCoInitialize()) { SetApartment(AS_InMTA); } #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT if (RequireSyncBlockCleanup()) { #ifndef TARGET_UNIX InteropSyncBlockInfo::FlushStandbyList(); #endif // !TARGET_UNIX #ifdef FEATURE_COMINTEROP RCW::FlushStandbyList(); #endif // FEATURE_COMINTEROP SyncBlockCache::GetSyncBlockCache()->CleanupSyncBlocks(); } if (SystemDomain::System()->RequireAppDomainCleanup()) { SystemDomain::System()->ProcessDelayedUnloadLoaderAllocators(); } if(m_DetachCount > 0 || Thread::CleanupNeededForFinalizedThread()) { Thread::CleanupDetachedThreads(); } // If there were any TimerInfos waiting to be released, they'll get flushed now ThreadpoolMgr::FlushQueueOfTimerInfos(); if (YieldProcessorNormalization::IsMeasurementScheduled()) { GCX_PREEMP(); YieldProcessorNormalization::PerformMeasurement(); } ThreadStore::s_pThreadStore->TriggerGCForDeadThreadsIfNecessary(); } // HELPERS FOR THE BASE OF A MANAGED THREAD, INCLUDING AD TRANSITION SUPPORT // We have numerous places where we start up a managed thread. This includes several places in the // ThreadPool, the 'new Thread(...).Start()' case, and the Finalizer. Try to factor the code so our // base exception handling behavior is consistent across those places. The resulting code is convoluted, // but it's better than the prior situation of each thread being on a different plan. // We need Middle & Outer methods for the usual problem of combining C++ & SEH. /* The effect of all this is that we get: Base of thread -- OS unhandled exception filter that we hook SEH handler from DispatchOuter C++ handler from DispatchMiddle User code that obviously can throw. */ struct ManagedThreadCallState { ADCallBackFcnType pTarget; LPVOID args; UnhandledExceptionLocation filterType; ManagedThreadCallState(ADCallBackFcnType Target,LPVOID Args, UnhandledExceptionLocation FilterType): pTarget(Target), args(Args), filterType(FilterType) { LIMITED_METHOD_CONTRACT; }; }; // The following static helpers are outside of the ManagedThreadBase struct because I // don't want to change threads.h whenever I change the mechanism for how unhandled // exceptions works. The ManagedThreadBase struct is for the public exposure of the // API only. 
static void ManagedThreadBase_DispatchOuter(ManagedThreadCallState *pCallState); static void ManagedThreadBase_DispatchInner(ManagedThreadCallState *pCallState) { CONTRACTL { GC_TRIGGERS; THROWS; MODE_COOPERATIVE; } CONTRACTL_END; // Go ahead and dispatch the call. (*pCallState->pTarget) (pCallState->args); } static void ManagedThreadBase_DispatchMiddle(ManagedThreadCallState *pCallState) { STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_THROWS; STATIC_CONTRACT_MODE_COOPERATIVE; EX_TRY_CPP_ONLY { // During an unwind, we have some cleanup: // // 1) We should no longer suppress any unhandled exception reporting at the base // of the thread, because any handler that contained the exception to the AppDomain // where it occurred is now being removed from the stack. // // 2) We need to unwind the Frame chain. We cannot do it when we get to the __except clause // because at this point we are in the 2nd phase and the stack has been popped. Any // stack crawling from another thread will see a frame chain in a popped region of stack. // Nor can we pop it in a filter, since this would destroy all the stack-walking information // we need to perform the 2nd pass. So doing it in a C++ destructor will ensure it happens // during the 2nd pass but before the stack is actually popped. class Cleanup { Frame *m_pEntryFrame; Thread *m_pThread; public: Cleanup(Thread* pThread) { m_pThread = pThread; m_pEntryFrame = pThread->m_pFrame; } ~Cleanup() { GCX_COOP(); m_pThread->SetFrame(m_pEntryFrame); } }; Cleanup cleanup(GetThread()); ManagedThreadBase_DispatchInner(pCallState); } EX_CATCH_CPP_ONLY { GCX_COOP(); Exception *pException = GET_EXCEPTION(); // RudeThreadAbort is a pre-allocated instance of ThreadAbort. So the following is sufficient. // For Whidbey, by default only swallow certain exceptions. If reverting back to Everett's // behavior (swallowing all unhandled exception), then swallow all unhandled exception. // if (IsExceptionOfType(kThreadAbortException, pException)) { // Do nothing to swallow the exception } else { // Setting up the unwind_and_continue_handler ensures that C++ exceptions do not leak out. // // Without unwind_and_continue_handler below, the exception will fly up the stack to // this point, where it will be rethrown and thus leak out. INSTALL_UNWIND_AND_CONTINUE_HANDLER; EX_RETHROW; UNINSTALL_UNWIND_AND_CONTINUE_HANDLER; } } EX_END_CATCH(SwallowAllExceptions); } /* typedef struct Param { ManagedThreadCallState * m_pCallState; Frame * m_pFrame; Param(ManagedThreadCallState * pCallState, Frame * pFrame): m_pCallState(pCallState), m_pFrame(pFrame) {} } TryParam; */ typedef struct Param: public NotifyOfCHFFilterWrapperParam { ManagedThreadCallState * m_pCallState; Param(ManagedThreadCallState * pCallState): m_pCallState(pCallState) {} } TryParam; // Dispatch to the appropriate filter, based on the active CallState. static LONG ThreadBaseRedirectingFilter(PEXCEPTION_POINTERS pExceptionInfo, LPVOID pParam) { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_MODE_ANY; TryParam * pRealParam = reinterpret_cast<TryParam *>(pParam); ManagedThreadCallState * _pCallState = pRealParam->m_pCallState; LONG ret = -1; // This will invoke the swallowing filter. If that returns EXCEPTION_CONTINUE_SEARCH, // it will trigger unhandled exception processing. 
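// --- Illustrative aside (MSVC-specific standalone sketch, not part of this file): the
// filter-driven shape used above, where the filter's return value decides whether the handler
// runs (EXCEPTION_EXECUTE_HANDLER) or the search continues up the stack
// (EXCEPTION_CONTINUE_SEARCH). SketchFilter/SketchDivide are hypothetical names.
#include <windows.h>
#include <cstdio>

static LONG SketchFilter(EXCEPTION_POINTERS *info)
{
    DWORD code = info->ExceptionRecord->ExceptionCode;
    printf("filter saw exception 0x%08lx\n", code);
    return (code == EXCEPTION_INT_DIVIDE_BY_ZERO) ? EXCEPTION_EXECUTE_HANDLER
                                                  : EXCEPTION_CONTINUE_SEARCH;
}

static int SketchDivide(int a, int b)
{
    __try
    {
        return a / b;
    }
    __except (SketchFilter(GetExceptionInformation()))
    {
        return 0; // swallowed by the handler, as in the handled case above
    }
}

int main()
{
    volatile int zero = 0;
    printf("result: %d\n", SketchDivide(10, zero));
    return 0;
}
// --- End of aside.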
// WARNING - ThreadBaseExceptionAppDomainFilter may not return // This occurs when the debugger decides to intercept an exception and catch it in a frame closer // to the leaf than the one executing this filter ret = ThreadBaseExceptionAppDomainFilter(pExceptionInfo, _pCallState); // Although EXCEPTION_EXECUTE_HANDLER can also be returned in cases corresponding to // unhandled exceptions, all of those cases have already notified the debugger of an unhandled // exception which prevents a second notification indicating the exception was caught if (ret == EXCEPTION_EXECUTE_HANDLER) { // WARNING - NotifyOfCHFFilterWrapper may not return // This occurs when the debugger decides to intercept an exception and catch it in a frame closer // to the leaf than the one executing this filter NotifyOfCHFFilterWrapper(pExceptionInfo, pRealParam); } // Get the reference to the current thread.. Thread *pCurThread = GetThread(); // // In the default domain, when an exception goes unhandled on a managed thread whose threadbase is in the VM (e.g. explicitly spawned threads, // ThreadPool threads, finalizer thread, etc), CLR can end up in the unhandled exception processing path twice. // // The first attempt to perform UE processing happens at the managed thread base (via this function). When it completes, // we will set TSNC_ProcessedUnhandledException state against the thread to indicate that we have perform the unhandled exception processing. // // On CoreSys CoreCLR, the host can ask CoreCLR to run all code in the default domain. As a result, when we return from the first attempt to perform UE // processing, the call could return back with EXCEPTION_EXECUTE_HANDLER since, like desktop CoreCLR is instructed by SL host to swallow all unhandled exceptions, // CoreSys CoreCLR can also be instructed by its Phone host to swallow all unhandled exceptions. As a result, the exception dispatch will never continue to go upstack // to the native threadbase in the OS kernel and thus, there will never be a second attempt to perform UE processing. Hence, we dont, and shouldnt, need to set // TSNC_ProcessedUnhandledException state against the thread if we are in SingleAppDomain mode and have been asked to swallow the exception. // // If we continue to set TSNC_ProcessedUnhandledException and a ThreadPool Thread A has an exception go unhandled, we will swallow it correctly for the first time. // The next time Thread A has an exception go unhandled, our UEF will see TSNC_ProcessedUnhandledException set and assume (incorrectly) UE processing has happened and // will fail to honor the host policy (e.g. swallow unhandled exception). Thus, the 2nd unhandled exception may end up crashing the app when it should not. // if (ret != EXCEPTION_EXECUTE_HANDLER) { LOG((LF_EH, LL_INFO100, "ThreadBaseRedirectingFilter: setting TSNC_ProcessedUnhandledException\n")); // Since we have already done unhandled exception processing for it, we dont want it // to happen again if our UEF gets invoked upon returning back to the OS. // // Set the flag to indicate so. 
pCurThread->SetThreadStateNC(Thread::TSNC_ProcessedUnhandledException); } return ret; } static void ManagedThreadBase_DispatchOuter(ManagedThreadCallState *pCallState) { STATIC_CONTRACT_GC_TRIGGERS; STATIC_CONTRACT_THROWS; STATIC_CONTRACT_MODE_COOPERATIVE; // HasStarted() must have already been performed by our caller _ASSERTE(GetThreadNULLOk() != NULL); Thread *pThread = GetThread(); #ifdef FEATURE_EH_FUNCLETS Frame *pFrame = pThread->m_pFrame; #endif // FEATURE_EH_FUNCLETS // The sole purpose of having this frame is to tell the debugger that we have a catch handler here // which may swallow managed exceptions. The debugger needs this in order to send a // CatchHandlerFound (CHF) notification. FrameWithCookie<DebuggerU2MCatchHandlerFrame> catchFrame; TryParam param(pCallState); param.pFrame = &catchFrame; struct TryArgs { TryParam *pTryParam; Thread *pThread; BOOL *pfHadException; #ifdef FEATURE_EH_FUNCLETS Frame *pFrame; #endif // FEATURE_EH_FUNCLETS }args; args.pTryParam = &param; args.pThread = pThread; BOOL fHadException = TRUE; args.pfHadException = &fHadException; #ifdef FEATURE_EH_FUNCLETS args.pFrame = pFrame; #endif // FEATURE_EH_FUNCLETS PAL_TRY(TryArgs *, pArgs, &args) { PAL_TRY(TryParam *, pParam, pArgs->pTryParam) { ManagedThreadBase_DispatchMiddle(pParam->m_pCallState); } PAL_EXCEPT_FILTER(ThreadBaseRedirectingFilter) { // Note: one of our C++ exceptions will never reach this filter because they're always caught by // the EX_CATCH in ManagedThreadBase_DispatchMiddle(). // // If eCLRDeterminedPolicy, we only swallow for TA, RTA, and ADU exception. // For eHostDeterminedPolicy, we will swallow all the managed exception. #ifdef FEATURE_EH_FUNCLETS // this must be done after the second pass has run, it does not // reference anything on the stack, so it is safe to run in an // SEH __except clause as well as a C++ catch clause. ExceptionTracker::PopTrackers(pArgs->pFrame); #endif // FEATURE_EH_FUNCLETS _ASSERTE(!pArgs->pThread->IsAbortRequested()); } PAL_ENDTRY; *(pArgs->pfHadException) = FALSE; } PAL_FINALLY { catchFrame.Pop(); } PAL_ENDTRY; } // For the implementation, there are three variants of work possible: // 1. Establish the base of a managed thread, and switch to the correct AppDomain. static void ManagedThreadBase_FullTransition(ADCallBackFcnType pTarget, LPVOID args, UnhandledExceptionLocation filterType) { CONTRACTL { GC_TRIGGERS; THROWS; MODE_COOPERATIVE; } CONTRACTL_END; ManagedThreadCallState CallState(pTarget, args, filterType); ManagedThreadBase_DispatchOuter(&CallState); } // 2. Establish the base of a managed thread, but the AppDomain transition must be // deferred until later. void ManagedThreadBase_NoADTransition(ADCallBackFcnType pTarget, UnhandledExceptionLocation filterType) { CONTRACTL { GC_TRIGGERS; THROWS; MODE_COOPERATIVE; } CONTRACTL_END; AppDomain *pAppDomain = GetAppDomain(); ManagedThreadCallState CallState(pTarget, NULL, filterType); // self-describing, to create a pTurnAround data for eventual delivery to a subsequent AppDomain // transition. 
CallState.args = &CallState; ManagedThreadBase_DispatchOuter(&CallState); } // And here are the various exposed entrypoints for base thread behavior // The 'new Thread(...).Start()' case from COMSynchronizable kickoff thread worker void ManagedThreadBase::KickOff(ADCallBackFcnType pTarget, LPVOID args) { WRAPPER_NO_CONTRACT; ManagedThreadBase_FullTransition(pTarget, args, ManagedThread); } // The IOCompletion, QueueUserWorkItem, AddTimer, RegisterWaitForSingleObject cases in the ThreadPool void ManagedThreadBase::ThreadPool(ADCallBackFcnType pTarget, LPVOID args) { WRAPPER_NO_CONTRACT; ManagedThreadBase_FullTransition(pTarget, args, ThreadPoolThread); } // The Finalizer thread establishes exception handling at its base, but defers all the AppDomain // transitions. void ManagedThreadBase::FinalizerBase(ADCallBackFcnType pTarget) { WRAPPER_NO_CONTRACT; ManagedThreadBase_NoADTransition(pTarget, FinalizerThread); } //+---------------------------------------------------------------------------- // // Method: Thread::GetStaticFieldAddress private // // Synopsis: Get the address of the field relative to the current thread. // If an address has not been assigned yet then create one. // //+---------------------------------------------------------------------------- LPVOID Thread::GetStaticFieldAddress(FieldDesc *pFD) { CONTRACTL { THROWS; GC_TRIGGERS; } CONTRACTL_END; _ASSERTE(pFD != NULL); _ASSERTE(pFD->IsThreadStatic()); _ASSERTE(!pFD->IsRVA()); // for static field the MethodTable is exact even for generic classes MethodTable *pMT = pFD->GetEnclosingMethodTable(); // We need to make sure that the class has been allocated, however // we should not call the class constructor ThreadStatics::GetTLM(pMT)->EnsureClassAllocated(pMT); PTR_BYTE base = NULL; if (pFD->GetFieldType() == ELEMENT_TYPE_CLASS || pFD->GetFieldType() == ELEMENT_TYPE_VALUETYPE) { base = pMT->GetGCThreadStaticsBasePointer(); } else { base = pMT->GetNonGCThreadStaticsBasePointer(); } _ASSERTE(base != NULL); DWORD offset = pFD->GetOffset(); _ASSERTE(offset <= FIELD_OFFSET_LAST_REAL_OFFSET); LPVOID result = (LPVOID)((PTR_BYTE)base + (DWORD)offset); // For value classes, the handle points at an OBJECTREF // which holds the boxed value class, so derefernce and unbox. if (pFD->GetFieldType() == ELEMENT_TYPE_VALUETYPE) { OBJECTREF obj = ObjectToOBJECTREF(*(Object**) result); result = obj->GetData(); } return result; } #endif // #ifndef DACCESS_COMPILE //+---------------------------------------------------------------------------- // // Method: Thread::GetStaticFieldAddrNoCreate private // // Synopsis: Get the address of the field relative to the thread. // If an address has not been assigned, return NULL. // No creating is allowed. 
// //+---------------------------------------------------------------------------- TADDR Thread::GetStaticFieldAddrNoCreate(FieldDesc *pFD) { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; _ASSERTE(pFD != NULL); _ASSERTE(pFD->IsThreadStatic()); // for static field the MethodTable is exact even for generic classes PTR_MethodTable pMT = pFD->GetEnclosingMethodTable(); PTR_BYTE base = NULL; if (pFD->GetFieldType() == ELEMENT_TYPE_CLASS || pFD->GetFieldType() == ELEMENT_TYPE_VALUETYPE) { base = pMT->GetGCThreadStaticsBasePointer(dac_cast<PTR_Thread>(this)); } else { base = pMT->GetNonGCThreadStaticsBasePointer(dac_cast<PTR_Thread>(this)); } if (base == NULL) return NULL; DWORD offset = pFD->GetOffset(); _ASSERTE(offset <= FIELD_OFFSET_LAST_REAL_OFFSET); TADDR result = dac_cast<TADDR>(base) + (DWORD)offset; // For value classes, the handle points at an OBJECTREF // which holds the boxed value class, so derefernce and unbox. if (pFD->IsByValue()) { _ASSERTE(result != NULL); PTR_Object obj = *PTR_UNCHECKED_OBJECTREF(result); if (obj == NULL) return NULL; result = dac_cast<TADDR>(obj->GetData()); } return result; } #ifndef DACCESS_COMPILE // // NotifyFrameChainOfExceptionUnwind // ----------------------------------------------------------- // This method will walk the Frame chain from pStartFrame to // the last frame that is below pvLimitSP and will call each // frame's ExceptionUnwind method. It will return the first // Frame that is above pvLimitSP. // Frame * Thread::NotifyFrameChainOfExceptionUnwind(Frame* pStartFrame, LPVOID pvLimitSP) { CONTRACTL { NOTHROW; DISABLED(GC_TRIGGERS); // due to UnwindFrameChain from NOTRIGGER areas MODE_COOPERATIVE; PRECONDITION(CheckPointer(pStartFrame)); PRECONDITION(CheckPointer(pvLimitSP)); } CONTRACTL_END; Frame * pFrame; #ifdef _DEBUG // // assert that the specified Thread's Frame chain actually // contains the start Frame. // pFrame = m_pFrame; while ((pFrame != pStartFrame) && (pFrame != FRAME_TOP)) { pFrame = pFrame->Next(); } CONSISTENCY_CHECK_MSG(pFrame == pStartFrame, "pStartFrame is not on pThread's Frame chain!"); #endif // _DEBUG pFrame = pStartFrame; while (pFrame < pvLimitSP) { CONSISTENCY_CHECK(pFrame != PTR_NULL); CONSISTENCY_CHECK((pFrame) > static_cast<Frame *>((LPVOID)GetCurrentSP())); pFrame->ExceptionUnwind(); pFrame = pFrame->Next(); } // return the frame after the last one notified of the unwind return pFrame; } //+---------------------------------------------------------------------------- // // Method: Thread::DeleteThreadStaticData private // // Synopsis: Delete the static data for each appdomain that this thread // visited. // // //+---------------------------------------------------------------------------- void Thread::DeleteThreadStaticData() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; m_ThreadLocalBlock.FreeTable(); } //+---------------------------------------------------------------------------- // // Method: Thread::DeleteThreadStaticData public // // Synopsis: Delete the static data for the given module. This is called // when the AssemblyLoadContext unloads. // // //+---------------------------------------------------------------------------- void Thread::DeleteThreadStaticData(ModuleIndex index) { m_ThreadLocalBlock.FreeTLM(index.m_dwIndex, FALSE /* isThreadShuttingDown */); } OBJECTREF Thread::GetCulture(BOOL bUICulture) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; // This is the case when we're building CoreLib and haven't yet created // the system assembly. 
if (SystemDomain::System()->SystemAssembly()==NULL || g_fForbidEnterEE) { return NULL; } OBJECTREF pCurrentCulture; MethodDescCallSite propGet(bUICulture ? METHOD__CULTURE_INFO__GET_CURRENT_UI_CULTURE : METHOD__CULTURE_INFO__GET_CURRENT_CULTURE); ARG_SLOT retVal = propGet.Call_RetArgSlot(NULL); pCurrentCulture = ArgSlotToObj(retVal); return pCurrentCulture; } void Thread::SetCulture(OBJECTREF *CultureObj, BOOL bUICulture) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; MethodDescCallSite propSet(bUICulture ? METHOD__CULTURE_INFO__SET_CURRENT_UI_CULTURE : METHOD__CULTURE_INFO__SET_CURRENT_CULTURE); // Set up the Stack. ARG_SLOT pNewArgs[] = { ObjToArgSlot(*CultureObj) }; // Make the actual call. propSet.Call_RetArgSlot(pNewArgs); } BOOL ThreadStore::HoldingThreadStore(Thread *pThread) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; if (pThread) { return (pThread == s_pThreadStore->m_HoldingThread); } else { return (s_pThreadStore->m_holderthreadid.IsCurrentThread()); } } NOINLINE void Thread::OnIncrementCountOverflow(UINT32 *threadLocalCount, UINT64 *overflowCount) { WRAPPER_NO_CONTRACT; _ASSERTE(threadLocalCount != nullptr); _ASSERTE(overflowCount != nullptr); // Increment overflow, accumulate the count for this increment into the overflow count and reset the thread-local count // The thread store lock, in coordination with other places that read these values, ensures that both changes // below become visible together ThreadStoreLockHolder tsl; *threadLocalCount = 0; InterlockedExchangeAdd64((LONGLONG *)overflowCount, (LONGLONG)UINT32_MAX + 1); } UINT64 Thread::GetTotalCount(SIZE_T threadLocalCountOffset, UINT64 *overflowCount) { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; _ASSERTE(overflowCount != nullptr); // enumerate all threads, summing their local counts. ThreadStoreLockHolder tsl; UINT64 total = GetOverflowCount(overflowCount); Thread *pThread = NULL; while ((pThread = ThreadStore::GetAllThreadList(pThread, 0, 0)) != NULL) { total += *GetThreadLocalCountRef(pThread, threadLocalCountOffset); } return total; } UINT64 Thread::GetTotalThreadPoolCompletionCount() { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; _ASSERTE(!ThreadpoolMgr::UsePortableThreadPoolForIO()); bool usePortableThreadPool = ThreadpoolMgr::UsePortableThreadPool(); // enumerate all threads, summing their local counts. 
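// --- Illustrative aside (standalone sketch, not part of this file): a simplified version of the
// counting scheme above — each thread bumps a cheap 32-bit thread-local counter, folds a full
// 2^32 chunk into a shared 64-bit overflow total when it wraps, and readers compute
// overflow + sum of live threads' local counts. The real code coordinates with the thread store
// lock and walks ThreadStore; the mutex and namespace here are hypothetical stand-ins, and the
// per-thread registry a reader would need is omitted.
#include <atomic>
#include <cstdint>
#include <mutex>

namespace sketch {

std::mutex g_lock;                     // stands in for the thread store lock
std::atomic<uint64_t> g_overflow{0};   // accumulated full 2^32 chunks
thread_local uint32_t t_local = 0;     // per-thread fast-path counter

void Increment()
{
    if (++t_local == 0) // wrapped around: local count resets, chunk moves to the shared total
    {
        std::lock_guard<std::mutex> hold(g_lock); // lock makes reset + add visible together
        g_overflow.fetch_add(uint64_t(UINT32_MAX) + 1);
    }
}

} // namespace sketch
// --- End of aside.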
ThreadStoreLockHolder tsl; UINT64 total = GetIOThreadPoolCompletionCountOverflow(); if (!usePortableThreadPool) { total += GetWorkerThreadPoolCompletionCountOverflow(); } Thread *pThread = NULL; while ((pThread = ThreadStore::GetAllThreadList(pThread, 0, 0)) != NULL) { if (!usePortableThreadPool) { total += pThread->m_workerThreadPoolCompletionCount; } total += pThread->m_ioThreadPoolCompletionCount; } return total; } INT32 Thread::ResetManagedThreadObject(INT32 nPriority) { CONTRACTL { NOTHROW; GC_TRIGGERS; } CONTRACTL_END; GCX_COOP(); return ResetManagedThreadObjectInCoopMode(nPriority); } INT32 Thread::ResetManagedThreadObjectInCoopMode(INT32 nPriority) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_COOPERATIVE; } CONTRACTL_END; THREADBASEREF pObject = (THREADBASEREF)ObjectFromHandle(m_ExposedObject); if (pObject != NULL) { pObject->ResetName(); nPriority = pObject->GetPriority(); } return nPriority; } BOOL Thread::IsRealThreadPoolResetNeeded() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_COOPERATIVE; } CONTRACTL_END; if(!IsBackground()) return TRUE; THREADBASEREF pObject = (THREADBASEREF)ObjectFromHandle(m_ExposedObject); if(pObject != NULL) { INT32 nPriority = pObject->GetPriority(); if(nPriority != ThreadNative::PRIORITY_NORMAL) return TRUE; } return FALSE; } void Thread::InternalReset(BOOL fNotFinalizerThread, BOOL fThreadObjectResetNeeded, BOOL fResetAbort) { CONTRACTL { NOTHROW; if(!fNotFinalizerThread || fThreadObjectResetNeeded) {GC_TRIGGERS;} else {GC_NOTRIGGER;} } CONTRACTL_END; _ASSERTE (this == GetThread()); INT32 nPriority = ThreadNative::PRIORITY_NORMAL; if (!fNotFinalizerThread && this == FinalizerThread::GetFinalizerThread()) { nPriority = ThreadNative::PRIORITY_HIGHEST; } if(fThreadObjectResetNeeded) { nPriority = ResetManagedThreadObject(nPriority); } if (fResetAbort && IsAbortRequested()) { UnmarkThreadForAbort(); } if (IsThreadPoolThread() && fThreadObjectResetNeeded) { SetBackground(TRUE); if (nPriority != ThreadNative::PRIORITY_NORMAL) { SetThreadPriority(THREAD_PRIORITY_NORMAL); } } else if (!fNotFinalizerThread && this == FinalizerThread::GetFinalizerThread()) { SetBackground(TRUE); if (nPriority != ThreadNative::PRIORITY_HIGHEST) { SetThreadPriority(THREAD_PRIORITY_HIGHEST); } } } DeadlockAwareLock::DeadlockAwareLock(const char *description) : m_pHoldingThread(NULL) #ifdef _DEBUG , m_description(description) #endif { LIMITED_METHOD_CONTRACT; } DeadlockAwareLock::~DeadlockAwareLock() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; CAN_TAKE_LOCK; } CONTRACTL_END; // Wait for another thread to leave its loop in DeadlockAwareLock::TryBeginEnterLock CrstHolder lock(&g_DeadlockAwareCrst); } CHECK DeadlockAwareLock::CheckDeadlock(Thread *pThread) { CONTRACTL { PRECONDITION(g_DeadlockAwareCrst.OwnedByCurrentThread()); NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; // Note that this check is recursive in order to produce descriptive check failure messages. 
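// --- Illustrative aside (standalone sketch, not part of this file): the wait-for-chain walk that
// CheckDeadlock/CanEnterLock perform — starting from the lock we want, follow "who holds it" to
// "what is that thread blocked on" until we reach ourselves (a cycle, i.e. deadlock) or run off
// the end of the chain. ThreadRec/LockRec are hypothetical stand-ins for Thread and
// DeadlockAwareLock.
namespace sketch {

struct LockRec;
struct ThreadRec { LockRec *blockingLock = nullptr; };
struct LockRec   { ThreadRec *holder     = nullptr; };

bool WouldDeadlock(const LockRec *wanted, const ThreadRec *self)
{
    for (const LockRec *lock = wanted; lock != nullptr; )
    {
        const ThreadRec *holder = lock->holder;
        if (holder == self)    return true;   // cycle back to us: acquiring would deadlock
        if (holder == nullptr) return false;  // lock is unheld
        lock = holder->blockingLock;          // follow what the holder is waiting on
    }
    return false;                             // the holder is running free
}

} // namespace sketch
// --- End of aside.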
Thread *pHoldingThread = m_pHoldingThread.Load(); if (pThread == pHoldingThread) { CHECK_FAILF(("Lock %p (%s) is held by thread %d", this, m_description, pThread)); } if (pHoldingThread != NULL) { DeadlockAwareLock *pBlockingLock = pHoldingThread->m_pBlockingLock.Load(); if (pBlockingLock != NULL) { CHECK_MSGF(pBlockingLock->CheckDeadlock(pThread), ("Deadlock: Lock %p (%s) is held by thread %d", this, m_description, pHoldingThread)); } } CHECK_OK; } BOOL DeadlockAwareLock::CanEnterLock() { Thread * pThread = GetThread(); CONSISTENCY_CHECK_MSG(pThread->m_pBlockingLock.Load() == NULL, "Cannot block on two locks at once"); { CrstHolder lock(&g_DeadlockAwareCrst); // Look for deadlocks DeadlockAwareLock *pLock = this; while (TRUE) { Thread * holdingThread = pLock->m_pHoldingThread; if (holdingThread == pThread) { // Deadlock! return FALSE; } if (holdingThread == NULL) { // Lock is unheld break; } pLock = holdingThread->m_pBlockingLock; if (pLock == NULL) { // Thread is running free break; } } return TRUE; } } BOOL DeadlockAwareLock::TryBeginEnterLock() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; Thread * pThread = GetThread(); CONSISTENCY_CHECK_MSG(pThread->m_pBlockingLock.Load() == NULL, "Cannot block on two locks at once"); { CrstHolder lock(&g_DeadlockAwareCrst); // Look for deadlocks DeadlockAwareLock *pLock = this; while (TRUE) { Thread * holdingThread = pLock->m_pHoldingThread; if (holdingThread == pThread) { // Deadlock! return FALSE; } if (holdingThread == NULL) { // Lock is unheld break; } pLock = holdingThread->m_pBlockingLock; if (pLock == NULL) { // Thread is running free break; } } pThread->m_pBlockingLock = this; } return TRUE; }; void DeadlockAwareLock::BeginEnterLock() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; Thread * pThread = GetThread(); CONSISTENCY_CHECK_MSG(pThread->m_pBlockingLock.Load() == NULL, "Cannot block on two locks at once"); { CrstHolder lock(&g_DeadlockAwareCrst); // Look for deadlock loop CONSISTENCY_CHECK_MSG(CheckDeadlock(pThread), "Deadlock detected!"); pThread->m_pBlockingLock = this; } }; void DeadlockAwareLock::EndEnterLock() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; Thread * pThread = GetThread(); CONSISTENCY_CHECK(m_pHoldingThread.Load() == NULL || m_pHoldingThread.Load() == pThread); CONSISTENCY_CHECK(pThread->m_pBlockingLock.Load() == this); // No need to take a lock when going from blocking to holding. This // transition implies the lack of a deadlock that other threads can see. // (If they would see a deadlock after the transition, they would see // one before as well.) m_pHoldingThread = pThread; } void DeadlockAwareLock::LeaveLock() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; CONSISTENCY_CHECK(m_pHoldingThread == GetThread()); CONSISTENCY_CHECK(GetThread()->m_pBlockingLock.Load() == NULL); m_pHoldingThread = NULL; } #ifdef _DEBUG // Normally, any thread we operate on has a Thread block in its TLS. But there are // a few special threads we don't normally execute managed code on. // // There is a scenario where we run managed code on such a thread, which is when the // DLL_THREAD_ATTACH notification of an (IJW?) module calls into managed code. This // is incredibly dangerous. If a GC is provoked, the system may have trouble performing // the GC because its threads aren't available yet. 
static DWORD SpecialEEThreads[10]; static LONG cnt_SpecialEEThreads = 0; void dbgOnly_IdentifySpecialEEThread() { WRAPPER_NO_CONTRACT; LONG ourCount = FastInterlockIncrement(&cnt_SpecialEEThreads); _ASSERTE(ourCount < (LONG) ARRAY_SIZE(SpecialEEThreads)); SpecialEEThreads[ourCount-1] = ::GetCurrentThreadId(); } BOOL dbgOnly_IsSpecialEEThread() { WRAPPER_NO_CONTRACT; DWORD ourId = ::GetCurrentThreadId(); for (LONG i=0; i<cnt_SpecialEEThreads; i++) if (ourId == SpecialEEThreads[i]) return TRUE; // If we have an EE thread doing helper thread duty, then it is temporarily // 'special' too. #ifdef DEBUGGING_SUPPORTED if (g_pDebugInterface) { //<TODO>We probably should use Thread::GetThreadId</TODO> DWORD helperID = g_pDebugInterface->GetHelperThreadID(); if (helperID == ourId) return TRUE; } #endif //<TODO>Clean this up</TODO> if (GetThreadNULLOk() == NULL) return TRUE; return FALSE; } #endif // _DEBUG void Thread::StaticInitialize() { WRAPPER_NO_CONTRACT; #ifdef FEATURE_SPECIAL_USER_MODE_APC InitializeSpecialUserModeApc(); // When CET shadow stacks are enabled, support for special user-mode APCs with the necessary functionality is required _ASSERTE_ALL_BUILDS(__FILE__, !AreCetShadowStacksEnabled() || UseSpecialUserModeApc()); #endif } #ifdef FEATURE_SPECIAL_USER_MODE_APC QueueUserAPC2Proc Thread::s_pfnQueueUserAPC2Proc; static void NTAPI EmptyApcCallback(ULONG_PTR Parameter) { LIMITED_METHOD_CONTRACT; } void Thread::InitializeSpecialUserModeApc() { WRAPPER_NO_CONTRACT; static_assert_no_msg(OFFSETOF__APC_CALLBACK_DATA__ContextRecord == offsetof(CLONE_APC_CALLBACK_DATA, ContextRecord)); HMODULE hKernel32 = WszLoadLibraryEx(WINDOWS_KERNEL32_DLLNAME_W, NULL, LOAD_LIBRARY_SEARCH_SYSTEM32); // See if QueueUserAPC2 exists QueueUserAPC2Proc pfnQueueUserAPC2Proc = (QueueUserAPC2Proc)GetProcAddress(hKernel32, "QueueUserAPC2"); if (pfnQueueUserAPC2Proc == nullptr) { return; } // See if QueueUserAPC2 supports the special user-mode APC with a callback that includes the interrupted CONTEXT. A special // user-mode APC can interrupt a thread that is in user mode and not in a non-alertable wait. if (!(*pfnQueueUserAPC2Proc)(EmptyApcCallback, GetCurrentThread(), 0, SpecialUserModeApcWithContextFlags)) { return; } // In the future, once code paths using the special user-mode APC get some bake time, it should be used regardless of // whether CET shadow stacks are enabled if (AreCetShadowStacksEnabled()) { s_pfnQueueUserAPC2Proc = pfnQueueUserAPC2Proc; } } #endif // FEATURE_SPECIAL_USER_MODE_APC #endif // #ifndef DACCESS_COMPILE #ifdef DACCESS_COMPILE void STATIC_DATA::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) { WRAPPER_NO_CONTRACT; DAC_ENUM_STHIS(STATIC_DATA); } void Thread::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) { WRAPPER_NO_CONTRACT; DAC_ENUM_DTHIS(); if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE) { if (m_pDomain.IsValid()) { m_pDomain->EnumMemoryRegions(flags, true); } } if (m_debuggerFilterContext.IsValid()) { m_debuggerFilterContext.EnumMem(); } OBJECTHANDLE_EnumMemoryRegions(m_LastThrownObjectHandle); m_ExceptionState.EnumChainMemoryRegions(flags); m_ThreadLocalBlock.EnumMemoryRegions(flags); if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE) { // // Allow all of the frames on the stack to enumerate // their memory. 
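// --- Illustrative aside (standalone sketch, not part of this file): the optional-API probing
// pattern used by InitializeSpecialUserModeApc above — load the module, look up the export by
// name, and fall back gracefully when it is absent. The export is only retrieved as a generic
// FARPROC here; the real code defines a typed QueueUserAPC2Proc.
#include <windows.h>
#include <cstdio>

int main()
{
    HMODULE hKernel32 = LoadLibraryExW(L"kernel32.dll", nullptr, LOAD_LIBRARY_SEARCH_SYSTEM32);
    if (hKernel32 == nullptr)
        return 1;

    FARPROC pfn = GetProcAddress(hKernel32, "QueueUserAPC2");
    printf("QueueUserAPC2 %s on this OS\n", pfn != nullptr ? "is available" : "is not available");
    return 0;
}
// --- End of aside.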
// PTR_Frame frame = m_pFrame; while (frame.IsValid() && frame.GetAddr() != dac_cast<TADDR>(FRAME_TOP)) { frame->EnumMemoryRegions(flags); frame = frame->m_Next; } } // // Try and do a stack trace and save information // for each part of the stack. This is very vulnerable // to memory problems so ignore all exceptions here. // CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED ( EnumMemoryRegionsWorker(flags); ); } void Thread::EnumMemoryRegionsWorker(CLRDataEnumMemoryFlags flags) { WRAPPER_NO_CONTRACT; if (IsUnstarted()) { return; } T_CONTEXT context; BOOL DacGetThreadContext(Thread* thread, T_CONTEXT* context); REGDISPLAY regDisp; StackFrameIterator frameIter; TADDR previousSP = 0; //start at zero; this allows first check to always succeed. TADDR currentSP; // Init value. The Limit itself is not legal, so move one target pointer size to the smallest-magnitude // legal address. currentSP = dac_cast<TADDR>(m_CacheStackLimit) + sizeof(TADDR); if (GetFilterContext()) { context = *GetFilterContext(); } else { DacGetThreadContext(this, &context); } if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE) { AppDomain::GetCurrentDomain()->EnumMemoryRegions(flags, true); } FillRegDisplay(&regDisp, &context); frameIter.Init(this, NULL, &regDisp, 0); while (frameIter.IsValid()) { // // There are identical stack pointer checking semantics in code:ClrDataAccess::EnumMemWalkStackHelper // You ***MUST*** maintain identical semantics for both checks! // // Before we continue, we should check to be sure we have a valid // stack pointer. This is to prevent stacks that are not walked // properly due to // a) stack corruption bugs // b) bad stack walks // from continuing on indefinitely. // // We will force SP to strictly increase. // this check can only happen for real stack frames (i.e. not for explicit frames that don't update the RegDisplay) // for ia64, SP may be equal, but in this case BSP must strictly decrease. // We will force SP to be properly aligned. // We will force SP to be in the correct range. // if (frameIter.GetFrameState() == StackFrameIterator::SFITER_FRAMELESS_METHOD) { // This check cannot be applied to explicit frames; they may not move the SP at all. // Also, a single function can push several on the stack at a time with no guarantees about // ordering so we can't check that the addresses of the explicit frames are monotonically increasing. // There is the potential that the walk will not terminate if a set of explicit frames reference // each other circularly. While we could choose a limit for the number of explicit frames allowed // in a row like the total stack size/pointer size, we have no known problems with this scenario. // Thus for now we ignore it. currentSP = (TADDR)GetRegdisplaySP(&regDisp); if (currentSP <= previousSP) { _ASSERTE(!"Target stack has been corrupted, SP for current frame must be larger than previous frame."); break; } } // On windows desktop, the stack pointer should be a multiple // of pointer-size-aligned in the target address space if (currentSP % sizeof(TADDR) != 0) { _ASSERTE(!"Target stack has been corrupted, SP must be aligned."); break; } if (!IsAddressInStack(currentSP)) { _ASSERTE(!"Target stack has been corrupted, SP must in in the stack range."); break; } // Enumerate the code around the call site to help debugger stack walking heuristics PCODE callEnd = GetControlPC(&regDisp); DacEnumCodeForStackwalk(callEnd); // To stackwalk through funceval frames, we need to be sure to preserve the // DebuggerModule's m_pRuntimeDomainAssembly. 
This is the only case that doesn't use the current // vmDomainAssembly in code:DacDbiInterfaceImpl::EnumerateInternalFrames. The following // code mimics that function. // Allow failure, since we want to continue attempting to walk the stack regardless of the outcome. EX_TRY { if ((frameIter.GetFrameState() == StackFrameIterator::SFITER_FRAME_FUNCTION) || (frameIter.GetFrameState() == StackFrameIterator::SFITER_SKIPPED_FRAME_FUNCTION)) { Frame * pFrame = frameIter.m_crawl.GetFrame(); g_pDebugInterface->EnumMemoryRegionsIfFuncEvalFrame(flags, pFrame); } } EX_CATCH_RETHROW_ONLY_COR_E_OPERATIONCANCELLED MethodDesc* pMD = frameIter.m_crawl.GetFunction(); if (pMD != NULL) { pMD->EnumMemoryRegions(flags); } previousSP = currentSP; if (frameIter.Next() != SWA_CONTINUE) { break; } } } void ThreadStore::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) { SUPPORTS_DAC; WRAPPER_NO_CONTRACT; // This will write out the context of the s_pThreadStore. ie // just the pointer // s_pThreadStore.EnumMem(); if (s_pThreadStore.IsValid()) { // write out the whole ThreadStore structure DacEnumHostDPtrMem(s_pThreadStore); // The thread list may be corrupt, so just // ignore exceptions during enumeration. EX_TRY { Thread* thread = s_pThreadStore->m_ThreadList.GetHead(); LONG dwNumThreads = s_pThreadStore->m_ThreadCount; for (LONG i = 0; (i < dwNumThreads) && (thread != NULL); i++) { // Even if this thread is totally broken and we can't enum it, struggle on. // If we do not, we will leave this loop and not enum stack memory for any further threads. CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED( thread->EnumMemoryRegions(flags); ); thread = s_pThreadStore->m_ThreadList.GetNext(thread); } } EX_CATCH_RETHROW_ONLY_COR_E_OPERATIONCANCELLED } } #endif // #ifdef DACCESS_COMPILE
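// --- Illustrative aside (standalone sketch, not part of this file): the three stack-walk sanity
// checks described in EnumMemoryRegionsWorker above — each new frame's SP must strictly increase,
// stay pointer-aligned, and stay within the thread's [stackLimit, stackBase) range; otherwise the
// walk is treated as corrupt and abandoned.
#include <cstdint>

namespace sketch {

bool IsPlausibleNextSP(uintptr_t previousSP, uintptr_t currentSP,
                       uintptr_t stackLimit, uintptr_t stackBase)
{
    if (currentSP <= previousSP)
        return false; // the walk must make progress toward the stack base
    if (currentSP % sizeof(uintptr_t) != 0)
        return false; // SP must stay pointer aligned
    if (currentSP < stackLimit || currentSP >= stackBase)
        return false; // SP must stay inside this thread's stack range
    return true;
}

} // namespace sketch
// --- End of aside.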
-1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types but in the code base, we were sometimes relying on `featureSIMD` flag which can be `false` in case `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types but in the code base, we were sometimes relying on `featureSIMD` flag which can be `false` in case `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/pal/src/safecrt/wcsncpy_s.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*** *wcsncpy_s.c - copy at most n characters of wide-character string * * *Purpose: * defines wcsncpy_s() - copy at most n characters of char16_t string * *******************************************************************************/ #define _SECURECRT_FILL_BUFFER 1 #define _SECURECRT_FILL_BUFFER_THRESHOLD ((size_t)8) #include <string.h> #include <errno.h> #include <limits.h> #include "internal_securecrt.h" #include "mbusafecrt_internal.h" #define _FUNC_PROLOGUE #define _FUNC_NAME wcsncpy_s #define _CHAR char16_t #define _DEST _Dst #define _SIZE _SizeInWords #define _SRC _Src #define _COUNT _Count #include "tcsncpy_s.inl"
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*** *wcsncpy_s.c - copy at most n characters of wide-character string * * *Purpose: * defines wcsncpy_s() - copy at most n characters of char16_t string * *******************************************************************************/ #define _SECURECRT_FILL_BUFFER 1 #define _SECURECRT_FILL_BUFFER_THRESHOLD ((size_t)8) #include <string.h> #include <errno.h> #include <limits.h> #include "internal_securecrt.h" #include "mbusafecrt_internal.h" #define _FUNC_PROLOGUE #define _FUNC_NAME wcsncpy_s #define _CHAR char16_t #define _DEST _Dst #define _SIZE _SizeInWords #define _SRC _Src #define _COUNT _Count #include "tcsncpy_s.inl"
-1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types but in the code base, we were sometimes relying on `featureSIMD` flag which can be `false` in case `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types but in the code base, we were sometimes relying on `featureSIMD` flag which can be `false` in case `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/vm/array.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // File: ARRAY.CPP // // // File which contains a bunch of of array related things. // #include "common.h" #include "clsload.hpp" #include "method.hpp" #include "class.h" #include "object.h" #include "field.h" #include "util.hpp" #include "excep.h" #include "siginfo.hpp" #include "threads.h" #include "stublink.h" #include "stubcache.h" #include "dllimport.h" #include "gcdesc.h" #include "jitinterface.h" #include "eeconfig.h" #include "log.h" #include "cgensys.h" #include "array.h" #include "typestring.h" #include "sigbuilder.h" #define MAX_SIZE_FOR_VALUECLASS_IN_ARRAY 0xffff #define MAX_PTRS_FOR_VALUECLASSS_IN_ARRAY 0xffff /*****************************************************************************************/ LPCUTF8 ArrayMethodDesc::GetMethodName() { LIMITED_METHOD_DAC_CONTRACT; switch (GetArrayFuncIndex()) { case ARRAY_FUNC_GET: return "Get"; case ARRAY_FUNC_SET: return "Set"; case ARRAY_FUNC_ADDRESS: return "Address"; default: return COR_CTOR_METHOD_NAME; // ".ctor" } } /*****************************************************************************************/ DWORD ArrayMethodDesc::GetAttrs() { LIMITED_METHOD_CONTRACT; return (GetArrayFuncIndex() >= ARRAY_FUNC_CTOR) ? (mdPublic | mdRTSpecialName) : mdPublic; } #ifndef DACCESS_COMPILE /*****************************************************************************************/ // // Generate a short sig (descr) for an array accessors // VOID ArrayClass::GenerateArrayAccessorCallSig( DWORD dwRank, DWORD dwFuncType, // Load, store, or <init> PCCOR_SIGNATURE *ppSig,// Generated signature DWORD * pcSig, // Generated signature size LoaderAllocator *pLoaderAllocator, AllocMemTracker *pamTracker #ifdef FEATURE_ARRAYSTUB_AS_IL ,BOOL fForStubAsIL #endif ) { CONTRACTL { STANDARD_VM_CHECK; PRECONDITION(dwRank >= 1 && dwRank < 0x3ffff); } CONTRACTL_END; PCOR_SIGNATURE pSig; PCOR_SIGNATURE pSigMemory; DWORD dwCallSigSize = dwRank; DWORD dwArgCount = (dwFuncType == ArrayMethodDesc::ARRAY_FUNC_SET) ? dwRank+1 : dwRank; DWORD i; switch (dwFuncType) { // <callconv> <argcount> VAR 0 I4 , ... , I4 case ArrayMethodDesc::ARRAY_FUNC_GET: dwCallSigSize += 4; break; // <callconv> <argcount> VOID I4 , ... , I4 case ArrayMethodDesc::ARRAY_FUNC_CTOR: dwCallSigSize += 3; break; // <callconv> <argcount> VOID I4 , ... , I4 VAR 0 case ArrayMethodDesc::ARRAY_FUNC_SET: dwCallSigSize += 5; break; // <callconv> <argcount> BYREF VAR 0 I4 , ... 
, I4 case ArrayMethodDesc::ARRAY_FUNC_ADDRESS: dwCallSigSize += 5; #ifdef FEATURE_ARRAYSTUB_AS_IL if(fForStubAsIL) {dwArgCount++; dwCallSigSize++;} #endif break; } // If the argument count is larger than 127 then it will require 2 bytes for the encoding if (dwArgCount > 0x7f) dwCallSigSize++; pSigMemory = (PCOR_SIGNATURE)pamTracker->Track(pLoaderAllocator->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(dwCallSigSize))); pSig = pSigMemory; BYTE callConv = IMAGE_CEE_CS_CALLCONV_DEFAULT + IMAGE_CEE_CS_CALLCONV_HASTHIS; if (dwFuncType == ArrayMethodDesc::ARRAY_FUNC_ADDRESS #ifdef FEATURE_ARRAYSTUB_AS_IL && !fForStubAsIL #endif ) { callConv |= CORINFO_CALLCONV_PARAMTYPE; // Address routine needs special hidden arg } *pSig++ = callConv; pSig += CorSigCompressData(dwArgCount, pSig); // Argument count switch (dwFuncType) { case ArrayMethodDesc::ARRAY_FUNC_GET: *pSig++ = ELEMENT_TYPE_VAR; *pSig++ = 0; // variable 0 break; case ArrayMethodDesc::ARRAY_FUNC_CTOR: *pSig++ = (BYTE) ELEMENT_TYPE_VOID; // Return type break; case ArrayMethodDesc::ARRAY_FUNC_SET: *pSig++ = (BYTE) ELEMENT_TYPE_VOID; // Return type break; case ArrayMethodDesc::ARRAY_FUNC_ADDRESS: *pSig++ = (BYTE) ELEMENT_TYPE_BYREF; // Return type *pSig++ = ELEMENT_TYPE_VAR; *pSig++ = 0; // variable 0 break; } #if defined(FEATURE_ARRAYSTUB_AS_IL ) && !defined(TARGET_X86) if(dwFuncType == ArrayMethodDesc::ARRAY_FUNC_ADDRESS && fForStubAsIL) { *pSig++ = ELEMENT_TYPE_I; } #endif for (i = 0; i < dwRank; i++) *pSig++ = ELEMENT_TYPE_I4; if (dwFuncType == ArrayMethodDesc::ARRAY_FUNC_SET) { *pSig++ = ELEMENT_TYPE_VAR; *pSig++ = 0; // variable 0 } #if defined(FEATURE_ARRAYSTUB_AS_IL ) && defined(TARGET_X86) else if(dwFuncType == ArrayMethodDesc::ARRAY_FUNC_ADDRESS && fForStubAsIL) { *pSig++ = ELEMENT_TYPE_I; } #endif // Make sure the sig came out exactly as large as we expected _ASSERTE(pSig == pSigMemory + dwCallSigSize); *ppSig = pSigMemory; *pcSig = (DWORD)(pSig-pSigMemory); } // // Allocate a new MethodDesc for a fake array method. // // Based on code in class.cpp. // void ArrayClass::InitArrayMethodDesc( ArrayMethodDesc *pNewMD, PCCOR_SIGNATURE pShortSig, DWORD cShortSig, DWORD dwVtableSlot, LoaderAllocator *pLoaderAllocator, AllocMemTracker *pamTracker) { STANDARD_VM_CONTRACT; // Note: The method desc memory is zero initialized pNewMD->SetMemberDef(0); pNewMD->SetSlot((WORD) dwVtableSlot); pNewMD->SetStoredMethodSig(pShortSig, cShortSig); _ASSERTE(!pNewMD->MayHaveNativeCode()); pNewMD->SetTemporaryEntryPoint(pLoaderAllocator, pamTracker); #ifdef _DEBUG _ASSERTE(pNewMD->GetMethodName() && GetDebugClassName()); pNewMD->m_pszDebugMethodName = pNewMD->GetMethodName(); pNewMD->m_pszDebugClassName = GetDebugClassName(); pNewMD->m_pDebugMethodTable = pNewMD->GetMethodTable(); #endif // _DEBUG } /*****************************************************************************************/ MethodTable* Module::CreateArrayMethodTable(TypeHandle elemTypeHnd, CorElementType arrayKind, unsigned Rank, AllocMemTracker *pamTracker) { CONTRACTL { STANDARD_VM_CHECK; PRECONDITION(Rank > 0); } CONTRACTL_END; MethodTable * pElemMT = elemTypeHnd.GetMethodTable(); CorElementType elemType = elemTypeHnd.GetSignatureCorElementType(); // Shared EEClass if there is one MethodTable * pCanonMT = NULL; // Arrays of reference types all share the same EEClass. // // We can't share nested SZARRAYs because they have different // numbers of constructors. 
// // Unfortunately, we cannot share more because it would affect user-visible System.RuntimeMethodHandle behavior if (CorTypeInfo::IsObjRef(elemType) && elemType != ELEMENT_TYPE_SZARRAY && pElemMT != g_pObjectClass) { // This is loading the canonical version of the array so we can override OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED); pCanonMT = ClassLoader::LoadArrayTypeThrowing(TypeHandle(g_pObjectClass), arrayKind, Rank).AsMethodTable(); } BOOL containsPointers = CorTypeInfo::IsObjRef(elemType); if (elemType == ELEMENT_TYPE_VALUETYPE && pElemMT->ContainsPointers()) containsPointers = TRUE; // this is the base for every array type MethodTable *pParentClass = g_pArrayClass; _ASSERTE(pParentClass); // Must have already loaded the System.Array class _ASSERTE(pParentClass->IsFullyLoaded()); DWORD numCtors = 2; // ELEMENT_TYPE_ARRAY has two ctor functions, one with and one without lower bounds if (arrayKind == ELEMENT_TYPE_SZARRAY) { numCtors = 1; TypeHandle ptr = elemTypeHnd; while (ptr.GetInternalCorElementType() == ELEMENT_TYPE_SZARRAY) { numCtors++; ptr = ptr.GetArrayElementTypeHandle(); } } /****************************************************************************************/ // Parent class is the top level array // The vtable will have all of top level class's methods, plus any methods we have for array classes DWORD numVirtuals = pParentClass->GetNumVirtuals(); DWORD numNonVirtualSlots = numCtors + 3; // 3 for the proper rank Get, Set, Address size_t cbMT = sizeof(MethodTable); cbMT += MethodTable::GetNumVtableIndirections(numVirtuals) * sizeof(MethodTable::VTableIndir_t); // GC info size_t cbCGCDescData = 0; if (containsPointers) { cbCGCDescData += CGCDesc::ComputeSize(1); if (elemType == ELEMENT_TYPE_VALUETYPE) { size_t nSeries = CGCDesc::GetCGCDescFromMT(pElemMT)->GetNumSeries(); cbCGCDescData += (nSeries - 1)*sizeof (val_serie_item); _ASSERTE(cbCGCDescData == CGCDesc::ComputeSizeRepeating(nSeries)); } } DWORD dwMultipurposeSlotsMask = 0; dwMultipurposeSlotsMask |= MethodTable::enum_flag_HasPerInstInfo; dwMultipurposeSlotsMask |= MethodTable::enum_flag_HasInterfaceMap; if (pCanonMT == NULL) dwMultipurposeSlotsMask |= MethodTable::enum_flag_HasNonVirtualSlots; if (this != elemTypeHnd.GetModule()) dwMultipurposeSlotsMask |= MethodTable::enum_flag_HasModuleOverride; // Allocate space for optional members // We always have a non-virtual slot array, see assert at end cbMT += MethodTable::GetOptionalMembersAllocationSize(dwMultipurposeSlotsMask, FALSE, // GenericsStaticsInfo FALSE); // TokenOverflow // This is the offset of the beginning of the interface map size_t imapOffset = cbMT; // This is added after we determine the offset of the interface maps // because the memory appears before the pointer to the method table cbMT += cbCGCDescData; // Inherit top level class's interface map cbMT += pParentClass->GetNumInterfaces() * sizeof(InterfaceInfo_t); BOOL canShareVtableChunks = MethodTable::CanShareVtableChunksFrom(pParentClass, this); size_t offsetOfUnsharedVtableChunks = cbMT; // We either share all of the parent's virtual slots or none of them // If none, we need to allocate space for the slots if (!canShareVtableChunks) { cbMT += numVirtuals * sizeof(MethodTable::VTableIndir2_t); } // Canonical methodtable has an array of non virtual slots pointed to by the optional member size_t offsetOfNonVirtualSlots = 0; size_t cbArrayClass = 0; if (pCanonMT == NULL) { offsetOfNonVirtualSlots = cbMT; cbMT += numNonVirtualSlots * sizeof(PCODE); // Allocate ArrayClass (including space for
packed fields), MethodTable, and class name in one alloc. // Remember to pad allocation size for ArrayClass portion to ensure MethodTable is pointer aligned. cbArrayClass = ALIGN_UP(sizeof(ArrayClass) + sizeof(EEClassPackedFields), sizeof(void*)); } // ArrayClass already includes one void* LoaderAllocator* pAllocator= this->GetLoaderAllocator(); BYTE* pMemory = (BYTE *)pamTracker->Track(pAllocator->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(cbArrayClass) + S_SIZE_T(cbMT))); // Note: Memory allocated on loader heap is zero filled // memset(pMemory, 0, sizeof(ArrayClass) + cbMT); ArrayClass* pClass = NULL; if (pCanonMT == NULL) { pClass = ::new (pMemory) ArrayClass(); } // Head of MethodTable memory (starts after ArrayClass), this points at the GCDesc stuff in front // of a method table (if needed) BYTE* pMTHead = pMemory + cbArrayClass + cbCGCDescData; MethodTable* pMT = (MethodTable *) pMTHead; pMT->SetMultipurposeSlotsMask(dwMultipurposeSlotsMask); // Allocate the private data block ("private" during runtime in the ngen'ed case). MethodTableWriteableData * pMTWriteableData = (MethodTableWriteableData *) (BYTE *) pamTracker->Track(pAllocator->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(MethodTableWriteableData)))); pMT->SetWriteableData(pMTWriteableData); // This also disables IBC logging until the type is sufficiently initialized so // it needs to be done early pMTWriteableData->SetIsNotFullyLoadedForBuildMethodTable(); // Fill in pClass if (pClass != NULL) { pClass->SetInternalCorElementType(arrayKind); pClass->SetAttrClass (tdPublic | tdSerializable | tdSealed); // This class is public, serializable, sealed pClass->SetRank (Rank); pClass->SetArrayElementType (elemType); pClass->SetMethodTable (pMT); // Fill in the method table pClass->SetNumMethods(static_cast<WORD>(numVirtuals + numNonVirtualSlots)); pClass->SetNumNonVirtualSlots(static_cast<WORD>(numNonVirtualSlots)); } pMT->SetNumVirtuals(static_cast<WORD>(numVirtuals)); pMT->SetParentMethodTable(pParentClass); // Method tables for arrays of generic type parameters are needed for type analysis. // No instances will be created, so we can use 0 as element size. DWORD dwComponentSize = CorTypeInfo::IsGenericVariable(elemType) ? 0 : elemTypeHnd.GetSize(); if (elemType == ELEMENT_TYPE_VALUETYPE || elemType == ELEMENT_TYPE_VOID) { // The only way for dwComponentSize to be large is to be part of a value class. If this changes // then the check will need to be moved outside valueclass check.
if(dwComponentSize > MAX_SIZE_FOR_VALUECLASS_IN_ARRAY) { StackSString ssElemName; elemTypeHnd.GetName(ssElemName); StackScratchBuffer scratch; elemTypeHnd.GetAssembly()->ThrowTypeLoadException(ssElemName.GetUTF8(scratch), IDS_CLASSLOAD_VALUECLASSTOOLARGE); } } if (pClass != NULL) { pMT->SetClass(pClass); } else { pMT->SetCanonicalMethodTable(pCanonMT); } pMT->SetIsArray(arrayKind); pMT->SetArrayElementTypeHandle(elemTypeHnd); _ASSERTE(FitsIn<WORD>(dwComponentSize)); pMT->SetComponentSize(static_cast<WORD>(dwComponentSize)); pMT->SetLoaderModule(this); pMT->SetLoaderAllocator(pAllocator); pMT->SetModule(elemTypeHnd.GetModule()); if (elemTypeHnd.ContainsGenericVariables()) pMT->SetContainsGenericVariables(); #ifdef FEATURE_TYPEEQUIVALENCE if (elemTypeHnd.HasTypeEquivalence()) { // propagate the type equivalence flag pMT->SetHasTypeEquivalence(); } #endif // FEATURE_TYPEEQUIVALENCE _ASSERTE(pMT->IsClassPreInited()); // Set BaseSize to be size of non-data portion of the array DWORD baseSize = ARRAYBASE_BASESIZE; if (arrayKind == ELEMENT_TYPE_ARRAY) baseSize += Rank*sizeof(DWORD)*2; #if !defined(TARGET_64BIT) && (DATA_ALIGNMENT > 4) if (dwComponentSize >= DATA_ALIGNMENT) baseSize = (DWORD)ALIGN_UP(baseSize, DATA_ALIGNMENT); #endif // !defined(TARGET_64BIT) && (DATA_ALIGNMENT > 4) pMT->SetBaseSize(baseSize); // Because of array method table persisting, we need to copy the map for (unsigned index = 0; index < pParentClass->GetNumInterfaces(); ++index) { InterfaceInfo_t *pIntInfo = (InterfaceInfo_t *) (pMTHead + imapOffset + index * sizeof(InterfaceInfo_t)); pIntInfo->SetMethodTable((pParentClass->GetInterfaceMap() + index)->GetMethodTable()); } pMT->SetInterfaceMap(pParentClass->GetNumInterfaces(), (InterfaceInfo_t *)(pMTHead + imapOffset)); // Copy down flags for these interfaces as well. This is simplified a bit since we know that System.Array // only has a few interfaces and the flags will fit inline into the MethodTable's optional members. _ASSERTE(MethodTable::GetExtraInterfaceInfoSize(pParentClass->GetNumInterfaces()) == 0); pMT->InitializeExtraInterfaceInfo(NULL); for (UINT32 i = 0; i < pParentClass->GetNumInterfaces(); i++) { if (pParentClass->IsInterfaceDeclaredOnClass(i)) pMT->SetInterfaceDeclaredOnClass(i); } // The type is sufficiently initialized for most general purpose accessor methods to work. // Mark the type as restored to avoid asserts. Note that this also enables IBC logging. 
pMTWriteableData->SetIsRestoredForBuildArrayMethodTable(); { // Fill out the vtable indirection slots MethodTable::VtableIndirectionSlotIterator it = pMT->IterateVtableIndirectionSlots(); while (it.Next()) { if (canShareVtableChunks) { // Share the parent chunk it.SetIndirectionSlot(pParentClass->GetVtableIndirections()[it.GetIndex()]); } else { // Use the locally allocated chunk it.SetIndirectionSlot((MethodTable::VTableIndir2_t *)(pMemory+cbArrayClass+offsetOfUnsharedVtableChunks)); offsetOfUnsharedVtableChunks += it.GetSize(); } } // If we are not sharing parent chunks, copy down the slot contents if (!canShareVtableChunks) { // Copy top level class's vtable - note, vtable is contained within the MethodTable MethodTable::MethodDataWrapper hParentMTData(MethodTable::GetMethodData(pParentClass, FALSE)); for (UINT32 i = 0; i < numVirtuals; i++) { pMT->CopySlotFrom(i, hParentMTData, pParentClass); } } if (pClass != NULL) pMT->SetNonVirtualSlotsArray((PTR_PCODE)(pMemory+cbArrayClass+offsetOfNonVirtualSlots)); } #ifdef _DEBUG StackSString debugName; TypeString::AppendType(debugName, TypeHandle(pMT)); StackScratchBuffer buff; const char* pDebugNameUTF8 = debugName.GetUTF8(buff); S_SIZE_T safeLen = S_SIZE_T(strlen(pDebugNameUTF8))+S_SIZE_T(1); if(safeLen.IsOverflow()) COMPlusThrowHR(COR_E_OVERFLOW); size_t len = safeLen.Value(); char * name = (char*) pamTracker->Track(pAllocator-> GetHighFrequencyHeap()-> AllocMem(safeLen)); strcpy_s(name, len, pDebugNameUTF8); if (pClass != NULL) pClass->SetDebugClassName(name); pMT->SetDebugClassName(name); #endif // _DEBUG if (pClass != NULL) { // Count the number of method descs we need so we can allocate chunks. DWORD dwMethodDescs = numCtors + 3; // for rank specific Get, Set, Address MethodDescChunk * pChunks = MethodDescChunk::CreateChunk(pAllocator->GetHighFrequencyHeap(), dwMethodDescs, mcArray, FALSE /* fNonVtableSlot*/, FALSE /* fNativeCodeSlot */, FALSE /* fComPlusCallInfo */, pMT, pamTracker); pClass->SetChunks(pChunks); MethodTable::IntroducedMethodIterator it(pMT); DWORD dwMethodIndex = 0; for (; it.IsValid(); it.Next()) { ArrayMethodDesc* pNewMD = (ArrayMethodDesc *) it.GetMethodDesc(); _ASSERTE(pNewMD->GetClassification() == mcArray); DWORD dwFuncRank; DWORD dwFuncType; if (dwMethodIndex < ArrayMethodDesc::ARRAY_FUNC_CTOR) { // Generate a new stand-alone, Rank Specific Get, Set and Address method. dwFuncRank = Rank; dwFuncType = dwMethodIndex; } else { if (arrayKind == ELEMENT_TYPE_SZARRAY) { // For SZARRAY arrays, set up multiple constructors. dwFuncRank = 1 + (dwMethodIndex - ArrayMethodDesc::ARRAY_FUNC_CTOR); } else { // ELEMENT_TYPE_ARRAY has two constructors, one without lower bounds and one with lower bounds _ASSERTE((dwMethodIndex == ArrayMethodDesc::ARRAY_FUNC_CTOR) || (dwMethodIndex == ArrayMethodDesc::ARRAY_FUNC_CTOR+1)); dwFuncRank = (dwMethodIndex == ArrayMethodDesc::ARRAY_FUNC_CTOR) ? 
Rank : 2 * Rank; } dwFuncType = ArrayMethodDesc::ARRAY_FUNC_CTOR; } PCCOR_SIGNATURE pSig; DWORD cSig; pClass->GenerateArrayAccessorCallSig(dwFuncRank, dwFuncType, &pSig, &cSig, pAllocator, pamTracker #ifdef FEATURE_ARRAYSTUB_AS_IL ,0 #endif ); pClass->InitArrayMethodDesc(pNewMD, pSig, cSig, numVirtuals + dwMethodIndex, pAllocator, pamTracker); dwMethodIndex++; } _ASSERTE(dwMethodIndex == dwMethodDescs); } // Set up GC information if (elemType == ELEMENT_TYPE_VALUETYPE || elemType == ELEMENT_TYPE_VOID) { // If it's an array of value classes, there is a different format for the GCDesc if it contains pointers if (pElemMT->ContainsPointers()) { CGCDescSeries *pSeries; // There must be only one series for value classes CGCDescSeries *pByValueSeries = CGCDesc::GetCGCDescFromMT(pElemMT)->GetHighestSeries(); pMT->SetContainsPointers(); // negative series has a special meaning, indicating a different form of GCDesc SSIZE_T nSeries = (SSIZE_T) CGCDesc::GetCGCDescFromMT(pElemMT)->GetNumSeries(); CGCDesc::GetCGCDescFromMT(pMT)->InitValueClassSeries(pMT, nSeries); pSeries = CGCDesc::GetCGCDescFromMT(pMT)->GetHighestSeries(); // sort by offset SSIZE_T AllocSizeSeries; if (!ClrSafeInt<SSIZE_T>::multiply(sizeof(CGCDescSeries*), nSeries, AllocSizeSeries)) COMPlusThrowOM(); CGCDescSeries** sortedSeries = (CGCDescSeries**) _alloca(AllocSizeSeries); int index; for (index = 0; index < nSeries; index++) sortedSeries[index] = &pByValueSeries[-index]; // section sort for (int i = 0; i < nSeries; i++) { for (int j = i+1; j < nSeries; j++) if (sortedSeries[j]->GetSeriesOffset() < sortedSeries[i]->GetSeriesOffset()) { CGCDescSeries* temp = sortedSeries[i]; sortedSeries[i] = sortedSeries[j]; sortedSeries[j] = temp; } } // Offset of the first pointer in the array // This equals the offset of the first pointer if this were an array of entirely pointers, plus the offset of the // first pointer in the value class pSeries->SetSeriesOffset(ArrayBase::GetDataPtrOffset(pMT) + (sortedSeries[0]->GetSeriesOffset()) - OBJECT_SIZE); for (index = 0; index < nSeries; index ++) { size_t numPtrsInBytes = sortedSeries[index]->GetSeriesSize() + pElemMT->GetBaseSize(); size_t currentOffset; size_t skip; currentOffset = sortedSeries[index]->GetSeriesOffset()+numPtrsInBytes; if (index != nSeries-1) { skip = sortedSeries[index+1]->GetSeriesOffset()-currentOffset; } else if (index == 0) { skip = pElemMT->GetNumInstanceFieldBytes() - numPtrsInBytes; } else { skip = sortedSeries[0]->GetSeriesOffset() + pElemMT->GetBaseSize() - OBJECT_BASESIZE - currentOffset; } _ASSERTE(!"Module::CreateArrayMethodTable() - unaligned GC info" || IS_ALIGNED(skip, TARGET_POINTER_SIZE)); unsigned short NumPtrs = (unsigned short) (numPtrsInBytes / TARGET_POINTER_SIZE); if(skip > MAX_SIZE_FOR_VALUECLASS_IN_ARRAY || numPtrsInBytes > MAX_PTRS_FOR_VALUECLASSS_IN_ARRAY) { StackSString ssElemName; elemTypeHnd.GetName(ssElemName); StackScratchBuffer scratch; elemTypeHnd.GetAssembly()->ThrowTypeLoadException(ssElemName.GetUTF8(scratch), IDS_CLASSLOAD_VALUECLASSTOOLARGE); } val_serie_item *val_item = &(pSeries->val_serie[-index]); val_item->set_val_serie_item (NumPtrs, (unsigned short)skip); } } } else if (CorTypeInfo::IsObjRef(elemType)) { CGCDescSeries *pSeries; pMT->SetContainsPointers(); // This array is all GC Pointers CGCDesc::GetCGCDescFromMT(pMT)->Init( pMT, 1 ); pSeries = CGCDesc::GetCGCDescFromMT(pMT)->GetHighestSeries(); pSeries->SetSeriesOffset(ArrayBase::GetDataPtrOffset(pMT)); // For arrays, the size is the negative of the BaseSize (the GC always adds the 
total // size of the object, so what you end up with is the size of the data portion of the array) pSeries->SetSeriesSize(-(SSIZE_T)(pMT->GetBaseSize())); } // If we get here we are assuming that there was no truncation. If this is not the case then // an array whose base type is not a value class was created and was larger then 0xffff (a word) _ASSERTE(dwComponentSize == pMT->GetComponentSize()); return(pMT); } // Module::CreateArrayMethodTable #ifdef FEATURE_ARRAYSTUB_AS_IL class ArrayOpLinker : public ILStubLinker { ILCodeStream * m_pCode; ArrayMethodDesc * m_pMD; SigTypeContext m_emptyContext; public: ArrayOpLinker(ArrayMethodDesc * pMD) : ILStubLinker(pMD->GetModule(), pMD->GetSignature(), &m_emptyContext, pMD, (ILStubLinkerFlags)(ILSTUB_LINKER_FLAG_STUB_HAS_THIS | ILSTUB_LINKER_FLAG_TARGET_HAS_THIS)) { m_pCode = NewCodeStream(kDispatch); m_pMD = pMD; } void EmitStub() { MethodTable *pMT = m_pMD->GetMethodTable(); BOOL fHasLowerBounds = pMT->GetInternalCorElementType() == ELEMENT_TYPE_ARRAY; DWORD dwTotalLocalNum = NewLocal(ELEMENT_TYPE_I4); DWORD dwLengthLocalNum = NewLocal(ELEMENT_TYPE_I4); mdToken tokRawData = GetToken(CoreLibBinder::GetField(FIELD__RAW_DATA__DATA)); ILCodeLabel * pRangeExceptionLabel = NewCodeLabel(); ILCodeLabel * pRangeExceptionLabel1 = NewCodeLabel(); ILCodeLabel * pCheckDone = NewCodeLabel(); ILCodeLabel * pNotSZArray = NewCodeLabel(); ILCodeLabel * pTypeMismatchExceptionLabel = NULL; UINT rank = pMT->GetRank(); UINT firstIdx = 0; UINT hiddenArgIdx = rank; _ASSERTE(rank>0); #ifndef TARGET_X86 if(m_pMD->GetArrayFuncIndex() == ArrayMethodDesc::ARRAY_FUNC_ADDRESS) { firstIdx = 1; hiddenArgIdx = 0; } #endif ArrayClass *pcls = (ArrayClass*)(pMT->GetClass()); if(pcls->GetArrayElementType() == ELEMENT_TYPE_CLASS) { // Type Check if(m_pMD->GetArrayFuncIndex() == ArrayMethodDesc::ARRAY_FUNC_SET) { ILCodeLabel * pTypeCheckOK = NewCodeLabel(); m_pCode->EmitLDARG(rank); // load value to store m_pCode->EmitBRFALSE(pTypeCheckOK); //Storing NULL is OK m_pCode->EmitLDARG(rank); // return param m_pCode->EmitLDFLDA(tokRawData); m_pCode->EmitLDC(Object::GetOffsetOfFirstField()); m_pCode->EmitSUB(); m_pCode->EmitLDIND_I(); // TypeHandle m_pCode->EmitLoadThis(); m_pCode->EmitLDFLDA(tokRawData); m_pCode->EmitLDC(Object::GetOffsetOfFirstField()); m_pCode->EmitSUB(); m_pCode->EmitLDIND_I(); // Array MT m_pCode->EmitLDC(MethodTable::GetOffsetOfArrayElementTypeHandle()); m_pCode->EmitADD(); m_pCode->EmitLDIND_I(); m_pCode->EmitCEQ(); m_pCode->EmitBRTRUE(pTypeCheckOK); // Same type is OK // Call type check helper m_pCode->EmitLDARG(rank); m_pCode->EmitLoadThis(); m_pCode->EmitCALL(METHOD__STUBHELPERS__ARRAY_TYPE_CHECK,2,0); m_pCode->EmitLabel(pTypeCheckOK); } else if(m_pMD->GetArrayFuncIndex() == ArrayMethodDesc::ARRAY_FUNC_ADDRESS) { // Check that the hidden param is same type ILCodeLabel *pTypeCheckPassed = NewCodeLabel(); pTypeMismatchExceptionLabel = NewCodeLabel(); m_pCode->EmitLDARG(hiddenArgIdx); // hidden param m_pCode->EmitBRFALSE(pTypeCheckPassed); m_pCode->EmitLDARG(hiddenArgIdx); m_pCode->EmitLoadThis(); m_pCode->EmitLDFLDA(tokRawData); m_pCode->EmitLDC(Object::GetOffsetOfFirstField()); m_pCode->EmitSUB(); m_pCode->EmitLDIND_I(); // Array MT m_pCode->EmitCEQ(); m_pCode->EmitBRFALSE(pTypeMismatchExceptionLabel); // throw exception if not same m_pCode->EmitLabel(pTypeCheckPassed); } } if(rank == 1 && fHasLowerBounds) { // check if the array is SZArray. 
m_pCode->EmitLoadThis(); m_pCode->EmitLDFLDA(tokRawData); m_pCode->EmitLDC(Object::GetOffsetOfFirstField()); m_pCode->EmitSUB(); m_pCode->EmitLDIND_I(); m_pCode->EmitLDC(MethodTable::GetOffsetOfFlags()); m_pCode->EmitADD(); m_pCode->EmitLDIND_I4(); m_pCode->EmitLDC(MethodTable::GetIfArrayThenSzArrayFlag()); m_pCode->EmitAND(); m_pCode->EmitBRFALSE(pNotSZArray); // goto multi-dimmArray code if not szarray // it is SZArray // bounds check m_pCode->EmitLoadThis(); m_pCode->EmitLDFLDA(tokRawData); m_pCode->EmitLDC(ArrayBase::GetOffsetOfNumComponents() - Object::GetOffsetOfFirstField()); m_pCode->EmitADD(); m_pCode->EmitLDIND_I4(); m_pCode->EmitLDARG(firstIdx); m_pCode->EmitBLE_UN(pRangeExceptionLabel); m_pCode->EmitLoadThis(); m_pCode->EmitLDFLDA(tokRawData); m_pCode->EmitLDC(ArrayBase::GetBoundsOffset(pMT) - Object::GetOffsetOfFirstField()); m_pCode->EmitADD(); m_pCode->EmitLDARG(firstIdx); m_pCode->EmitBR(pCheckDone); m_pCode->EmitLabel(pNotSZArray); } for (UINT i = 0; i < rank; i++) { // Cache length m_pCode->EmitLoadThis(); m_pCode->EmitLDFLDA(tokRawData); m_pCode->EmitLDC((ArrayBase::GetBoundsOffset(pMT) - Object::GetOffsetOfFirstField()) + i*sizeof(DWORD)); m_pCode->EmitADD(); m_pCode->EmitLDIND_I4(); m_pCode->EmitSTLOC(dwLengthLocalNum); // Fetch index m_pCode->EmitLDARG(firstIdx + i); if (fHasLowerBounds) { // Load lower bound m_pCode->EmitLoadThis(); m_pCode->EmitLDFLDA(tokRawData); m_pCode->EmitLDC((ArrayBase::GetLowerBoundsOffset(pMT) - Object::GetOffsetOfFirstField()) + i*sizeof(DWORD)); m_pCode->EmitADD(); m_pCode->EmitLDIND_I4(); // Subtract lower bound m_pCode->EmitSUB(); } // Compare with length m_pCode->EmitDUP(); m_pCode->EmitLDLOC(dwLengthLocalNum); m_pCode->EmitBGE_UN(pRangeExceptionLabel1); // Add to the running total if we have one already if (i > 0) { m_pCode->EmitLDLOC(dwTotalLocalNum); m_pCode->EmitLDLOC(dwLengthLocalNum); m_pCode->EmitMUL(); m_pCode->EmitADD(); } m_pCode->EmitSTLOC(dwTotalLocalNum); } // Compute element address m_pCode->EmitLoadThis(); m_pCode->EmitLDFLDA(tokRawData); m_pCode->EmitLDC(ArrayBase::GetDataPtrOffset(pMT) - Object::GetOffsetOfFirstField()); m_pCode->EmitADD(); m_pCode->EmitLDLOC(dwTotalLocalNum); m_pCode->EmitLabel(pCheckDone); m_pCode->EmitCONV_U(); SIZE_T elemSize = pMT->GetComponentSize(); if (elemSize != 1) { m_pCode->EmitLDC(elemSize); m_pCode->EmitMUL(); } m_pCode->EmitADD(); LocalDesc elemType(pMT->GetArrayElementTypeHandle().GetInternalCorElementType()); switch (m_pMD->GetArrayFuncIndex()) { case ArrayMethodDesc::ARRAY_FUNC_GET: if(elemType.ElementType[0]==ELEMENT_TYPE_VALUETYPE) { m_pCode->EmitLDOBJ(GetToken(pMT->GetArrayElementTypeHandle())); } else m_pCode->EmitLDIND_T(&elemType); break; case ArrayMethodDesc::ARRAY_FUNC_SET: // Value to store into the array m_pCode->EmitLDARG(rank); if(elemType.ElementType[0]==ELEMENT_TYPE_VALUETYPE) { m_pCode->EmitSTOBJ(GetToken(pMT->GetArrayElementTypeHandle())); } else m_pCode->EmitSTIND_T(&elemType); break; case ArrayMethodDesc::ARRAY_FUNC_ADDRESS: break; default: _ASSERTE(!"Unknown ArrayFuncIndex"); } m_pCode->EmitRET(); m_pCode->EmitLDC(0); m_pCode->EmitLabel(pRangeExceptionLabel1); // Assumes that there is one "int" pushed on the stack m_pCode->EmitPOP(); mdToken tokIndexOutOfRangeCtorExcep = GetToken((CoreLibBinder::GetException(kIndexOutOfRangeException))->GetDefaultConstructor()); m_pCode->EmitLabel(pRangeExceptionLabel); m_pCode->EmitNEWOBJ(tokIndexOutOfRangeCtorExcep, 0); m_pCode->EmitTHROW(); if(pTypeMismatchExceptionLabel != NULL) { mdToken tokTypeMismatchExcepCtor = 
GetToken((CoreLibBinder::GetException(kArrayTypeMismatchException))->GetDefaultConstructor()); m_pCode->EmitLabel(pTypeMismatchExceptionLabel); m_pCode->EmitNEWOBJ(tokTypeMismatchExcepCtor, 0); m_pCode->EmitTHROW(); } } }; Stub *GenerateArrayOpStub(ArrayMethodDesc* pMD) { STANDARD_VM_CONTRACT; ArrayOpLinker sl(pMD); sl.EmitStub(); PCCOR_SIGNATURE pSig; DWORD cbSig; AllocMemTracker amTracker; if (pMD->GetArrayFuncIndex() == ArrayMethodDesc::ARRAY_FUNC_ADDRESS) { // The stub has to have signature with explicit hidden argument instead of CORINFO_CALLCONV_PARAMTYPE. // Generate a new signature for the stub here. ((ArrayClass*)(pMD->GetMethodTable()->GetClass()))->GenerateArrayAccessorCallSig(pMD->GetMethodTable()->GetRank(), ArrayMethodDesc::ARRAY_FUNC_ADDRESS, &pSig, &cbSig, pMD->GetLoaderAllocator(), &amTracker, 1); } else { pMD->GetSig(&pSig,&cbSig); } amTracker.SuppressRelease(); static const ILStubTypes stubTypes[3] = { ILSTUB_ARRAYOP_GET, ILSTUB_ARRAYOP_SET, ILSTUB_ARRAYOP_ADDRESS }; _ASSERTE(pMD->GetArrayFuncIndex() <= ARRAY_SIZE(stubTypes)); NDirectStubFlags arrayOpStubFlag = (NDirectStubFlags)stubTypes[pMD->GetArrayFuncIndex()]; MethodDesc * pStubMD = ILStubCache::CreateAndLinkNewILStubMethodDesc(pMD->GetLoaderAllocator(), pMD->GetMethodTable(), arrayOpStubFlag, pMD->GetModule(), pSig, cbSig, NULL, &sl); return Stub::NewStub(JitILStub(pStubMD)); } #else // FEATURE_ARRAYSTUB_AS_IL //======================================================================== // Generates the platform-independent arrayop stub. //======================================================================== void GenerateArrayOpScript(ArrayMethodDesc *pMD, ArrayOpScript *paos) { STANDARD_VM_CONTRACT; ArrayOpIndexSpec *pai = NULL; MethodTable *pMT = pMD->GetMethodTable(); ArrayClass *pcls = (ArrayClass*)(pMT->GetClass()); // The ArrayOpScript and ArrayOpIndexSpec structs double as hash keys // for the ArrayStubCache. Thus, it's imperative that there be no // unused "pad" fields that contain unstable values. // pMT->GetRank() is bounded so the arithmetics here is safe. memset(paos, 0, sizeof(ArrayOpScript) + sizeof(ArrayOpIndexSpec) * pMT->GetRank()); paos->m_rank = (BYTE)(pMT->GetRank()); paos->m_fHasLowerBounds = (pMT->GetInternalCorElementType() == ELEMENT_TYPE_ARRAY); paos->m_ofsoffirst = ArrayBase::GetDataPtrOffset(pMT); switch (pMD->GetArrayFuncIndex()) { case ArrayMethodDesc::ARRAY_FUNC_GET: paos->m_op = ArrayOpScript::LOAD; break; case ArrayMethodDesc::ARRAY_FUNC_SET: paos->m_op = ArrayOpScript::STORE; break; case ArrayMethodDesc::ARRAY_FUNC_ADDRESS: paos->m_op = ArrayOpScript::LOADADDR; break; default: _ASSERTE(!"Unknown array func!"); } MetaSig msig(pMD); _ASSERTE(!msig.IsVarArg()); // No array signature is varargs, code below does not expect it. 
switch (pMT->GetArrayElementTypeHandle().GetInternalCorElementType()) { // These are all different because of sign extension case ELEMENT_TYPE_I1: paos->m_elemsize = 1; paos->m_signed = TRUE; break; case ELEMENT_TYPE_BOOLEAN: case ELEMENT_TYPE_U1: paos->m_elemsize = 1; break; case ELEMENT_TYPE_I2: paos->m_elemsize = 2; paos->m_signed = TRUE; break; case ELEMENT_TYPE_CHAR: case ELEMENT_TYPE_U2: paos->m_elemsize = 2; break; case ELEMENT_TYPE_I4: IN_TARGET_32BIT(case ELEMENT_TYPE_I:) paos->m_elemsize = 4; paos->m_signed = TRUE; break; case ELEMENT_TYPE_U4: IN_TARGET_32BIT(case ELEMENT_TYPE_U:) IN_TARGET_32BIT(case ELEMENT_TYPE_PTR:) paos->m_elemsize = 4; break; case ELEMENT_TYPE_I8: IN_TARGET_64BIT(case ELEMENT_TYPE_I:) paos->m_elemsize = 8; paos->m_signed = TRUE; break; case ELEMENT_TYPE_U8: IN_TARGET_64BIT(case ELEMENT_TYPE_U:) IN_TARGET_64BIT(case ELEMENT_TYPE_PTR:) paos->m_elemsize = 8; break; case ELEMENT_TYPE_R4: paos->m_elemsize = 4; paos->m_flags |= paos->ISFPUTYPE; break; case ELEMENT_TYPE_R8: paos->m_elemsize = 8; paos->m_flags |= paos->ISFPUTYPE; break; case ELEMENT_TYPE_SZARRAY: case ELEMENT_TYPE_ARRAY: case ELEMENT_TYPE_CLASS: case ELEMENT_TYPE_STRING: case ELEMENT_TYPE_OBJECT: paos->m_elemsize = sizeof(LPVOID); paos->m_flags |= paos->NEEDSWRITEBARRIER; if (paos->m_op != ArrayOpScript::LOAD) { paos->m_flags |= paos->NEEDSTYPECHECK; } break; case ELEMENT_TYPE_VALUETYPE: paos->m_elemsize = pMT->GetComponentSize(); if (pMT->ContainsPointers()) { paos->m_gcDesc = CGCDesc::GetCGCDescFromMT(pMT); paos->m_flags |= paos->NEEDSWRITEBARRIER; } break; default: _ASSERTE(!"Unsupported Array Type!"); } ArgIterator argit(&msig); #ifdef TARGET_X86 paos->m_cbretpop = argit.CbStackPop(); #endif if (argit.HasRetBuffArg()) { paos->m_flags |= ArrayOpScript::HASRETVALBUFFER; paos->m_fRetBufLoc = argit.GetRetBuffArgOffset(); } if (paos->m_op == ArrayOpScript::LOADADDR) { paos->m_typeParamOffs = argit.GetParamTypeArgOffset(); } for (UINT idx = 0; idx < paos->m_rank; idx++) { pai = (ArrayOpIndexSpec*)(paos->GetArrayOpIndexSpecs() + idx); pai->m_idxloc = argit.GetNextOffset(); pai->m_lboundofs = paos->m_fHasLowerBounds ? 
(UINT32) (ArrayBase::GetLowerBoundsOffset(pMT) + idx*sizeof(DWORD)) : 0; pai->m_lengthofs = ArrayBase::GetBoundsOffset(pMT) + idx*sizeof(DWORD); } if (paos->m_op == paos->STORE) { paos->m_fValLoc = argit.GetNextOffset(); } } //--------------------------------------------------------- // Cache for array stubs //--------------------------------------------------------- class ArrayStubCache : public StubCacheBase { virtual void CompileStub(const BYTE *pRawStub, StubLinker *psl); virtual UINT Length(const BYTE *pRawStub); public: ArrayStubCache(LoaderHeap* heap) : StubCacheBase(heap) { } static ArrayStubCache * GetArrayStubCache() { STANDARD_VM_CONTRACT; static ArrayStubCache * s_pArrayStubCache = NULL; if (s_pArrayStubCache == NULL) { ArrayStubCache * pArrayStubCache = new ArrayStubCache(SystemDomain::GetGlobalLoaderAllocator()->GetStubHeap()); if (FastInterlockCompareExchangePointer(&s_pArrayStubCache, pArrayStubCache, NULL) != NULL) delete pArrayStubCache; } return s_pArrayStubCache; } }; Stub *GenerateArrayOpStub(ArrayMethodDesc* pMD) { STANDARD_VM_CONTRACT; MethodTable *pMT = pMD->GetMethodTable(); ArrayOpScript *paos = (ArrayOpScript*)_alloca(sizeof(ArrayOpScript) + sizeof(ArrayOpIndexSpec) * pMT->GetRank()); GenerateArrayOpScript(pMD, paos); Stub *pArrayOpStub; pArrayOpStub = ArrayStubCache::GetArrayStubCache()->Canonicalize((const BYTE *)paos); if (pArrayOpStub == NULL) COMPlusThrowOM(); return pArrayOpStub; } void ArrayStubCache::CompileStub(const BYTE *pRawStub, StubLinker *psl) { STANDARD_VM_CONTRACT; ((CPUSTUBLINKER*)psl)->EmitArrayOpStub((ArrayOpScript*)pRawStub); } UINT ArrayStubCache::Length(const BYTE *pRawStub) { LIMITED_METHOD_CONTRACT; return ((ArrayOpScript*)pRawStub)->Length(); } #endif // FEATURE_ARRAYSTUB_AS_IL //--------------------------------------------------------------------- // This method returns TRUE if pInterfaceMT could be one of the interfaces // that are implicitly implemented by SZArrays BOOL IsImplicitInterfaceOfSZArray(MethodTable *pInterfaceMT) { LIMITED_METHOD_CONTRACT; PRECONDITION(pInterfaceMT->IsInterface()); PRECONDITION(pInterfaceMT->HasInstantiation()); // Is target interface Anything<T> in CoreLib? if (!pInterfaceMT->HasInstantiation() || !pInterfaceMT->GetModule()->IsSystem()) return FALSE; unsigned rid = pInterfaceMT->GetTypeDefRid(); // Is target interface IList<T> or one of its ancestors, or IReadOnlyList<T>? return (rid == CoreLibBinder::GetExistingClass(CLASS__ILISTGENERIC)->GetTypeDefRid() || rid == CoreLibBinder::GetExistingClass(CLASS__ICOLLECTIONGENERIC)->GetTypeDefRid() || rid == CoreLibBinder::GetExistingClass(CLASS__IENUMERABLEGENERIC)->GetTypeDefRid() || rid == CoreLibBinder::GetExistingClass(CLASS__IREADONLYCOLLECTIONGENERIC)->GetTypeDefRid() || rid == CoreLibBinder::GetExistingClass(CLASS__IREADONLYLISTGENERIC)->GetTypeDefRid()); } //---------------------------------------------------------------------------------- // Calls to (IList<T>)(array).Meth are actually implemented by SZArrayHelper.Meth<T> // This workaround exists for two reasons: // // - For working set reasons, we don't want to insert these methods in the array hierarchy // in the normal way. // - For platform and devtime reasons, we still want to use the C# compiler to generate // the method bodies. // // (Though it's questionable whether any devtime was saved.) // // This method takes care of the mapping between the two. Give it a method // IList<T>.Meth, and it will return SZArrayHelper.Meth<T>.
//---------------------------------------------------------------------------------- MethodDesc* GetActualImplementationForArrayGenericIListOrIReadOnlyListMethod(MethodDesc *pItfcMeth, TypeHandle theT) { CONTRACTL { THROWS; GC_TRIGGERS; INJECT_FAULT(COMPlusThrowOM()); } CONTRACTL_END int slot = pItfcMeth->GetSlot(); // We need to pick the right starting method depending on the depth of the inheritance chain static const BinderMethodID startingMethod[] = { METHOD__SZARRAYHELPER__GETENUMERATOR, // First method of IEnumerable`1 METHOD__SZARRAYHELPER__GET_COUNT, // First method of ICollection`1/IReadOnlyCollection`1 METHOD__SZARRAYHELPER__GET_ITEM // First method of IList`1/IReadOnlyList`1 }; // Subtract one for the non-generic IEnumerable that the generic enumerable inherits from unsigned int inheritanceDepth = pItfcMeth->GetMethodTable()->GetNumInterfaces() - 1; PREFIX_ASSUME(0 <= inheritanceDepth && inheritanceDepth < ARRAY_SIZE(startingMethod)); MethodDesc *pGenericImplementor = CoreLibBinder::GetMethod((BinderMethodID)(startingMethod[inheritanceDepth] + slot)); // The most common reason for this assert is that the order of the SZArrayHelper methods in // corelib.h does not match the order they are implemented on the generic interfaces. _ASSERTE(pGenericImplementor == MemberLoader::FindMethodByName(g_pSZArrayHelperClass, pItfcMeth->GetName())); // OPTIMIZATION: For any method other than GetEnumerator(), we can safely substitute // "Object" for reference-type theT's. This causes fewer methods to be instantiated. if (startingMethod[inheritanceDepth] != METHOD__SZARRAYHELPER__GETENUMERATOR && !theT.IsValueType()) { theT = TypeHandle(g_pObjectClass); } MethodDesc *pActualImplementor = MethodDesc::FindOrCreateAssociatedMethodDesc(pGenericImplementor, g_pSZArrayHelperClass, FALSE, Instantiation(&theT, 1), FALSE // allowInstParam ); _ASSERTE(pActualImplementor); return pActualImplementor; } #endif // DACCESS_COMPILE CorElementType GetNormalizedIntegralArrayElementType(CorElementType elementType) { LIMITED_METHOD_CONTRACT; _ASSERTE(CorTypeInfo::IsPrimitiveType_NoThrow(elementType)); // Array Primitive types such as E_T_I4 and E_T_U4 are interchangeable // Enums with interchangeable underlying types are interchangeable // BOOL is NOT interchangeable with I1/U1, nor is CHAR with I2/U2 switch (elementType) { case ELEMENT_TYPE_U1: case ELEMENT_TYPE_U2: case ELEMENT_TYPE_U4: case ELEMENT_TYPE_U8: case ELEMENT_TYPE_U: return (CorElementType)(elementType - 1); // normalize to signed type default: break; } return elementType; }
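A managed-side illustration of what the normalization in GetNormalizedIntegralArrayElementType enables (a hedged, illustrative C# sketch, not part of array.cpp; it assumes only the standard CLR rule that array element types normalizing to the same signed integral type are cast-compatible at runtime, while bool and char stay distinct):

using System;

class NormalizationDemo
{
    static void Main()
    {
        // U4 normalizes to I4, so a uint[] can be viewed as an int[] at runtime.
        object unsignedInts = new uint[] { 1u, 2u, 3u };
        int[] asSigned = (int[])unsignedInts;
        Console.WriteLine(asSigned[2]); // prints 3

        // Enums normalize to their underlying integral type, so this cast also succeeds.
        int[] asUnderlying = (int[])(object)new DayOfWeek[] { DayOfWeek.Friday };
        Console.WriteLine(asUnderlying[0]); // prints 5

        // BOOL is deliberately not interchangeable with I1/U1 (see the comment above),
        // so this cast is rejected at runtime.
        try
        {
            byte[] bad = (byte[])(object)new bool[] { true };
        }
        catch (InvalidCastException)
        {
            Console.WriteLine("bool[] -> byte[] rejected");
        }
    }
}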
switch (pMT->GetArrayElementTypeHandle().GetInternalCorElementType()) { // These are all different because of sign extension case ELEMENT_TYPE_I1: paos->m_elemsize = 1; paos->m_signed = TRUE; break; case ELEMENT_TYPE_BOOLEAN: case ELEMENT_TYPE_U1: paos->m_elemsize = 1; break; case ELEMENT_TYPE_I2: paos->m_elemsize = 2; paos->m_signed = TRUE; break; case ELEMENT_TYPE_CHAR: case ELEMENT_TYPE_U2: paos->m_elemsize = 2; break; case ELEMENT_TYPE_I4: IN_TARGET_32BIT(case ELEMENT_TYPE_I:) paos->m_elemsize = 4; paos->m_signed = TRUE; break; case ELEMENT_TYPE_U4: IN_TARGET_32BIT(case ELEMENT_TYPE_U:) IN_TARGET_32BIT(case ELEMENT_TYPE_PTR:) paos->m_elemsize = 4; break; case ELEMENT_TYPE_I8: IN_TARGET_64BIT(case ELEMENT_TYPE_I:) paos->m_elemsize = 8; paos->m_signed = TRUE; break; case ELEMENT_TYPE_U8: IN_TARGET_64BIT(case ELEMENT_TYPE_U:) IN_TARGET_64BIT(case ELEMENT_TYPE_PTR:) paos->m_elemsize = 8; break; case ELEMENT_TYPE_R4: paos->m_elemsize = 4; paos->m_flags |= paos->ISFPUTYPE; break; case ELEMENT_TYPE_R8: paos->m_elemsize = 8; paos->m_flags |= paos->ISFPUTYPE; break; case ELEMENT_TYPE_SZARRAY: case ELEMENT_TYPE_ARRAY: case ELEMENT_TYPE_CLASS: case ELEMENT_TYPE_STRING: case ELEMENT_TYPE_OBJECT: paos->m_elemsize = sizeof(LPVOID); paos->m_flags |= paos->NEEDSWRITEBARRIER; if (paos->m_op != ArrayOpScript::LOAD) { paos->m_flags |= paos->NEEDSTYPECHECK; } break; case ELEMENT_TYPE_VALUETYPE: paos->m_elemsize = pMT->GetComponentSize(); if (pMT->ContainsPointers()) { paos->m_gcDesc = CGCDesc::GetCGCDescFromMT(pMT); paos->m_flags |= paos->NEEDSWRITEBARRIER; } break; default: _ASSERTE(!"Unsupported Array Type!"); } ArgIterator argit(&msig); #ifdef TARGET_X86 paos->m_cbretpop = argit.CbStackPop(); #endif if (argit.HasRetBuffArg()) { paos->m_flags |= ArrayOpScript::HASRETVALBUFFER; paos->m_fRetBufLoc = argit.GetRetBuffArgOffset(); } if (paos->m_op == ArrayOpScript::LOADADDR) { paos->m_typeParamOffs = argit.GetParamTypeArgOffset(); } for (UINT idx = 0; idx < paos->m_rank; idx++) { pai = (ArrayOpIndexSpec*)(paos->GetArrayOpIndexSpecs() + idx); pai->m_idxloc = argit.GetNextOffset(); pai->m_lboundofs = paos->m_fHasLowerBounds ? 
(UINT32) (ArrayBase::GetLowerBoundsOffset(pMT) + idx*sizeof(DWORD)) : 0; pai->m_lengthofs = ArrayBase::GetBoundsOffset(pMT) + idx*sizeof(DWORD); } if (paos->m_op == paos->STORE) { paos->m_fValLoc = argit.GetNextOffset(); } } //--------------------------------------------------------- // Cache for array stubs //--------------------------------------------------------- class ArrayStubCache : public StubCacheBase { virtual void CompileStub(const BYTE *pRawStub, StubLinker *psl); virtual UINT Length(const BYTE *pRawStub); public: public: ArrayStubCache(LoaderHeap* heap) : StubCacheBase(heap) { } static ArrayStubCache * GetArrayStubCache() { STANDARD_VM_CONTRACT; static ArrayStubCache * s_pArrayStubCache = NULL; if (s_pArrayStubCache == NULL) { ArrayStubCache * pArrayStubCache = new ArrayStubCache(SystemDomain::GetGlobalLoaderAllocator()->GetStubHeap()); if (FastInterlockCompareExchangePointer(&s_pArrayStubCache, pArrayStubCache, NULL) != NULL) delete pArrayStubCache; } return s_pArrayStubCache; } }; Stub *GenerateArrayOpStub(ArrayMethodDesc* pMD) { STANDARD_VM_CONTRACT; MethodTable *pMT = pMD->GetMethodTable(); ArrayOpScript *paos = (ArrayOpScript*)_alloca(sizeof(ArrayOpScript) + sizeof(ArrayOpIndexSpec) * pMT->GetRank()); GenerateArrayOpScript(pMD, paos); Stub *pArrayOpStub; pArrayOpStub = ArrayStubCache::GetArrayStubCache()->Canonicalize((const BYTE *)paos); if (pArrayOpStub == NULL) COMPlusThrowOM(); return pArrayOpStub; } void ArrayStubCache::CompileStub(const BYTE *pRawStub, StubLinker *psl) { STANDARD_VM_CONTRACT; ((CPUSTUBLINKER*)psl)->EmitArrayOpStub((ArrayOpScript*)pRawStub); } UINT ArrayStubCache::Length(const BYTE *pRawStub) { LIMITED_METHOD_CONTRACT; return ((ArrayOpScript*)pRawStub)->Length(); } #endif // FEATURE_ARRAYSTUB_AS_IL //--------------------------------------------------------------------- // This method returns TRUE if pInterfaceMT could be one of the interfaces // that are implicitly implemented by SZArrays BOOL IsImplicitInterfaceOfSZArray(MethodTable *pInterfaceMT) { LIMITED_METHOD_CONTRACT; PRECONDITION(pInterfaceMT->IsInterface()); PRECONDITION(pInterfaceMT->HasInstantiation()); // Is target interface Anything<T> in CoreLib? if (!pInterfaceMT->HasInstantiation() || !pInterfaceMT->GetModule()->IsSystem()) return FALSE; unsigned rid = pInterfaceMT->GetTypeDefRid(); // Is target interface IList<T> or one of its ancestors, or IReadOnlyList<T>? return (rid == CoreLibBinder::GetExistingClass(CLASS__ILISTGENERIC)->GetTypeDefRid() || rid == CoreLibBinder::GetExistingClass(CLASS__ICOLLECTIONGENERIC)->GetTypeDefRid() || rid == CoreLibBinder::GetExistingClass(CLASS__IENUMERABLEGENERIC)->GetTypeDefRid() || rid == CoreLibBinder::GetExistingClass(CLASS__IREADONLYCOLLECTIONGENERIC)->GetTypeDefRid() || rid == CoreLibBinder::GetExistingClass(CLASS__IREADONLYLISTGENERIC)->GetTypeDefRid()); } //---------------------------------------------------------------------------------- // Calls to (IList<T>)(array).Meth are actually implemented by SZArrayHelper.Meth<T> // This workaround exists for two reasons: // // - For working set reasons, we don't want insert these methods in the array hierachy // in the normal way. // - For platform and devtime reasons, we still want to use the C# compiler to generate // the method bodies. // // (Though it's questionable whether any devtime was saved.) // // This method takes care of the mapping between the two. Give it a method // IList<T>.Meth, and it will return SZArrayHelper.Meth<T>. 
//---------------------------------------------------------------------------------- MethodDesc* GetActualImplementationForArrayGenericIListOrIReadOnlyListMethod(MethodDesc *pItfcMeth, TypeHandle theT) { CONTRACTL { THROWS; GC_TRIGGERS; INJECT_FAULT(COMPlusThrowOM()); } CONTRACTL_END int slot = pItfcMeth->GetSlot(); // We need to pick the right starting method depending on the depth of the inheritance chain static const BinderMethodID startingMethod[] = { METHOD__SZARRAYHELPER__GETENUMERATOR, // First method of IEnumerable`1 METHOD__SZARRAYHELPER__GET_COUNT, // First method of ICollection`1/IReadOnlyCollection`1 METHOD__SZARRAYHELPER__GET_ITEM // First method of IList`1/IReadOnlyList`1 }; // Subtract one for the non-generic IEnumerable that the generic enumerable inherits from unsigned int inheritanceDepth = pItfcMeth->GetMethodTable()->GetNumInterfaces() - 1; PREFIX_ASSUME(0 <= inheritanceDepth && inheritanceDepth < ARRAY_SIZE(startingMethod)); MethodDesc *pGenericImplementor = CoreLibBinder::GetMethod((BinderMethodID)(startingMethod[inheritanceDepth] + slot)); // The most common reason for this assert is that the order of the SZArrayHelper methods in // corelib.h does not match the order they are implemented on the generic interfaces. _ASSERTE(pGenericImplementor == MemberLoader::FindMethodByName(g_pSZArrayHelperClass, pItfcMeth->GetName())); // OPTIMIZATION: For any method other than GetEnumerator(), we can safely substitute // "Object" for reference-type theT's. This causes fewer methods to be instantiated. if (startingMethod[inheritanceDepth] != METHOD__SZARRAYHELPER__GETENUMERATOR && !theT.IsValueType()) { theT = TypeHandle(g_pObjectClass); } MethodDesc *pActualImplementor = MethodDesc::FindOrCreateAssociatedMethodDesc(pGenericImplementor, g_pSZArrayHelperClass, FALSE, Instantiation(&theT, 1), FALSE // allowInstParam ); _ASSERTE(pActualImplementor); return pActualImplementor; } #endif // DACCESS_COMPILE CorElementType GetNormalizedIntegralArrayElementType(CorElementType elementType) { LIMITED_METHOD_CONTRACT; _ASSERTE(CorTypeInfo::IsPrimitiveType_NoThrow(elementType)); // Array Primitive types such as E_T_I4 and E_T_U4 are interchangeable // Enums with interchangeable underlying types are interchangable // BOOL is NOT interchangeable with I1/U1, neither CHAR -- with I2/U2 switch (elementType) { case ELEMENT_TYPE_U1: case ELEMENT_TYPE_U2: case ELEMENT_TYPE_U4: case ELEMENT_TYPE_U8: case ELEMENT_TYPE_U: return (CorElementType)(elementType - 1); // normalize to signed type default: break; } return elementType; }
-1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64 we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and is therefore not a reliable check on Arm64. On Arm64 we always need SIMD features to support ABI handling, so all such usages were changed to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features. On Arm64 we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and is therefore not a reliable check on Arm64. On Arm64 we always need SIMD features to support ABI handling, so all such usages were changed to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
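The change described above amounts to a guard swap at SIMD-dependent call sites. Below is a minimal, self-contained sketch (not the actual RyuJIT sources): the identifiers `featureSIMD`, `supportSIMDTypes()`, `COMPlus_FeatureSIMD`, and `TARGET_ARM64` come from the description above, while the struct and method bodies are assumptions used only to illustrate why call sites move from the raw flag to the query.

```cpp
// Sketch only: shows the featureSIMD -> supportSIMDTypes() pattern implied by
// the PR description. Not the real compiler; all bodies are illustrative.
#include <cstdio>

struct CompilerSketch
{
    // Hypothetical field set from COMPlus_FeatureSIMD; users can turn it off.
    bool featureSIMD = true;

    // Hypothetical query: on Arm64, SIMD types are always required for ABI
    // handling, so the answer must not depend on the user-controlled flag.
    bool supportSIMDTypes() const
    {
#ifdef TARGET_ARM64
        return true;
#else
        return featureSIMD;
#endif
    }
};

int main()
{
    CompilerSketch comp;
    comp.featureSIMD = false; // simulate COMPlus_FeatureSIMD=0

    // Before the change: paths guarded by the raw flag silently turn off
    // on Arm64 when the flag is cleared.
    if (comp.featureSIMD)
        std::printf("featureSIMD path taken\n");

    // After the change: the same paths query supportSIMDTypes(), which on
    // Arm64 stays true regardless of the flag.
    if (comp.supportSIMDTypes())
        std::printf("supportSIMDTypes() path taken\n");

    return 0;
}
```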
./src/coreclr/jit/fginline.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif // Flowgraph Inline Support /*****************************************************************************/ //------------------------------------------------------------------------ // fgCheckForInlineDepthAndRecursion: compute depth of the candidate, and // check for recursion. // // Return Value: // The depth of the inline candidate. The root method is a depth 0, top-level // candidates at depth 1, etc. // // Notes: // We generally disallow recursive inlines by policy. However, they are // supported by the underlying machinery. // // Likewise the depth limit is a policy consideration, and serves mostly // as a safeguard to prevent runaway inlining of small methods. // unsigned Compiler::fgCheckInlineDepthAndRecursion(InlineInfo* inlineInfo) { BYTE* candidateCode = inlineInfo->inlineCandidateInfo->methInfo.ILCode; InlineContext* inlineContext = inlineInfo->inlineCandidateInfo->inlinersContext; InlineResult* inlineResult = inlineInfo->inlineResult; // There should be a context for all candidates. assert(inlineContext != nullptr); int depth = 0; for (; inlineContext != nullptr; inlineContext = inlineContext->GetParent()) { assert(inlineContext->GetCode() != nullptr); depth++; if (inlineContext->GetCode() == candidateCode) { // This inline candidate has the same IL code buffer as an already // inlined method does. inlineResult->NoteFatal(InlineObservation::CALLSITE_IS_RECURSIVE); // No need to note CALLSITE_DEPTH we're already rejecting this candidate return depth; } if (depth > InlineStrategy::IMPLEMENTATION_MAX_INLINE_DEPTH) { break; } } inlineResult->NoteInt(InlineObservation::CALLSITE_DEPTH, depth); return depth; } //------------------------------------------------------------------------ // fgInline - expand inline candidates // // Returns: // phase status indicating if anything was modified // // Notes: // Inline candidates are identified during importation and candidate calls // must be top-level expressions. In input IR, the result of the call (if any) // is consumed elsewhere by a GT_RET_EXPR node. // // For successful inlines, calls are replaced by a sequence of argument setup // instructions, the inlined method body, and return value cleanup. Note // Inlining may introduce new inline candidates. These are processed in a // depth-first fashion, as the inliner walks the IR in statement order. // // After inline expansion in a statement, the statement tree // is walked to locate GT_RET_EXPR nodes. These are replaced by either // * the original call tree, if the inline failed // * the return value tree from the inlinee, if the inline succeeded // // This replacement happens in preorder; on the postorder side of the same // tree walk, we look for opportunties to devirtualize or optimize now that // we know the context for the newly supplied return value tree. // // Inline arguments may be directly substituted into the body of the inlinee // in some cases. See impInlineFetchArg. 
// PhaseStatus Compiler::fgInline() { if (!opts.OptEnabled(CLFLG_INLINING)) { return PhaseStatus::MODIFIED_NOTHING; } #ifdef DEBUG fgPrintInlinedMethods = JitConfig.JitPrintInlinedMethods().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args); #endif // DEBUG noway_assert(fgFirstBB != nullptr); BasicBlock* block = fgFirstBB; bool madeChanges = false; do { // Make the current basic block address available globally compCurBB = block; for (Statement* const stmt : block->Statements()) { #if defined(DEBUG) || defined(INLINE_DATA) // In debug builds we want the inline tree to show all failed // inlines. Some inlines may fail very early and never make it to // candidate stage. So scan the tree looking for those early failures. fgWalkTreePre(stmt->GetRootNodePointer(), fgFindNonInlineCandidate, stmt); #endif GenTree* expr = stmt->GetRootNode(); // The importer ensures that all inline candidates are // statement expressions. So see if we have a call. if (expr->IsCall()) { GenTreeCall* call = expr->AsCall(); // We do. Is it an inline candidate? // // Note we also process GuardeDevirtualizationCandidates here as we've // split off GT_RET_EXPRs for them even when they are not inline candidates // as we need similar processing to ensure they get patched back to where // they belong. if (call->IsInlineCandidate() || call->IsGuardedDevirtualizationCandidate()) { InlineResult inlineResult(this, call, stmt, "fgInline"); fgMorphStmt = stmt; fgMorphCallInline(call, &inlineResult); // If there's a candidate to process, we will make changes madeChanges = true; // fgMorphCallInline may have updated the // statement expression to a GT_NOP if the // call returned a value, regardless of // whether the inline succeeded or failed. // // If so, remove the GT_NOP and continue // on with the next statement. if (stmt->GetRootNode()->IsNothingNode()) { fgRemoveStmt(block, stmt); continue; } } } // See if we need to replace some return value place holders. // Also, see if this replacement enables further devirtualization. // // Note we have both preorder and postorder callbacks here. // // The preorder callback is responsible for replacing GT_RET_EXPRs // with the appropriate expansion (call or inline result). // Replacement may introduce subtrees with GT_RET_EXPR and so // we rely on the preorder to recursively process those as well. // // On the way back up, the postorder callback then re-examines nodes for // possible further optimization, as the (now complete) GT_RET_EXPR // replacement may have enabled optimizations by providing more // specific types for trees or variables. fgWalkTree(stmt->GetRootNodePointer(), fgUpdateInlineReturnExpressionPlaceHolder, fgLateDevirtualization, (void*)&madeChanges); // See if stmt is of the form GT_COMMA(call, nop) // If yes, we can get rid of GT_COMMA. if (expr->OperGet() == GT_COMMA && expr->AsOp()->gtOp1->OperGet() == GT_CALL && expr->AsOp()->gtOp2->OperGet() == GT_NOP) { madeChanges = true; stmt->SetRootNode(expr->AsOp()->gtOp1); } } block = block->bbNext; } while (block); #ifdef DEBUG // Check that we should not have any inline candidate or return value place holder left. 
block = fgFirstBB; noway_assert(block); do { for (Statement* const stmt : block->Statements()) { // Call Compiler::fgDebugCheckInlineCandidates on each node fgWalkTreePre(stmt->GetRootNodePointer(), fgDebugCheckInlineCandidates); } block = block->bbNext; } while (block); fgVerifyHandlerTab(); if (verbose || fgPrintInlinedMethods) { JITDUMP("**************** Inline Tree"); printf("\n"); m_inlineStrategy->Dump(verbose || JitConfig.JitPrintInlinedMethodsVerbose()); } #endif // DEBUG return madeChanges ? PhaseStatus::MODIFIED_EVERYTHING : PhaseStatus::MODIFIED_NOTHING; } #if defined(DEBUG) || defined(INLINE_DATA) //------------------------------------------------------------------------ // fgFindNonInlineCandidate: tree walk helper to ensure that a tree node // that is not an inline candidate is noted as a failed inline. // // Arguments: // pTree - pointer to pointer tree node being walked // data - contextual data for the walk // // Return Value: // walk result // // Note: // Invokes fgNoteNonInlineCandidate on the nodes it finds. Compiler::fgWalkResult Compiler::fgFindNonInlineCandidate(GenTree** pTree, fgWalkData* data) { GenTree* tree = *pTree; if (tree->gtOper == GT_CALL) { Compiler* compiler = data->compiler; Statement* stmt = (Statement*)data->pCallbackData; GenTreeCall* call = tree->AsCall(); compiler->fgNoteNonInlineCandidate(stmt, call); } return WALK_CONTINUE; } //------------------------------------------------------------------------ // fgNoteNonInlineCandidate: account for inlining failures in calls // not marked as inline candidates. // // Arguments: // stmt - statement containing the call // call - the call itself // // Notes: // Used in debug only to try and place descriptions of inline failures // into the proper context in the inline tree. void Compiler::fgNoteNonInlineCandidate(Statement* stmt, GenTreeCall* call) { if (call->IsInlineCandidate() || call->IsGuardedDevirtualizationCandidate()) { return; } InlineResult inlineResult(this, call, nullptr, "fgNoteNonInlineCandidate"); InlineObservation currentObservation = InlineObservation::CALLSITE_NOT_CANDIDATE; // Try and recover the reason left behind when the jit decided // this call was not a candidate. InlineObservation priorObservation = call->gtInlineObservation; if (InlIsValidObservation(priorObservation)) { currentObservation = priorObservation; } // Propagate the prior failure observation to this result. inlineResult.NotePriorFailure(currentObservation); inlineResult.SetReported(); if (call->gtCallType == CT_USER_FUNC) { m_inlineStrategy->NewContext(call->gtInlineContext, stmt, call)->SetFailed(&inlineResult); } } #endif #if FEATURE_MULTIREG_RET /********************************************************************************* * * tree - The node which needs to be converted to a struct pointer. * * Return the pointer by either __replacing__ the tree node with a suitable pointer * type or __without replacing__ and just returning a subtree or by __modifying__ * a subtree. */ GenTree* Compiler::fgGetStructAsStructPtr(GenTree* tree) { noway_assert(tree->OperIs(GT_LCL_VAR, GT_FIELD, GT_IND, GT_BLK, GT_OBJ, GT_COMMA) || tree->OperIsSIMD() || tree->OperIsHWIntrinsic()); // GT_CALL, cannot get address of call. // GT_MKREFANY, inlining should've been aborted due to mkrefany opcode. 
// GT_RET_EXPR, cannot happen after fgUpdateInlineReturnExpressionPlaceHolder switch (tree->OperGet()) { case GT_BLK: case GT_OBJ: case GT_IND: return tree->AsOp()->gtOp1; case GT_COMMA: tree->AsOp()->gtOp2 = fgGetStructAsStructPtr(tree->AsOp()->gtOp2); tree->gtType = TYP_BYREF; return tree; default: return gtNewOperNode(GT_ADDR, TYP_BYREF, tree); } } /*************************************************************************************************** * child - The inlinee of the retExpr node. * retClsHnd - The struct class handle of the type of the inlinee. * * Assign the inlinee to a tmp, if it is a call, just assign it to a lclVar, else we can * use a copyblock to do the assignment. */ GenTree* Compiler::fgAssignStructInlineeToVar(GenTree* child, CORINFO_CLASS_HANDLE retClsHnd) { assert(child->gtOper != GT_RET_EXPR && child->gtOper != GT_MKREFANY); unsigned tmpNum = lvaGrabTemp(false DEBUGARG("RetBuf for struct inline return candidates.")); lvaSetStruct(tmpNum, retClsHnd, false); var_types structType = lvaTable[tmpNum].lvType; GenTree* dst = gtNewLclvNode(tmpNum, structType); // If we have a call, we'd like it to be: V00 = call(), but first check if // we have a ", , , call()" -- this is very defensive as we may never get // an inlinee that is made of commas. If the inlinee is not a call, then // we use a copy block to do the assignment. GenTree* src = child; GenTree* lastComma = nullptr; while (src->gtOper == GT_COMMA) { lastComma = src; src = src->AsOp()->gtOp2; } GenTree* newInlinee = nullptr; if (src->gtOper == GT_CALL) { // If inlinee was just a call, new inlinee is v05 = call() newInlinee = gtNewAssignNode(dst, src); // When returning a multi-register value in a local var, make sure the variable is // marked as lvIsMultiRegRet, so it does not get promoted. if (src->AsCall()->HasMultiRegRetVal()) { lvaTable[tmpNum].lvIsMultiRegRet = true; } // If inlinee was comma, but a deeper call, new inlinee is (, , , v05 = call()) if (child->gtOper == GT_COMMA) { lastComma->AsOp()->gtOp2 = newInlinee; newInlinee = child; } } else { // Inlinee is not a call, so just create a copy block to the tmp. src = child; GenTree* dstAddr = fgGetStructAsStructPtr(dst); GenTree* srcAddr = fgGetStructAsStructPtr(src); newInlinee = gtNewCpObjNode(dstAddr, srcAddr, retClsHnd, false); } GenTree* production = gtNewLclvNode(tmpNum, structType); return gtNewOperNode(GT_COMMA, structType, newInlinee, production); } /*************************************************************************************************** * tree - The tree pointer that has one of its child nodes as retExpr. * child - The inlinee child. * retClsHnd - The struct class handle of the type of the inlinee. * * V04 = call() assignments are okay as we codegen it. Everything else needs to be a copy block or * would need a temp. For example, a cast(ldobj) will then be, cast(v05 = ldobj, v05); But it is * a very rare (or impossible) scenario that we'd have a retExpr transform into a ldobj other than * a lclVar/call. So it is not worthwhile to do pattern matching optimizations like addr(ldobj(op1)) * can just be op1. */ void Compiler::fgAttachStructInlineeToAsg(GenTree* tree, GenTree* child, CORINFO_CLASS_HANDLE retClsHnd) { // We are okay to have: // 1. V02 = call(); // 2. copyBlk(dstAddr, srcAddr); assert(tree->gtOper == GT_ASG); // We have an assignment, we codegen only V05 = call(). 
if (child->gtOper == GT_CALL && tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) { // If it is a multireg return on x64/ux, the local variable should be marked as lvIsMultiRegRet if (child->AsCall()->HasMultiRegRetVal()) { unsigned lclNum = tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); lvaTable[lclNum].lvIsMultiRegRet = true; } return; } GenTree* dstAddr = fgGetStructAsStructPtr(tree->AsOp()->gtOp1); GenTree* srcAddr = fgGetStructAsStructPtr( (child->gtOper == GT_CALL) ? fgAssignStructInlineeToVar(child, retClsHnd) // Assign to a variable if it is a call. : child); // Just get the address, if not a call. tree->ReplaceWith(gtNewCpObjNode(dstAddr, srcAddr, retClsHnd, false), this); } #endif // FEATURE_MULTIREG_RET //------------------------------------------------------------------------ // fgUpdateInlineReturnExpressionPlaceHolder: callback to replace the // inline return expression placeholder. // // Arguments: // pTree -- pointer to tree to examine for updates // data -- context data for the tree walk // // Returns: // fgWalkResult indicating the walk should continue; that // is we wish to fully explore the tree. // // Notes: // Looks for GT_RET_EXPR nodes that arose from tree splitting done // during importation for inline candidates, and replaces them. // // For successful inlines, substitutes the return value expression // from the inline body for the GT_RET_EXPR. // // For failed inlines, rejoins the original call into the tree from // whence it was split during importation. // // The code doesn't actually know if the corresponding inline // succeeded or not; it relies on the fact that gtInlineCandidate // initially points back at the call and is modified in place to // the inlinee return expression if the inline is successful (see // tail end of fgInsertInlineeBlocks for the update of iciCall). // // If the return type is a struct type and we're on a platform // where structs can be returned in multiple registers, ensure the // call has a suitable parent. // // If the original call type and the substitution type are different // the functions makes necessary updates. It could happen if there was // an implicit conversion in the inlinee body. // Compiler::fgWalkResult Compiler::fgUpdateInlineReturnExpressionPlaceHolder(GenTree** pTree, fgWalkData* data) { // All the operations here and in the corresponding postorder // callback (fgLateDevirtualization) are triggered by GT_CALL or // GT_RET_EXPR trees, and these (should) have the call side // effect flag. // // So bail out for any trees that don't have this flag. GenTree* tree = *pTree; if ((tree->gtFlags & GTF_CALL) == 0) { return WALK_SKIP_SUBTREES; } bool* madeChanges = static_cast<bool*>(data->pCallbackData); Compiler* comp = data->compiler; CORINFO_CLASS_HANDLE retClsHnd = NO_CLASS_HANDLE; while (tree->OperGet() == GT_RET_EXPR) { // We are going to copy the tree from the inlinee, // so record the handle now. // if (varTypeIsStruct(tree)) { retClsHnd = tree->AsRetExpr()->gtRetClsHnd; } // Skip through chains of GT_RET_EXPRs (say from nested inlines) // to the actual tree to use. // // Also we might as well try and fold the return value. // Eg returns of constant bools will have CASTS. // This folding may uncover more GT_RET_EXPRs, so we loop around // until we've got something distinct. 
// BasicBlockFlags bbFlags = BBF_EMPTY; GenTree* inlineCandidate = tree->gtRetExprVal(&bbFlags); inlineCandidate = comp->gtFoldExpr(inlineCandidate); var_types retType = tree->TypeGet(); #ifdef DEBUG if (comp->verbose) { printf("\nReplacing the return expression placeholder "); printTreeID(tree); printf(" with "); printTreeID(inlineCandidate); printf("\n"); // Dump out the old return expression placeholder it will be overwritten by the ReplaceWith below comp->gtDispTree(tree); } #endif // DEBUG var_types newType = inlineCandidate->TypeGet(); // If we end up swapping type we may need to retype the tree: if (retType != newType) { if ((retType == TYP_BYREF) && (tree->OperGet() == GT_IND)) { // - in an RVA static if we've reinterpreted it as a byref; assert(newType == TYP_I_IMPL); JITDUMP("Updating type of the return GT_IND expression to TYP_BYREF\n"); inlineCandidate->gtType = TYP_BYREF; } else { // - under a call if we changed size of the argument. GenTree* putArgType = comp->fgCheckCallArgUpdate(data->parent, inlineCandidate, retType); if (putArgType != nullptr) { inlineCandidate = putArgType; } } } tree->ReplaceWith(inlineCandidate, comp); *madeChanges = true; comp->compCurBB->bbFlags |= (bbFlags & BBF_SPLIT_GAINED); #ifdef DEBUG if (comp->verbose) { printf("\nInserting the inline return expression\n"); comp->gtDispTree(tree); printf("\n"); } #endif // DEBUG } // If an inline was rejected and the call returns a struct, we may // have deferred some work when importing call for cases where the // struct is returned in register(s). // // See the bail-out clauses in impFixupCallStructReturn for inline // candidates. // // Do the deferred work now. if (retClsHnd != NO_CLASS_HANDLE) { structPassingKind howToReturnStruct; var_types returnType = comp->getReturnTypeForStruct(retClsHnd, CorInfoCallConvExtension::Managed, &howToReturnStruct); GenTree* parent = data->parent; switch (howToReturnStruct) { #if FEATURE_MULTIREG_RET // Is this a type that is returned in multiple registers // or a via a primitve type that is larger than the struct type? // if so we need to force into into a form we accept. // i.e. LclVar = call() case SPK_ByValue: case SPK_ByValueAsHfa: { // See assert below, we only look one level above for an asg parent. if (parent->gtOper == GT_ASG) { // Either lhs is a call V05 = call(); or lhs is addr, and asg becomes a copyBlk. comp->fgAttachStructInlineeToAsg(parent, tree, retClsHnd); } else { // Just assign the inlinee to a variable to keep it simple. tree->ReplaceWith(comp->fgAssignStructInlineeToVar(tree, retClsHnd), comp); } *madeChanges = true; } break; #endif // FEATURE_MULTIREG_RET case SPK_EnclosingType: case SPK_PrimitiveType: // No work needs to be done, the call has struct type and should keep it. break; case SPK_ByReference: // We should have already added the return buffer // when we first imported the call break; default: noway_assert(!"Unexpected struct passing kind"); break; } } #if FEATURE_MULTIREG_RET #if defined(DEBUG) // Make sure we don't have a tree like so: V05 = (, , , retExpr); // Since we only look one level above for the parent for '=' and // do not check if there is a series of COMMAs. See above. // Importer and FlowGraph will not generate such a tree, so just // leaving an assert in here. This can be fixed by looking ahead // when we visit GT_ASG similar to fgAttachStructInlineeToAsg. 
// if (tree->OperGet() == GT_ASG) { GenTree* value = tree->AsOp()->gtOp2; if (value->OperGet() == GT_COMMA) { GenTree* effectiveValue = value->gtEffectiveVal(/*commaOnly*/ true); noway_assert(!varTypeIsStruct(effectiveValue) || (effectiveValue->OperGet() != GT_RET_EXPR) || !comp->IsMultiRegReturnedType(effectiveValue->AsRetExpr()->gtRetClsHnd, CorInfoCallConvExtension::Managed)); } } #endif // defined(DEBUG) #endif // FEATURE_MULTIREG_RET return WALK_CONTINUE; } //------------------------------------------------------------------------ // fgLateDevirtualization: re-examine calls after inlining to see if we // can do more devirtualization // // Arguments: // pTree -- pointer to tree to examine for updates // data -- context data for the tree walk // // Returns: // fgWalkResult indicating the walk should continue; that // is we wish to fully explore the tree. // // Notes: // We used to check this opportunistically in the preorder callback for // calls where the `obj` was fed by a return, but we now re-examine // all calls. // // Late devirtualization (and eventually, perhaps, other type-driven // opts like cast optimization) can happen now because inlining or other // optimizations may have provided more accurate types than we saw when // first importing the trees. // // It would be nice to screen candidate sites based on the likelihood // that something has changed. Otherwise we'll waste some time retrying // an optimization that will just fail again. Compiler::fgWalkResult Compiler::fgLateDevirtualization(GenTree** pTree, fgWalkData* data) { GenTree* tree = *pTree; GenTree* parent = data->parent; Compiler* comp = data->compiler; bool* madeChanges = static_cast<bool*>(data->pCallbackData); // In some (rare) cases the parent node of tree will be smashed to a NOP during // the preorder by fgAttachStructToInlineeArg. // // jit\Methodical\VT\callconv\_il_reljumper3 for x64 linux // // If so, just bail out here. if (tree == nullptr) { assert((parent != nullptr) && parent->OperGet() == GT_NOP); return WALK_CONTINUE; } if (tree->OperGet() == GT_CALL) { GenTreeCall* call = tree->AsCall(); bool tryLateDevirt = call->IsVirtual() && (call->gtCallType == CT_USER_FUNC); #ifdef DEBUG tryLateDevirt = tryLateDevirt && (JitConfig.JitEnableLateDevirtualization() == 1); #endif // DEBUG if (tryLateDevirt) { #ifdef DEBUG if (comp->verbose) { printf("**** Late devirt opportunity\n"); comp->gtDispTree(call); } #endif // DEBUG CORINFO_CONTEXT_HANDLE context = nullptr; CORINFO_METHOD_HANDLE method = call->gtCallMethHnd; unsigned methodFlags = 0; const bool isLateDevirtualization = true; const bool explicitTailCall = call->IsTailPrefixedCall(); if ((call->gtCallMoreFlags & GTF_CALL_M_LATE_DEVIRT) != 0) { context = call->gtLateDevirtualizationInfo->exactContextHnd; call->gtLateDevirtualizationInfo = nullptr; } comp->impDevirtualizeCall(call, nullptr, &method, &methodFlags, &context, nullptr, isLateDevirtualization, explicitTailCall); *madeChanges = true; } } else if (tree->OperGet() == GT_ASG) { // If we're assigning to a ref typed local that has one definition, // we may be able to sharpen the type for the local. 
GenTree* const effLhs = tree->gtGetOp1()->gtEffectiveVal(); if ((effLhs->OperGet() == GT_LCL_VAR) && (effLhs->TypeGet() == TYP_REF)) { const unsigned lclNum = effLhs->AsLclVarCommon()->GetLclNum(); LclVarDsc* lcl = comp->lvaGetDesc(lclNum); if (lcl->lvSingleDef) { GenTree* rhs = tree->gtGetOp2(); bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE newClass = comp->gtGetClassHandle(rhs, &isExact, &isNonNull); if (newClass != NO_CLASS_HANDLE) { comp->lvaUpdateClass(lclNum, newClass, isExact); *madeChanges = true; } } } // If we created a self-assignment (say because we are sharing return spill temps) // we can remove it. // GenTree* const lhs = tree->gtGetOp1(); GenTree* const rhs = tree->gtGetOp2(); if (lhs->OperIs(GT_LCL_VAR) && GenTree::Compare(lhs, rhs)) { comp->gtUpdateNodeSideEffects(tree); assert((tree->gtFlags & GTF_SIDE_EFFECT) == GTF_ASG); JITDUMP("... removing self-assignment\n"); DISPTREE(tree); tree->gtBashToNOP(); *madeChanges = true; } } else if (tree->OperGet() == GT_JTRUE) { // See if this jtrue is now foldable. BasicBlock* block = comp->compCurBB; GenTree* condTree = tree->AsOp()->gtOp1; assert(tree == block->lastStmt()->GetRootNode()); if (condTree->OperGet() == GT_CNS_INT) { JITDUMP(" ... found foldable jtrue at [%06u] in " FMT_BB "\n", dspTreeID(tree), block->bbNum); noway_assert((block->bbNext->countOfInEdges() > 0) && (block->bbJumpDest->countOfInEdges() > 0)); // We have a constant operand, and should have the all clear to optimize. // Update side effects on the tree, assert there aren't any, and bash to nop. comp->gtUpdateNodeSideEffects(tree); assert((tree->gtFlags & GTF_SIDE_EFFECT) == 0); tree->gtBashToNOP(); *madeChanges = true; BasicBlock* bNotTaken = nullptr; if (condTree->AsIntCon()->gtIconVal != 0) { block->bbJumpKind = BBJ_ALWAYS; bNotTaken = block->bbNext; } else { block->bbJumpKind = BBJ_NONE; bNotTaken = block->bbJumpDest; } comp->fgRemoveRefPred(bNotTaken, block); // If that was the last ref, a subsequent flow-opt pass // will clean up the now-unreachable bNotTaken, and any // other transitively unreachable blocks. if (bNotTaken->bbRefs == 0) { JITDUMP("... it looks like " FMT_BB " is now unreachable!\n", bNotTaken->bbNum); } } } else { const var_types retType = tree->TypeGet(); GenTree* foldedTree = comp->gtFoldExpr(tree); GenTree* putArgType = comp->fgCheckCallArgUpdate(data->parent, foldedTree, retType); if (putArgType != nullptr) { foldedTree = putArgType; } *pTree = foldedTree; *madeChanges = true; } return WALK_CONTINUE; } #ifdef DEBUG /***************************************************************************** * Callback to make sure there is no more GT_RET_EXPR and GTF_CALL_INLINE_CANDIDATE nodes. */ /* static */ Compiler::fgWalkResult Compiler::fgDebugCheckInlineCandidates(GenTree** pTree, fgWalkData* data) { GenTree* tree = *pTree; if (tree->gtOper == GT_CALL) { assert((tree->gtFlags & GTF_CALL_INLINE_CANDIDATE) == 0); } else { assert(tree->gtOper != GT_RET_EXPR); } return WALK_CONTINUE; } #endif // DEBUG void Compiler::fgInvokeInlineeCompiler(GenTreeCall* call, InlineResult* inlineResult, InlineContext** createdContext) { noway_assert(call->gtOper == GT_CALL); noway_assert((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0); noway_assert(opts.OptEnabled(CLFLG_INLINING)); // This is the InlineInfo struct representing a method to be inlined. 
InlineInfo inlineInfo; memset(&inlineInfo, 0, sizeof(inlineInfo)); CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd; inlineInfo.fncHandle = fncHandle; inlineInfo.iciCall = call; inlineInfo.iciStmt = fgMorphStmt; inlineInfo.iciBlock = compCurBB; inlineInfo.thisDereferencedFirst = false; inlineInfo.retExpr = nullptr; inlineInfo.retBB = nullptr; inlineInfo.retExprClassHnd = nullptr; inlineInfo.retExprClassHndIsExact = false; inlineInfo.inlineResult = inlineResult; #ifdef FEATURE_SIMD inlineInfo.hasSIMDTypeArgLocalOrReturn = false; #endif // FEATURE_SIMD InlineCandidateInfo* inlineCandidateInfo = call->gtInlineCandidateInfo; noway_assert(inlineCandidateInfo); // Store the link to inlineCandidateInfo into inlineInfo inlineInfo.inlineCandidateInfo = inlineCandidateInfo; unsigned inlineDepth = fgCheckInlineDepthAndRecursion(&inlineInfo); if (inlineResult->IsFailure()) { #ifdef DEBUG if (verbose) { printf("Recursive or deep inline recursion detected. Will not expand this INLINECANDIDATE \n"); } #endif // DEBUG return; } // Set the trap to catch all errors (including recoverable ones from the EE) struct Param { Compiler* pThis; GenTree* call; CORINFO_METHOD_HANDLE fncHandle; InlineCandidateInfo* inlineCandidateInfo; InlineInfo* inlineInfo; } param; memset(&param, 0, sizeof(param)); param.pThis = this; param.call = call; param.fncHandle = fncHandle; param.inlineCandidateInfo = inlineCandidateInfo; param.inlineInfo = &inlineInfo; bool success = eeRunWithErrorTrap<Param>( [](Param* pParam) { // Init the local var info of the inlinee pParam->pThis->impInlineInitVars(pParam->inlineInfo); if (pParam->inlineInfo->inlineResult->IsCandidate()) { /* Clear the temp table */ memset(pParam->inlineInfo->lclTmpNum, -1, sizeof(pParam->inlineInfo->lclTmpNum)); // // Prepare the call to jitNativeCode // pParam->inlineInfo->InlinerCompiler = pParam->pThis; if (pParam->pThis->impInlineInfo == nullptr) { pParam->inlineInfo->InlineRoot = pParam->pThis; } else { pParam->inlineInfo->InlineRoot = pParam->pThis->impInlineInfo->InlineRoot; } // The inline context is part of debug info and must be created // before we start creating statements; we lazily create it as // late as possible, which is here. pParam->inlineInfo->inlineContext = pParam->inlineInfo->InlineRoot->m_inlineStrategy ->NewContext(pParam->inlineInfo->inlineCandidateInfo->inlinersContext, pParam->inlineInfo->iciStmt, pParam->inlineInfo->iciCall); pParam->inlineInfo->argCnt = pParam->inlineCandidateInfo->methInfo.args.totalILArgs(); pParam->inlineInfo->tokenLookupContextHandle = pParam->inlineCandidateInfo->exactContextHnd; JITLOG_THIS(pParam->pThis, (LL_INFO100000, "INLINER: inlineInfo.tokenLookupContextHandle for %s set to 0x%p:\n", pParam->pThis->eeGetMethodFullName(pParam->fncHandle), pParam->pThis->dspPtr(pParam->inlineInfo->tokenLookupContextHandle))); JitFlags compileFlagsForInlinee = *pParam->pThis->opts.jitFlags; // The following flags are lost when inlining. // (This is checked in Compiler::compInitOptions().) 
compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_BBINSTR); compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_PROF_ENTERLEAVE); compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_DEBUG_EnC); compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_REVERSE_PINVOKE); compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_TRACK_TRANSITIONS); compileFlagsForInlinee.Set(JitFlags::JIT_FLAG_SKIP_VERIFICATION); #ifdef DEBUG if (pParam->pThis->verbose) { printf("\nInvoking compiler for the inlinee method %s :\n", pParam->pThis->eeGetMethodFullName(pParam->fncHandle)); } #endif // DEBUG int result = jitNativeCode(pParam->fncHandle, pParam->inlineCandidateInfo->methInfo.scope, pParam->pThis->info.compCompHnd, &pParam->inlineCandidateInfo->methInfo, (void**)pParam->inlineInfo, nullptr, &compileFlagsForInlinee, pParam->inlineInfo); if (result != CORJIT_OK) { // If we haven't yet determined why this inline fails, use // a catch-all something bad happened observation. InlineResult* innerInlineResult = pParam->inlineInfo->inlineResult; if (!innerInlineResult->IsFailure()) { innerInlineResult->NoteFatal(InlineObservation::CALLSITE_COMPILATION_FAILURE); } } } }, &param); if (!success) { #ifdef DEBUG if (verbose) { printf("\nInlining failed due to an exception during invoking the compiler for the inlinee method %s.\n", eeGetMethodFullName(fncHandle)); } #endif // DEBUG // If we haven't yet determined why this inline fails, use // a catch-all something bad happened observation. if (!inlineResult->IsFailure()) { inlineResult->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR); } } *createdContext = inlineInfo.inlineContext; if (inlineResult->IsFailure()) { return; } #ifdef DEBUG if (0 && verbose) { printf("\nDone invoking compiler for the inlinee method %s\n", eeGetMethodFullName(fncHandle)); } #endif // DEBUG // If there is non-NULL return, but we haven't set the pInlineInfo->retExpr, // That means we haven't imported any BB that contains CEE_RET opcode. // (This could happen for example for a BBJ_THROW block fall through a BBJ_RETURN block which // causes the BBJ_RETURN block not to be imported at all.) // Fail the inlining attempt if (inlineCandidateInfo->fncRetType != TYP_VOID && inlineInfo.retExpr == nullptr) { #ifdef DEBUG if (verbose) { printf("\nInlining failed because pInlineInfo->retExpr is not set in the inlinee method %s.\n", eeGetMethodFullName(fncHandle)); } #endif // DEBUG inlineResult->NoteFatal(InlineObservation::CALLEE_LACKS_RETURN); return; } // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! // The inlining attempt cannot be failed starting from this point. // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! // We've successfully obtain the list of inlinee's basic blocks. // Let's insert it to inliner's basic block list. fgInsertInlineeBlocks(&inlineInfo); #ifdef DEBUG if (verbose) { printf("Successfully inlined %s (%d IL bytes) (depth %d) [%s]\n", eeGetMethodFullName(fncHandle), inlineCandidateInfo->methInfo.ILCodeSize, inlineDepth, inlineResult->ReasonString()); } if (verbose) { printf("--------------------------------------------------------------------------------------------\n"); } #endif // DEBUG #if defined(DEBUG) impInlinedCodeSize += inlineCandidateInfo->methInfo.ILCodeSize; #endif // We inlined... inlineResult->NoteSuccess(); } //------------------------------------------------------------------------ // fgInsertInlineeBlocks: incorporate statements for an inline into the // root method. 
// // Arguments: // inlineInfo -- info for the inline // // Notes: // The inlining attempt cannot be failed once this method is called. // // Adds all inlinee statements, plus any glue statements needed // either before or after the inlined call. // // Updates flow graph and assigns weights to inlinee // blocks. Currently does not attempt to read IBC data for the // inlinee. // // Updates relevant root method status flags (eg optMethodFlags) to // include information from the inlinee. // // Marks newly added statements with an appropriate inline context. void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo) { GenTreeCall* iciCall = pInlineInfo->iciCall; Statement* iciStmt = pInlineInfo->iciStmt; BasicBlock* iciBlock = pInlineInfo->iciBlock; noway_assert(iciBlock->bbStmtList != nullptr); noway_assert(iciStmt->GetRootNode() != nullptr); assert(iciStmt->GetRootNode() == iciCall); noway_assert(iciCall->gtOper == GT_CALL); #ifdef DEBUG Statement* currentDumpStmt = nullptr; if (verbose) { printf("\n\n----------- Statements (and blocks) added due to the inlining of call "); printTreeID(iciCall); printf(" -----------\n"); } #endif // DEBUG // Mark success. pInlineInfo->inlineContext->SetSucceeded(pInlineInfo); // Prepend statements Statement* stmtAfter = fgInlinePrependStatements(pInlineInfo); #ifdef DEBUG if (verbose) { currentDumpStmt = stmtAfter; printf("\nInlinee method body:"); } #endif // DEBUG BasicBlock* topBlock = iciBlock; BasicBlock* bottomBlock = nullptr; if (InlineeCompiler->fgBBcount == 1) { // When fgBBCount is 1 we will always have a non-NULL fgFirstBB // PREFAST_ASSUME(InlineeCompiler->fgFirstBB != nullptr); // DDB 91389: Don't throw away the (only) inlinee block // when its return type is not BBJ_RETURN. // In other words, we need its BBJ_ to perform the right thing. if (InlineeCompiler->fgFirstBB->bbJumpKind == BBJ_RETURN) { // Inlinee contains just one BB. So just insert its statement list to topBlock. if (InlineeCompiler->fgFirstBB->bbStmtList != nullptr) { stmtAfter = fgInsertStmtListAfter(iciBlock, stmtAfter, InlineeCompiler->fgFirstBB->firstStmt()); } // Copy inlinee bbFlags to caller bbFlags. const BasicBlockFlags inlineeBlockFlags = InlineeCompiler->fgFirstBB->bbFlags; noway_assert((inlineeBlockFlags & BBF_HAS_JMP) == 0); noway_assert((inlineeBlockFlags & BBF_KEEP_BBJ_ALWAYS) == 0); // Todo: we may want to exclude other flags here. iciBlock->bbFlags |= (inlineeBlockFlags & ~BBF_RUN_RARELY); #ifdef DEBUG if (verbose) { noway_assert(currentDumpStmt); if (currentDumpStmt != stmtAfter) { do { currentDumpStmt = currentDumpStmt->GetNextStmt(); printf("\n"); gtDispStmt(currentDumpStmt); printf("\n"); } while (currentDumpStmt != stmtAfter); } } #endif // DEBUG // Append statements to null out gc ref locals, if necessary. fgInlineAppendStatements(pInlineInfo, iciBlock, stmtAfter); goto _Done; } } // // ======= Inserting inlinee's basic blocks =============== // bottomBlock = fgNewBBafter(topBlock->bbJumpKind, topBlock, true); bottomBlock->bbRefs = 1; bottomBlock->bbJumpDest = topBlock->bbJumpDest; bottomBlock->inheritWeight(topBlock); topBlock->bbJumpKind = BBJ_NONE; // Update block flags { const BasicBlockFlags originalFlags = topBlock->bbFlags; noway_assert((originalFlags & BBF_SPLIT_NONEXIST) == 0); topBlock->bbFlags &= ~(BBF_SPLIT_LOST); bottomBlock->bbFlags |= originalFlags & BBF_SPLIT_GAINED; } // Split statements between topBlock and bottomBlock. 
// First figure out bottomBlock_Begin Statement* bottomBlock_Begin; bottomBlock_Begin = stmtAfter->GetNextStmt(); if (topBlock->bbStmtList == nullptr) { // topBlock is empty before the split. // In this case, both topBlock and bottomBlock should be empty noway_assert(bottomBlock_Begin == nullptr); topBlock->bbStmtList = nullptr; bottomBlock->bbStmtList = nullptr; } else if (topBlock->bbStmtList == bottomBlock_Begin) { noway_assert(bottomBlock_Begin != nullptr); // topBlock contains at least one statement before the split. // And the split is before the first statement. // In this case, topBlock should be empty, and everything else should be moved to the bottomBlock. bottomBlock->bbStmtList = topBlock->bbStmtList; topBlock->bbStmtList = nullptr; } else if (bottomBlock_Begin == nullptr) { noway_assert(topBlock->bbStmtList != nullptr); // topBlock contains at least one statement before the split. // And the split is at the end of the topBlock. // In this case, everything should be kept in the topBlock, and the bottomBlock should be empty bottomBlock->bbStmtList = nullptr; } else { noway_assert(topBlock->bbStmtList != nullptr); noway_assert(bottomBlock_Begin != nullptr); // This is the normal case where both blocks should contain at least one statement. Statement* topBlock_Begin = topBlock->firstStmt(); noway_assert(topBlock_Begin != nullptr); Statement* topBlock_End = bottomBlock_Begin->GetPrevStmt(); noway_assert(topBlock_End != nullptr); Statement* bottomBlock_End = topBlock->lastStmt(); noway_assert(bottomBlock_End != nullptr); // Break the linkage between 2 blocks. topBlock_End->SetNextStmt(nullptr); // Fix up all the pointers. topBlock->bbStmtList = topBlock_Begin; topBlock->bbStmtList->SetPrevStmt(topBlock_End); bottomBlock->bbStmtList = bottomBlock_Begin; bottomBlock->bbStmtList->SetPrevStmt(bottomBlock_End); } // // Set the try and handler index and fix the jump types of inlinee's blocks. // for (BasicBlock* const block : InlineeCompiler->Blocks()) { noway_assert(!block->hasTryIndex()); noway_assert(!block->hasHndIndex()); block->copyEHRegion(iciBlock); block->bbFlags |= iciBlock->bbFlags & BBF_BACKWARD_JUMP; DebugInfo di = iciStmt->GetDebugInfo().GetRoot(); if (di.IsValid()) { block->bbCodeOffs = di.GetLocation().GetOffset(); block->bbCodeOffsEnd = block->bbCodeOffs + 1; // TODO: is code size of 1 some magic number for inlining? } else { block->bbCodeOffs = 0; // TODO: why not BAD_IL_OFFSET? block->bbCodeOffsEnd = 0; block->bbFlags |= BBF_INTERNAL; } if (block->bbJumpKind == BBJ_RETURN) { noway_assert((block->bbFlags & BBF_HAS_JMP) == 0); if (block->bbNext) { JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_ALWAYS to bottomBlock " FMT_BB "\n", block->bbNum, bottomBlock->bbNum); block->bbJumpKind = BBJ_ALWAYS; block->bbJumpDest = bottomBlock; } else { JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_NONE\n", block->bbNum); block->bbJumpKind = BBJ_NONE; } } } // Insert inlinee's blocks into inliner's block list. topBlock->setNext(InlineeCompiler->fgFirstBB); InlineeCompiler->fgLastBB->setNext(bottomBlock); // // Add inlinee's block count to inliner's. // fgBBcount += InlineeCompiler->fgBBcount; // Append statements to null out gc ref locals, if necessary. fgInlineAppendStatements(pInlineInfo, bottomBlock, nullptr); #ifdef DEBUG if (verbose) { fgDispBasicBlocks(InlineeCompiler->fgFirstBB, InlineeCompiler->fgLastBB, true); } #endif // DEBUG _Done: // // At this point, we have successully inserted inlinee's code. 
// // // Copy out some flags // compLongUsed |= InlineeCompiler->compLongUsed; compFloatingPointUsed |= InlineeCompiler->compFloatingPointUsed; compLocallocUsed |= InlineeCompiler->compLocallocUsed; compLocallocOptimized |= InlineeCompiler->compLocallocOptimized; compQmarkUsed |= InlineeCompiler->compQmarkUsed; compGSReorderStackLayout |= InlineeCompiler->compGSReorderStackLayout; compHasBackwardJump |= InlineeCompiler->compHasBackwardJump; lvaGenericsContextInUse |= InlineeCompiler->lvaGenericsContextInUse; #ifdef FEATURE_SIMD if (InlineeCompiler->usesSIMDTypes()) { setUsesSIMDTypes(true); } #endif // FEATURE_SIMD // Update unmanaged call details info.compUnmanagedCallCountWithGCTransition += InlineeCompiler->info.compUnmanagedCallCountWithGCTransition; // Update stats for inlinee PGO // if (InlineeCompiler->fgPgoSchema != nullptr) { fgPgoInlineePgo++; } else if (InlineeCompiler->fgPgoFailReason != nullptr) { // Single block inlinees may not have probes // when we've ensabled minimal profiling (which // is now the default). // if (InlineeCompiler->fgBBcount == 1) { fgPgoInlineeNoPgoSingleBlock++; } else { fgPgoInlineeNoPgo++; } } // Update optMethodFlags CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG unsigned optMethodFlagsBefore = optMethodFlags; #endif optMethodFlags |= InlineeCompiler->optMethodFlags; #ifdef DEBUG if (optMethodFlags != optMethodFlagsBefore) { JITDUMP("INLINER: Updating optMethodFlags -- root:%0x callee:%0x new:%0x\n", optMethodFlagsBefore, InlineeCompiler->optMethodFlags, optMethodFlags); } #endif // If an inlinee needs GS cookie we need to make sure that the cookie will not be allocated at zero stack offset. // Note that if the root method needs GS cookie then this has already been taken care of. if (!getNeedsGSSecurityCookie() && InlineeCompiler->getNeedsGSSecurityCookie()) { setNeedsGSSecurityCookie(); const unsigned dummy = lvaGrabTempWithImplicitUse(false DEBUGARG("GSCookie dummy for inlinee")); LclVarDsc* gsCookieDummy = lvaGetDesc(dummy); gsCookieDummy->lvType = TYP_INT; gsCookieDummy->lvIsTemp = true; // It is not alive at all, set the flag to prevent zero-init. lvaSetVarDoNotEnregister(dummy DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr)); } // If there is non-NULL return, replace the GT_CALL with its return value expression, // so later it will be picked up by the GT_RET_EXPR node. if ((pInlineInfo->inlineCandidateInfo->fncRetType != TYP_VOID) || (iciCall->gtReturnType == TYP_STRUCT)) { noway_assert(pInlineInfo->retExpr); #ifdef DEBUG if (verbose) { printf("\nReturn expression for call at "); printTreeID(iciCall); printf(" is\n"); gtDispTree(pInlineInfo->retExpr); } #endif // DEBUG // Replace the call with the return expression. Note that iciCall won't be part of the IR // but may still be referenced from a GT_RET_EXPR node. We will replace GT_RET_EXPR node // in fgUpdateInlineReturnExpressionPlaceHolder. At that time we will also update the flags // on the basic block of GT_RET_EXPR node. if (iciCall->gtInlineCandidateInfo->retExpr->OperGet() == GT_RET_EXPR) { // Save the basic block flags from the retExpr basic block. 
iciCall->gtInlineCandidateInfo->retExpr->AsRetExpr()->bbFlags = pInlineInfo->retBB->bbFlags; } if (bottomBlock != nullptr) { // We've split the iciblock into two and the RET_EXPR was possibly moved to the bottomBlock // so let's update its flags with retBB's ones bottomBlock->bbFlags |= pInlineInfo->retBB->bbFlags & BBF_COMPACT_UPD; } iciCall->ReplaceWith(pInlineInfo->retExpr, this); } // // Detach the GT_CALL node from the original statement by hanging a "nothing" node under it, // so that fgMorphStmts can remove the statement once we return from here. // iciStmt->SetRootNode(gtNewNothingNode()); } //------------------------------------------------------------------------ // fgInlinePrependStatements: prepend statements needed to match up // caller and inlined callee // // Arguments: // inlineInfo -- info for the inline // // Return Value: // The last statement that was added, or the original call if no // statements were added. // // Notes: // Statements prepended may include the following: // * This pointer null check // * Class initialization // * Zeroing of must-init locals in the callee // * Passing of call arguments via temps // // Newly added statements are placed just after the original call // and are are given the same inline context as the call any calls // added here will appear to have been part of the immediate caller. Statement* Compiler::fgInlinePrependStatements(InlineInfo* inlineInfo) { BasicBlock* block = inlineInfo->iciBlock; Statement* callStmt = inlineInfo->iciStmt; const DebugInfo& callDI = callStmt->GetDebugInfo(); Statement* postStmt = callStmt->GetNextStmt(); Statement* afterStmt = callStmt; // afterStmt is the place where the new statements should be inserted after. Statement* newStmt = nullptr; GenTreeCall* call = inlineInfo->iciCall->AsCall(); noway_assert(call->gtOper == GT_CALL); #ifdef DEBUG if (0 && verbose) { printf("\nfgInlinePrependStatements for iciCall= "); printTreeID(call); printf(":\n"); } #endif // Prepend statements for any initialization / side effects InlArgInfo* inlArgInfo = inlineInfo->inlArgInfo; InlLclVarInfo* lclVarInfo = inlineInfo->lclVarInfo; GenTree* tree; // Create the null check statement (but not appending it to the statement list yet) for the 'this' pointer if // necessary. // The NULL check should be done after "argument setup statements". // The only reason we move it here is for calling "impInlineFetchArg(0,..." to reserve a temp // for the "this" pointer. // Note: Here we no longer do the optimization that was done by thisDereferencedFirst in the old inliner. // However the assetionProp logic will remove any unecessary null checks that we may have added // GenTree* nullcheck = nullptr; if (call->gtFlags & GTF_CALL_NULLCHECK && !inlineInfo->thisDereferencedFirst) { // Call impInlineFetchArg to "reserve" a temp for the "this" pointer. GenTree* thisOp = impInlineFetchArg(0, inlArgInfo, lclVarInfo); if (fgAddrCouldBeNull(thisOp)) { nullcheck = gtNewNullCheck(thisOp, block); // The NULL-check statement will be inserted to the statement list after those statements // that assign arguments to temps and before the actual body of the inlinee method. 
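        // (When every piece is needed, the statements prepended after the call
        //  statement end up roughly in this order: argument temp assignments,
        //  the class-initialization helper call, this null check, and finally
        //  zero-init of the inlinee's locals -- see the insertions below.)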
} } /* Treat arguments that had to be assigned to temps */ if (inlineInfo->argCnt) { #ifdef DEBUG if (verbose) { printf("\nArguments setup:\n"); } #endif // DEBUG for (unsigned argNum = 0; argNum < inlineInfo->argCnt; argNum++) { const InlArgInfo& argInfo = inlArgInfo[argNum]; const bool argIsSingleDef = !argInfo.argHasLdargaOp && !argInfo.argHasStargOp; GenTree* argNode = inlArgInfo[argNum].argNode; const bool argHasPutArg = argNode->OperIs(GT_PUTARG_TYPE); BasicBlockFlags bbFlags = BBF_EMPTY; argNode = argNode->gtSkipPutArgType(); argNode = argNode->gtRetExprVal(&bbFlags); if (argInfo.argHasTmp) { noway_assert(argInfo.argIsUsed); /* argBashTmpNode is non-NULL iff the argument's value was referenced exactly once by the original IL. This offers an opportunity to avoid an intermediate temp and just insert the original argument tree. However, if the temp node has been cloned somewhere while importing (e.g. when handling isinst or dup), or if the IL took the address of the argument, then argBashTmpNode will be set (because the value was only explicitly retrieved once) but the optimization cannot be applied. */ GenTree* argSingleUseNode = argInfo.argBashTmpNode; // argHasPutArg disqualifies the arg from a direct substitution because we don't have information about // its user. For example: replace `LCL_VAR short` with `PUTARG_TYPE short->LCL_VAR int`, // we should keep `PUTARG_TYPE` iff the user is a call that needs `short` and delete it otherwise. if ((argSingleUseNode != nullptr) && !(argSingleUseNode->gtFlags & GTF_VAR_CLONED) && argIsSingleDef && !argHasPutArg) { // Change the temp in-place to the actual argument. // We currently do not support this for struct arguments, so it must not be a GT_OBJ. assert(argNode->gtOper != GT_OBJ); argSingleUseNode->ReplaceWith(argNode, this); continue; } else { // We're going to assign the argument value to the // temp we use for it in the inline body. const unsigned tmpNum = argInfo.argTmpNum; const var_types argType = lclVarInfo[argNum].lclTypeInfo; // Create the temp assignment for this argument CORINFO_CLASS_HANDLE structHnd = NO_CLASS_HANDLE; if (varTypeIsStruct(argType)) { structHnd = gtGetStructHandleIfPresent(argNode); noway_assert((structHnd != NO_CLASS_HANDLE) || (argType != TYP_STRUCT)); } // Unsafe value cls check is not needed for // argTmpNum here since in-linee compiler instance // would have iterated over these and marked them // accordingly. impAssignTempGen(tmpNum, argNode, structHnd, (unsigned)CHECK_SPILL_NONE, &afterStmt, callDI, block); // We used to refine the temp type here based on // the actual arg, but we now do this up front, when // creating the temp, over in impInlineFetchArg. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) { gtDispStmt(afterStmt); } #endif // DEBUG } block->bbFlags |= (bbFlags & BBF_SPLIT_GAINED); } else if (argInfo.argIsByRefToStructLocal) { // Do nothing. Arg was directly substituted as we read // the inlinee. 
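            // (For example, for a 'ref someStructLocal' argument the inlinee's
            //  uses were rewritten to refer to the caller's local directly while
            //  the inlinee was imported, so no setup statement is needed here.)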
} else { /* The argument is either not used or a const or lcl var */ noway_assert(!argInfo.argIsUsed || argInfo.argIsInvariant || argInfo.argIsLclVar); /* Make sure we didnt change argNode's along the way, or else subsequent uses of the arg would have worked with the bashed value */ if (argInfo.argIsInvariant) { assert(argNode->OperIsConst() || argNode->gtOper == GT_ADDR); } noway_assert((argInfo.argIsLclVar == 0) == (argNode->gtOper != GT_LCL_VAR || (argNode->gtFlags & GTF_GLOB_REF))); /* If the argument has side effects, append it */ if (argInfo.argHasSideEff) { noway_assert(argInfo.argIsUsed == false); newStmt = nullptr; bool append = true; if (argNode->gtOper == GT_OBJ || argNode->gtOper == GT_MKREFANY) { // Don't put GT_OBJ node under a GT_COMMA. // Codegen can't deal with it. // Just hang the address here in case there are side-effect. newStmt = gtNewStmt(gtUnusedValNode(argNode->AsOp()->gtOp1), callDI); } else { // In some special cases, unused args with side effects can // trigger further changes. // // (1) If the arg is a static field access and the field access // was produced by a call to EqualityComparer<T>.get_Default, the // helper call to ensure the field has a value can be suppressed. // This helper call is marked as a "Special DCE" helper during // importation, over in fgGetStaticsCCtorHelper. // // (2) NYI. If, after tunneling through GT_RET_VALs, we find that // the actual arg expression has no side effects, we can skip // appending all together. This will help jit TP a bit. // // Chase through any GT_RET_EXPRs to find the actual argument // expression. GenTree* actualArgNode = argNode->gtRetExprVal(&bbFlags); // For case (1) // // Look for the following tree shapes // prejit: (IND (ADD (CONST, CALL(special dce helper...)))) // jit : (COMMA (CALL(special dce helper...), (FIELD ...))) if (actualArgNode->gtOper == GT_COMMA) { // Look for (COMMA (CALL(special dce helper...), (FIELD ...))) GenTree* op1 = actualArgNode->AsOp()->gtOp1; GenTree* op2 = actualArgNode->AsOp()->gtOp2; if (op1->IsCall() && ((op1->AsCall()->gtCallMoreFlags & GTF_CALL_M_HELPER_SPECIAL_DCE) != 0) && (op2->gtOper == GT_FIELD) && ((op2->gtFlags & GTF_EXCEPT) == 0)) { JITDUMP("\nPerforming special dce on unused arg [%06u]:" " actual arg [%06u] helper call [%06u]\n", argNode->gtTreeID, actualArgNode->gtTreeID, op1->gtTreeID); // Drop the whole tree append = false; } } else if (actualArgNode->gtOper == GT_IND) { // Look for (IND (ADD (CONST, CALL(special dce helper...)))) GenTree* addr = actualArgNode->AsOp()->gtOp1; if (addr->gtOper == GT_ADD) { GenTree* op1 = addr->AsOp()->gtOp1; GenTree* op2 = addr->AsOp()->gtOp2; if (op1->IsCall() && ((op1->AsCall()->gtCallMoreFlags & GTF_CALL_M_HELPER_SPECIAL_DCE) != 0) && op2->IsCnsIntOrI()) { // Drop the whole tree JITDUMP("\nPerforming special dce on unused arg [%06u]:" " actual arg [%06u] helper call [%06u]\n", argNode->gtTreeID, actualArgNode->gtTreeID, op1->gtTreeID); append = false; } } } } if (!append) { assert(newStmt == nullptr); JITDUMP("Arg tree side effects were discardable, not appending anything for arg\n"); } else { // If we don't have something custom to append, // just append the arg node as an unused value. 
if (newStmt == nullptr) { newStmt = gtNewStmt(gtUnusedValNode(argNode), callDI); } fgInsertStmtAfter(block, afterStmt, newStmt); afterStmt = newStmt; #ifdef DEBUG if (verbose) { gtDispStmt(afterStmt); } #endif // DEBUG } } else if (argNode->IsBoxedValue()) { // Try to clean up any unnecessary boxing side effects // since the box itself will be ignored. gtTryRemoveBoxUpstreamEffects(argNode); } block->bbFlags |= (bbFlags & BBF_SPLIT_GAINED); } } } // Add the CCTOR check if asked for. // Note: We no longer do the optimization that is done before by staticAccessedFirstUsingHelper in the old inliner. // Therefore we might prepend redundant call to HELPER.CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE // before the inlined method body, even if a static field of this type was accessed in the inlinee // using a helper before any other observable side-effect. if (inlineInfo->inlineCandidateInfo->initClassResult & CORINFO_INITCLASS_USE_HELPER) { CORINFO_CLASS_HANDLE exactClass = eeGetClassFromContext(inlineInfo->inlineCandidateInfo->exactContextHnd); tree = fgGetSharedCCtor(exactClass); newStmt = gtNewStmt(tree, callDI); fgInsertStmtAfter(block, afterStmt, newStmt); afterStmt = newStmt; } // Insert the nullcheck statement now. if (nullcheck) { newStmt = gtNewStmt(nullcheck, callDI); fgInsertStmtAfter(block, afterStmt, newStmt); afterStmt = newStmt; } // // Now zero-init inlinee locals // CORINFO_METHOD_INFO* InlineeMethodInfo = InlineeCompiler->info.compMethodInfo; unsigned lclCnt = InlineeMethodInfo->locals.numArgs; bool bbInALoop = (block->bbFlags & BBF_BACKWARD_JUMP) != 0; bool bbIsReturn = block->bbJumpKind == BBJ_RETURN; // If the callee contains zero-init locals, we need to explicitly initialize them if we are // in a loop or if the caller doesn't have compInitMem set. Otherwise we can rely on the // normal logic in the caller to insert zero-init in the prolog if necessary. if ((lclCnt != 0) && ((InlineeMethodInfo->options & CORINFO_OPT_INIT_LOCALS) != 0) && ((bbInALoop && !bbIsReturn) || !info.compInitMem)) { #ifdef DEBUG if (verbose) { printf("\nZero init inlinee locals:\n"); } #endif // DEBUG for (unsigned lclNum = 0; lclNum < lclCnt; lclNum++) { unsigned tmpNum = inlineInfo->lclTmpNum[lclNum]; // If the local is used check whether we need to insert explicit zero initialization. if (tmpNum != BAD_VAR_NUM) { LclVarDsc* const tmpDsc = lvaGetDesc(tmpNum); if (!fgVarNeedsExplicitZeroInit(tmpNum, bbInALoop, bbIsReturn)) { JITDUMP("\nSuppressing zero-init for V%02u -- expect to zero in prolog\n", tmpNum); tmpDsc->lvSuppressedZeroInit = 1; compSuppressedZeroInit = true; continue; } var_types lclTyp = (var_types)lvaTable[tmpNum].lvType; noway_assert(lclTyp == lclVarInfo[lclNum + inlineInfo->argCnt].lclTypeInfo); if (!varTypeIsStruct(lclTyp)) { // Unsafe value cls check is not needed here since in-linee compiler instance would have // iterated over locals and marked accordingly. 
impAssignTempGen(tmpNum, gtNewZeroConNode(genActualType(lclTyp)), NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_NONE, &afterStmt, callDI, block); } else { tree = gtNewBlkOpNode(gtNewLclvNode(tmpNum, lclTyp), // Dest gtNewIconNode(0), // Value false, // isVolatile false); // not copyBlock newStmt = gtNewStmt(tree, callDI); fgInsertStmtAfter(block, afterStmt, newStmt); afterStmt = newStmt; } #ifdef DEBUG if (verbose) { gtDispStmt(afterStmt); } #endif // DEBUG } } } return afterStmt; } //------------------------------------------------------------------------ // fgInlineAppendStatements: Append statements that are needed // after the inlined call. // // Arguments: // inlineInfo - information about the inline // block - basic block for the new statements // stmtAfter - (optional) insertion point for mid-block cases // // Notes: // If the call we're inlining is in tail position then // we skip nulling the locals, since it can interfere // with tail calls introduced by the local. void Compiler::fgInlineAppendStatements(InlineInfo* inlineInfo, BasicBlock* block, Statement* stmtAfter) { // Null out any gc ref locals if (!inlineInfo->HasGcRefLocals()) { // No ref locals, nothing to do. JITDUMP("fgInlineAppendStatements: no gc ref inline locals.\n"); return; } if (inlineInfo->iciCall->IsImplicitTailCall()) { JITDUMP("fgInlineAppendStatements: implicit tail call; skipping nulling.\n"); return; } JITDUMP("fgInlineAppendStatements: nulling out gc ref inlinee locals.\n"); Statement* callStmt = inlineInfo->iciStmt; const DebugInfo& callDI = callStmt->GetDebugInfo(); CORINFO_METHOD_INFO* InlineeMethodInfo = InlineeCompiler->info.compMethodInfo; const unsigned lclCnt = InlineeMethodInfo->locals.numArgs; InlLclVarInfo* lclVarInfo = inlineInfo->lclVarInfo; unsigned gcRefLclCnt = inlineInfo->numberOfGcRefLocals; const unsigned argCnt = inlineInfo->argCnt; for (unsigned lclNum = 0; lclNum < lclCnt; lclNum++) { // Is the local a gc ref type? Need to look at the // inline info for this since we will not have local // temps for unused inlinee locals. const var_types lclTyp = lclVarInfo[argCnt + lclNum].lclTypeInfo; if (!varTypeIsGC(lclTyp)) { // Nope, nothing to null out. continue; } // Ensure we're examining just the right number of locals. assert(gcRefLclCnt > 0); gcRefLclCnt--; // Fetch the temp for this inline local const unsigned tmpNum = inlineInfo->lclTmpNum[lclNum]; // Is the local used at all? if (tmpNum == BAD_VAR_NUM) { // Nope, nothing to null out. continue; } // Local was used, make sure the type is consistent. assert(lvaTable[tmpNum].lvType == lclTyp); // Does the local we're about to null out appear in the return // expression? If so we somehow messed up and didn't properly // spill the return value. See impInlineFetchLocal. GenTree* retExpr = inlineInfo->retExpr; if (retExpr != nullptr) { const bool interferesWithReturn = gtHasRef(inlineInfo->retExpr, tmpNum); noway_assert(!interferesWithReturn); } // Assign null to the local. GenTree* nullExpr = gtNewTempAssign(tmpNum, gtNewZeroConNode(lclTyp)); Statement* nullStmt = gtNewStmt(nullExpr, callDI); if (stmtAfter == nullptr) { fgInsertStmtAtBeg(block, nullStmt); } else { fgInsertStmtAfter(block, stmtAfter, nullStmt); } stmtAfter = nullStmt; #ifdef DEBUG if (verbose) { gtDispStmt(nullStmt); } #endif // DEBUG } // There should not be any GC ref locals left to null out. assert(gcRefLclCnt == 0); } //------------------------------------------------------------------------ // fgNeedReturnSpillTemp: Answers does the inlinee need to spill all returns // as a temp. 
//
// Return Value:
//    true if the inlinee has to spill return exprs.
bool Compiler::fgNeedReturnSpillTemp()
{
    assert(compIsForInlining());
    return (lvaInlineeReturnSpillTemp != BAD_VAR_NUM);
}
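// For illustration only (a sketch, not the actual importer code; exact call
// sites and arguments may differ): when this predicate is true, the importer
// stores each return expression to the shared spill temp instead of recording
// it directly, e.g.
//
//     if (fgNeedReturnSpillTemp())
//     {
//         // spill the CEE_RET operand to the temp shared by all return sites
//         impAssignTempGen(lvaInlineeReturnSpillTemp, retExprTree, ...);
//     }
//
// where 'retExprTree' is just an illustrative name for the return value tree.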
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif // Flowgraph Inline Support /*****************************************************************************/ //------------------------------------------------------------------------ // fgCheckForInlineDepthAndRecursion: compute depth of the candidate, and // check for recursion. // // Return Value: // The depth of the inline candidate. The root method is a depth 0, top-level // candidates at depth 1, etc. // // Notes: // We generally disallow recursive inlines by policy. However, they are // supported by the underlying machinery. // // Likewise the depth limit is a policy consideration, and serves mostly // as a safeguard to prevent runaway inlining of small methods. // unsigned Compiler::fgCheckInlineDepthAndRecursion(InlineInfo* inlineInfo) { BYTE* candidateCode = inlineInfo->inlineCandidateInfo->methInfo.ILCode; InlineContext* inlineContext = inlineInfo->inlineCandidateInfo->inlinersContext; InlineResult* inlineResult = inlineInfo->inlineResult; // There should be a context for all candidates. assert(inlineContext != nullptr); int depth = 0; for (; inlineContext != nullptr; inlineContext = inlineContext->GetParent()) { assert(inlineContext->GetCode() != nullptr); depth++; if (inlineContext->GetCode() == candidateCode) { // This inline candidate has the same IL code buffer as an already // inlined method does. inlineResult->NoteFatal(InlineObservation::CALLSITE_IS_RECURSIVE); // No need to note CALLSITE_DEPTH we're already rejecting this candidate return depth; } if (depth > InlineStrategy::IMPLEMENTATION_MAX_INLINE_DEPTH) { break; } } inlineResult->NoteInt(InlineObservation::CALLSITE_DEPTH, depth); return depth; } //------------------------------------------------------------------------ // fgInline - expand inline candidates // // Returns: // phase status indicating if anything was modified // // Notes: // Inline candidates are identified during importation and candidate calls // must be top-level expressions. In input IR, the result of the call (if any) // is consumed elsewhere by a GT_RET_EXPR node. // // For successful inlines, calls are replaced by a sequence of argument setup // instructions, the inlined method body, and return value cleanup. Note // Inlining may introduce new inline candidates. These are processed in a // depth-first fashion, as the inliner walks the IR in statement order. // // After inline expansion in a statement, the statement tree // is walked to locate GT_RET_EXPR nodes. These are replaced by either // * the original call tree, if the inline failed // * the return value tree from the inlinee, if the inline succeeded // // This replacement happens in preorder; on the postorder side of the same // tree walk, we look for opportunties to devirtualize or optimize now that // we know the context for the newly supplied return value tree. // // Inline arguments may be directly substituted into the body of the inlinee // in some cases. See impInlineFetchArg. 
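//
// A purely illustrative example (tree shapes simplified, locals invented):
// a use of an inline candidate imported as
//
//     STMT:  ASG(LCL_VAR V02, RET_EXPR(CALL Foo))
//
// ends up, once Foo is successfully inlined and the placeholder is replaced, as
//
//     STMT:  ASG(LCL_VAR V02, <Foo's return value expression>)
//
// whereas a failed inline puts the original CALL back in place of the RET_EXPR.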
// PhaseStatus Compiler::fgInline() { if (!opts.OptEnabled(CLFLG_INLINING)) { return PhaseStatus::MODIFIED_NOTHING; } #ifdef DEBUG fgPrintInlinedMethods = JitConfig.JitPrintInlinedMethods().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args); #endif // DEBUG noway_assert(fgFirstBB != nullptr); BasicBlock* block = fgFirstBB; bool madeChanges = false; do { // Make the current basic block address available globally compCurBB = block; for (Statement* const stmt : block->Statements()) { #if defined(DEBUG) || defined(INLINE_DATA) // In debug builds we want the inline tree to show all failed // inlines. Some inlines may fail very early and never make it to // candidate stage. So scan the tree looking for those early failures. fgWalkTreePre(stmt->GetRootNodePointer(), fgFindNonInlineCandidate, stmt); #endif GenTree* expr = stmt->GetRootNode(); // The importer ensures that all inline candidates are // statement expressions. So see if we have a call. if (expr->IsCall()) { GenTreeCall* call = expr->AsCall(); // We do. Is it an inline candidate? // // Note we also process GuardeDevirtualizationCandidates here as we've // split off GT_RET_EXPRs for them even when they are not inline candidates // as we need similar processing to ensure they get patched back to where // they belong. if (call->IsInlineCandidate() || call->IsGuardedDevirtualizationCandidate()) { InlineResult inlineResult(this, call, stmt, "fgInline"); fgMorphStmt = stmt; fgMorphCallInline(call, &inlineResult); // If there's a candidate to process, we will make changes madeChanges = true; // fgMorphCallInline may have updated the // statement expression to a GT_NOP if the // call returned a value, regardless of // whether the inline succeeded or failed. // // If so, remove the GT_NOP and continue // on with the next statement. if (stmt->GetRootNode()->IsNothingNode()) { fgRemoveStmt(block, stmt); continue; } } } // See if we need to replace some return value place holders. // Also, see if this replacement enables further devirtualization. // // Note we have both preorder and postorder callbacks here. // // The preorder callback is responsible for replacing GT_RET_EXPRs // with the appropriate expansion (call or inline result). // Replacement may introduce subtrees with GT_RET_EXPR and so // we rely on the preorder to recursively process those as well. // // On the way back up, the postorder callback then re-examines nodes for // possible further optimization, as the (now complete) GT_RET_EXPR // replacement may have enabled optimizations by providing more // specific types for trees or variables. fgWalkTree(stmt->GetRootNodePointer(), fgUpdateInlineReturnExpressionPlaceHolder, fgLateDevirtualization, (void*)&madeChanges); // See if stmt is of the form GT_COMMA(call, nop) // If yes, we can get rid of GT_COMMA. if (expr->OperGet() == GT_COMMA && expr->AsOp()->gtOp1->OperGet() == GT_CALL && expr->AsOp()->gtOp2->OperGet() == GT_NOP) { madeChanges = true; stmt->SetRootNode(expr->AsOp()->gtOp1); } } block = block->bbNext; } while (block); #ifdef DEBUG // Check that we should not have any inline candidate or return value place holder left. 
block = fgFirstBB; noway_assert(block); do { for (Statement* const stmt : block->Statements()) { // Call Compiler::fgDebugCheckInlineCandidates on each node fgWalkTreePre(stmt->GetRootNodePointer(), fgDebugCheckInlineCandidates); } block = block->bbNext; } while (block); fgVerifyHandlerTab(); if (verbose || fgPrintInlinedMethods) { JITDUMP("**************** Inline Tree"); printf("\n"); m_inlineStrategy->Dump(verbose || JitConfig.JitPrintInlinedMethodsVerbose()); } #endif // DEBUG return madeChanges ? PhaseStatus::MODIFIED_EVERYTHING : PhaseStatus::MODIFIED_NOTHING; } #if defined(DEBUG) || defined(INLINE_DATA) //------------------------------------------------------------------------ // fgFindNonInlineCandidate: tree walk helper to ensure that a tree node // that is not an inline candidate is noted as a failed inline. // // Arguments: // pTree - pointer to pointer tree node being walked // data - contextual data for the walk // // Return Value: // walk result // // Note: // Invokes fgNoteNonInlineCandidate on the nodes it finds. Compiler::fgWalkResult Compiler::fgFindNonInlineCandidate(GenTree** pTree, fgWalkData* data) { GenTree* tree = *pTree; if (tree->gtOper == GT_CALL) { Compiler* compiler = data->compiler; Statement* stmt = (Statement*)data->pCallbackData; GenTreeCall* call = tree->AsCall(); compiler->fgNoteNonInlineCandidate(stmt, call); } return WALK_CONTINUE; } //------------------------------------------------------------------------ // fgNoteNonInlineCandidate: account for inlining failures in calls // not marked as inline candidates. // // Arguments: // stmt - statement containing the call // call - the call itself // // Notes: // Used in debug only to try and place descriptions of inline failures // into the proper context in the inline tree. void Compiler::fgNoteNonInlineCandidate(Statement* stmt, GenTreeCall* call) { if (call->IsInlineCandidate() || call->IsGuardedDevirtualizationCandidate()) { return; } InlineResult inlineResult(this, call, nullptr, "fgNoteNonInlineCandidate"); InlineObservation currentObservation = InlineObservation::CALLSITE_NOT_CANDIDATE; // Try and recover the reason left behind when the jit decided // this call was not a candidate. InlineObservation priorObservation = call->gtInlineObservation; if (InlIsValidObservation(priorObservation)) { currentObservation = priorObservation; } // Propagate the prior failure observation to this result. inlineResult.NotePriorFailure(currentObservation); inlineResult.SetReported(); if (call->gtCallType == CT_USER_FUNC) { m_inlineStrategy->NewContext(call->gtInlineContext, stmt, call)->SetFailed(&inlineResult); } } #endif #if FEATURE_MULTIREG_RET /********************************************************************************* * * tree - The node which needs to be converted to a struct pointer. * * Return the pointer by either __replacing__ the tree node with a suitable pointer * type or __without replacing__ and just returning a subtree or by __modifying__ * a subtree. */ GenTree* Compiler::fgGetStructAsStructPtr(GenTree* tree) { noway_assert(tree->OperIs(GT_LCL_VAR, GT_FIELD, GT_IND, GT_BLK, GT_OBJ, GT_COMMA) || tree->OperIsSIMD() || tree->OperIsHWIntrinsic()); // GT_CALL, cannot get address of call. // GT_MKREFANY, inlining should've been aborted due to mkrefany opcode. 
// GT_RET_EXPR, cannot happen after fgUpdateInlineReturnExpressionPlaceHolder switch (tree->OperGet()) { case GT_BLK: case GT_OBJ: case GT_IND: return tree->AsOp()->gtOp1; case GT_COMMA: tree->AsOp()->gtOp2 = fgGetStructAsStructPtr(tree->AsOp()->gtOp2); tree->gtType = TYP_BYREF; return tree; default: return gtNewOperNode(GT_ADDR, TYP_BYREF, tree); } } /*************************************************************************************************** * child - The inlinee of the retExpr node. * retClsHnd - The struct class handle of the type of the inlinee. * * Assign the inlinee to a tmp, if it is a call, just assign it to a lclVar, else we can * use a copyblock to do the assignment. */ GenTree* Compiler::fgAssignStructInlineeToVar(GenTree* child, CORINFO_CLASS_HANDLE retClsHnd) { assert(child->gtOper != GT_RET_EXPR && child->gtOper != GT_MKREFANY); unsigned tmpNum = lvaGrabTemp(false DEBUGARG("RetBuf for struct inline return candidates.")); lvaSetStruct(tmpNum, retClsHnd, false); var_types structType = lvaTable[tmpNum].lvType; GenTree* dst = gtNewLclvNode(tmpNum, structType); // If we have a call, we'd like it to be: V00 = call(), but first check if // we have a ", , , call()" -- this is very defensive as we may never get // an inlinee that is made of commas. If the inlinee is not a call, then // we use a copy block to do the assignment. GenTree* src = child; GenTree* lastComma = nullptr; while (src->gtOper == GT_COMMA) { lastComma = src; src = src->AsOp()->gtOp2; } GenTree* newInlinee = nullptr; if (src->gtOper == GT_CALL) { // If inlinee was just a call, new inlinee is v05 = call() newInlinee = gtNewAssignNode(dst, src); // When returning a multi-register value in a local var, make sure the variable is // marked as lvIsMultiRegRet, so it does not get promoted. if (src->AsCall()->HasMultiRegRetVal()) { lvaTable[tmpNum].lvIsMultiRegRet = true; } // If inlinee was comma, but a deeper call, new inlinee is (, , , v05 = call()) if (child->gtOper == GT_COMMA) { lastComma->AsOp()->gtOp2 = newInlinee; newInlinee = child; } } else { // Inlinee is not a call, so just create a copy block to the tmp. src = child; GenTree* dstAddr = fgGetStructAsStructPtr(dst); GenTree* srcAddr = fgGetStructAsStructPtr(src); newInlinee = gtNewCpObjNode(dstAddr, srcAddr, retClsHnd, false); } GenTree* production = gtNewLclvNode(tmpNum, structType); return gtNewOperNode(GT_COMMA, structType, newInlinee, production); } /*************************************************************************************************** * tree - The tree pointer that has one of its child nodes as retExpr. * child - The inlinee child. * retClsHnd - The struct class handle of the type of the inlinee. * * V04 = call() assignments are okay as we codegen it. Everything else needs to be a copy block or * would need a temp. For example, a cast(ldobj) will then be, cast(v05 = ldobj, v05); But it is * a very rare (or impossible) scenario that we'd have a retExpr transform into a ldobj other than * a lclVar/call. So it is not worthwhile to do pattern matching optimizations like addr(ldobj(op1)) * can just be op1. */ void Compiler::fgAttachStructInlineeToAsg(GenTree* tree, GenTree* child, CORINFO_CLASS_HANDLE retClsHnd) { // We are okay to have: // 1. V02 = call(); // 2. copyBlk(dstAddr, srcAddr); assert(tree->gtOper == GT_ASG); // We have an assignment, we codegen only V05 = call(). 
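    // (Illustrative shapes only: 'V05 = CALL()' is left untouched; otherwise the
    //  assignment is turned into a block copy, roughly
    //
    //      ASG(lhs, <inlinee>)  ==>  copyBlk(ADDR(lhs), ADDR(<inlinee>))
    //
    //  where a CALL child whose destination is not a simple local is first
    //  spilled to a temp via fgAssignStructInlineeToVar.)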
if (child->gtOper == GT_CALL && tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) { // If it is a multireg return on x64/ux, the local variable should be marked as lvIsMultiRegRet if (child->AsCall()->HasMultiRegRetVal()) { unsigned lclNum = tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); lvaTable[lclNum].lvIsMultiRegRet = true; } return; } GenTree* dstAddr = fgGetStructAsStructPtr(tree->AsOp()->gtOp1); GenTree* srcAddr = fgGetStructAsStructPtr( (child->gtOper == GT_CALL) ? fgAssignStructInlineeToVar(child, retClsHnd) // Assign to a variable if it is a call. : child); // Just get the address, if not a call. tree->ReplaceWith(gtNewCpObjNode(dstAddr, srcAddr, retClsHnd, false), this); } #endif // FEATURE_MULTIREG_RET //------------------------------------------------------------------------ // fgUpdateInlineReturnExpressionPlaceHolder: callback to replace the // inline return expression placeholder. // // Arguments: // pTree -- pointer to tree to examine for updates // data -- context data for the tree walk // // Returns: // fgWalkResult indicating the walk should continue; that // is we wish to fully explore the tree. // // Notes: // Looks for GT_RET_EXPR nodes that arose from tree splitting done // during importation for inline candidates, and replaces them. // // For successful inlines, substitutes the return value expression // from the inline body for the GT_RET_EXPR. // // For failed inlines, rejoins the original call into the tree from // whence it was split during importation. // // The code doesn't actually know if the corresponding inline // succeeded or not; it relies on the fact that gtInlineCandidate // initially points back at the call and is modified in place to // the inlinee return expression if the inline is successful (see // tail end of fgInsertInlineeBlocks for the update of iciCall). // // If the return type is a struct type and we're on a platform // where structs can be returned in multiple registers, ensure the // call has a suitable parent. // // If the original call type and the substitution type are different // the functions makes necessary updates. It could happen if there was // an implicit conversion in the inlinee body. // Compiler::fgWalkResult Compiler::fgUpdateInlineReturnExpressionPlaceHolder(GenTree** pTree, fgWalkData* data) { // All the operations here and in the corresponding postorder // callback (fgLateDevirtualization) are triggered by GT_CALL or // GT_RET_EXPR trees, and these (should) have the call side // effect flag. // // So bail out for any trees that don't have this flag. GenTree* tree = *pTree; if ((tree->gtFlags & GTF_CALL) == 0) { return WALK_SKIP_SUBTREES; } bool* madeChanges = static_cast<bool*>(data->pCallbackData); Compiler* comp = data->compiler; CORINFO_CLASS_HANDLE retClsHnd = NO_CLASS_HANDLE; while (tree->OperGet() == GT_RET_EXPR) { // We are going to copy the tree from the inlinee, // so record the handle now. // if (varTypeIsStruct(tree)) { retClsHnd = tree->AsRetExpr()->gtRetClsHnd; } // Skip through chains of GT_RET_EXPRs (say from nested inlines) // to the actual tree to use. // // Also we might as well try and fold the return value. // Eg returns of constant bools will have CASTS. // This folding may uncover more GT_RET_EXPRs, so we loop around // until we've got something distinct. 
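        // (Such chains come from nested inlines: if the candidate call Foo was
        //  inlined and Foo's own return expression was a call Bar that was also
        //  inlined, the placeholder for Foo points at another RET_EXPR for Bar,
        //  and so on until an actual value tree is reached.)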
// BasicBlockFlags bbFlags = BBF_EMPTY; GenTree* inlineCandidate = tree->gtRetExprVal(&bbFlags); inlineCandidate = comp->gtFoldExpr(inlineCandidate); var_types retType = tree->TypeGet(); #ifdef DEBUG if (comp->verbose) { printf("\nReplacing the return expression placeholder "); printTreeID(tree); printf(" with "); printTreeID(inlineCandidate); printf("\n"); // Dump out the old return expression placeholder it will be overwritten by the ReplaceWith below comp->gtDispTree(tree); } #endif // DEBUG var_types newType = inlineCandidate->TypeGet(); // If we end up swapping type we may need to retype the tree: if (retType != newType) { if ((retType == TYP_BYREF) && (tree->OperGet() == GT_IND)) { // - in an RVA static if we've reinterpreted it as a byref; assert(newType == TYP_I_IMPL); JITDUMP("Updating type of the return GT_IND expression to TYP_BYREF\n"); inlineCandidate->gtType = TYP_BYREF; } else { // - under a call if we changed size of the argument. GenTree* putArgType = comp->fgCheckCallArgUpdate(data->parent, inlineCandidate, retType); if (putArgType != nullptr) { inlineCandidate = putArgType; } } } tree->ReplaceWith(inlineCandidate, comp); *madeChanges = true; comp->compCurBB->bbFlags |= (bbFlags & BBF_SPLIT_GAINED); #ifdef DEBUG if (comp->verbose) { printf("\nInserting the inline return expression\n"); comp->gtDispTree(tree); printf("\n"); } #endif // DEBUG } // If an inline was rejected and the call returns a struct, we may // have deferred some work when importing call for cases where the // struct is returned in register(s). // // See the bail-out clauses in impFixupCallStructReturn for inline // candidates. // // Do the deferred work now. if (retClsHnd != NO_CLASS_HANDLE) { structPassingKind howToReturnStruct; var_types returnType = comp->getReturnTypeForStruct(retClsHnd, CorInfoCallConvExtension::Managed, &howToReturnStruct); GenTree* parent = data->parent; switch (howToReturnStruct) { #if FEATURE_MULTIREG_RET // Is this a type that is returned in multiple registers // or a via a primitve type that is larger than the struct type? // if so we need to force into into a form we accept. // i.e. LclVar = call() case SPK_ByValue: case SPK_ByValueAsHfa: { // See assert below, we only look one level above for an asg parent. if (parent->gtOper == GT_ASG) { // Either lhs is a call V05 = call(); or lhs is addr, and asg becomes a copyBlk. comp->fgAttachStructInlineeToAsg(parent, tree, retClsHnd); } else { // Just assign the inlinee to a variable to keep it simple. tree->ReplaceWith(comp->fgAssignStructInlineeToVar(tree, retClsHnd), comp); } *madeChanges = true; } break; #endif // FEATURE_MULTIREG_RET case SPK_EnclosingType: case SPK_PrimitiveType: // No work needs to be done, the call has struct type and should keep it. break; case SPK_ByReference: // We should have already added the return buffer // when we first imported the call break; default: noway_assert(!"Unexpected struct passing kind"); break; } } #if FEATURE_MULTIREG_RET #if defined(DEBUG) // Make sure we don't have a tree like so: V05 = (, , , retExpr); // Since we only look one level above for the parent for '=' and // do not check if there is a series of COMMAs. See above. // Importer and FlowGraph will not generate such a tree, so just // leaving an assert in here. This can be fixed by looking ahead // when we visit GT_ASG similar to fgAttachStructInlineeToAsg. 
// if (tree->OperGet() == GT_ASG) { GenTree* value = tree->AsOp()->gtOp2; if (value->OperGet() == GT_COMMA) { GenTree* effectiveValue = value->gtEffectiveVal(/*commaOnly*/ true); noway_assert(!varTypeIsStruct(effectiveValue) || (effectiveValue->OperGet() != GT_RET_EXPR) || !comp->IsMultiRegReturnedType(effectiveValue->AsRetExpr()->gtRetClsHnd, CorInfoCallConvExtension::Managed)); } } #endif // defined(DEBUG) #endif // FEATURE_MULTIREG_RET return WALK_CONTINUE; } //------------------------------------------------------------------------ // fgLateDevirtualization: re-examine calls after inlining to see if we // can do more devirtualization // // Arguments: // pTree -- pointer to tree to examine for updates // data -- context data for the tree walk // // Returns: // fgWalkResult indicating the walk should continue; that // is we wish to fully explore the tree. // // Notes: // We used to check this opportunistically in the preorder callback for // calls where the `obj` was fed by a return, but we now re-examine // all calls. // // Late devirtualization (and eventually, perhaps, other type-driven // opts like cast optimization) can happen now because inlining or other // optimizations may have provided more accurate types than we saw when // first importing the trees. // // It would be nice to screen candidate sites based on the likelihood // that something has changed. Otherwise we'll waste some time retrying // an optimization that will just fail again. Compiler::fgWalkResult Compiler::fgLateDevirtualization(GenTree** pTree, fgWalkData* data) { GenTree* tree = *pTree; GenTree* parent = data->parent; Compiler* comp = data->compiler; bool* madeChanges = static_cast<bool*>(data->pCallbackData); // In some (rare) cases the parent node of tree will be smashed to a NOP during // the preorder by fgAttachStructToInlineeArg. // // jit\Methodical\VT\callconv\_il_reljumper3 for x64 linux // // If so, just bail out here. if (tree == nullptr) { assert((parent != nullptr) && parent->OperGet() == GT_NOP); return WALK_CONTINUE; } if (tree->OperGet() == GT_CALL) { GenTreeCall* call = tree->AsCall(); bool tryLateDevirt = call->IsVirtual() && (call->gtCallType == CT_USER_FUNC); #ifdef DEBUG tryLateDevirt = tryLateDevirt && (JitConfig.JitEnableLateDevirtualization() == 1); #endif // DEBUG if (tryLateDevirt) { #ifdef DEBUG if (comp->verbose) { printf("**** Late devirt opportunity\n"); comp->gtDispTree(call); } #endif // DEBUG CORINFO_CONTEXT_HANDLE context = nullptr; CORINFO_METHOD_HANDLE method = call->gtCallMethHnd; unsigned methodFlags = 0; const bool isLateDevirtualization = true; const bool explicitTailCall = call->IsTailPrefixedCall(); if ((call->gtCallMoreFlags & GTF_CALL_M_LATE_DEVIRT) != 0) { context = call->gtLateDevirtualizationInfo->exactContextHnd; call->gtLateDevirtualizationInfo = nullptr; } comp->impDevirtualizeCall(call, nullptr, &method, &methodFlags, &context, nullptr, isLateDevirtualization, explicitTailCall); *madeChanges = true; } } else if (tree->OperGet() == GT_ASG) { // If we're assigning to a ref typed local that has one definition, // we may be able to sharpen the type for the local. 
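        // (Illustrative example: if the only definition of a TYP_REF local is
        //  'obj = SomeHelper()' and inlining revealed the helper always returns
        //  a String, gtGetClassHandle reports the more precise -- possibly
        //  exact -- class and lvaUpdateClass records it, which can enable later
        //  devirtualization of calls made through 'obj'.)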
GenTree* const effLhs = tree->gtGetOp1()->gtEffectiveVal(); if ((effLhs->OperGet() == GT_LCL_VAR) && (effLhs->TypeGet() == TYP_REF)) { const unsigned lclNum = effLhs->AsLclVarCommon()->GetLclNum(); LclVarDsc* lcl = comp->lvaGetDesc(lclNum); if (lcl->lvSingleDef) { GenTree* rhs = tree->gtGetOp2(); bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE newClass = comp->gtGetClassHandle(rhs, &isExact, &isNonNull); if (newClass != NO_CLASS_HANDLE) { comp->lvaUpdateClass(lclNum, newClass, isExact); *madeChanges = true; } } } // If we created a self-assignment (say because we are sharing return spill temps) // we can remove it. // GenTree* const lhs = tree->gtGetOp1(); GenTree* const rhs = tree->gtGetOp2(); if (lhs->OperIs(GT_LCL_VAR) && GenTree::Compare(lhs, rhs)) { comp->gtUpdateNodeSideEffects(tree); assert((tree->gtFlags & GTF_SIDE_EFFECT) == GTF_ASG); JITDUMP("... removing self-assignment\n"); DISPTREE(tree); tree->gtBashToNOP(); *madeChanges = true; } } else if (tree->OperGet() == GT_JTRUE) { // See if this jtrue is now foldable. BasicBlock* block = comp->compCurBB; GenTree* condTree = tree->AsOp()->gtOp1; assert(tree == block->lastStmt()->GetRootNode()); if (condTree->OperGet() == GT_CNS_INT) { JITDUMP(" ... found foldable jtrue at [%06u] in " FMT_BB "\n", dspTreeID(tree), block->bbNum); noway_assert((block->bbNext->countOfInEdges() > 0) && (block->bbJumpDest->countOfInEdges() > 0)); // We have a constant operand, and should have the all clear to optimize. // Update side effects on the tree, assert there aren't any, and bash to nop. comp->gtUpdateNodeSideEffects(tree); assert((tree->gtFlags & GTF_SIDE_EFFECT) == 0); tree->gtBashToNOP(); *madeChanges = true; BasicBlock* bNotTaken = nullptr; if (condTree->AsIntCon()->gtIconVal != 0) { block->bbJumpKind = BBJ_ALWAYS; bNotTaken = block->bbNext; } else { block->bbJumpKind = BBJ_NONE; bNotTaken = block->bbJumpDest; } comp->fgRemoveRefPred(bNotTaken, block); // If that was the last ref, a subsequent flow-opt pass // will clean up the now-unreachable bNotTaken, and any // other transitively unreachable blocks. if (bNotTaken->bbRefs == 0) { JITDUMP("... it looks like " FMT_BB " is now unreachable!\n", bNotTaken->bbNum); } } } else { const var_types retType = tree->TypeGet(); GenTree* foldedTree = comp->gtFoldExpr(tree); GenTree* putArgType = comp->fgCheckCallArgUpdate(data->parent, foldedTree, retType); if (putArgType != nullptr) { foldedTree = putArgType; } *pTree = foldedTree; *madeChanges = true; } return WALK_CONTINUE; } #ifdef DEBUG /***************************************************************************** * Callback to make sure there is no more GT_RET_EXPR and GTF_CALL_INLINE_CANDIDATE nodes. */ /* static */ Compiler::fgWalkResult Compiler::fgDebugCheckInlineCandidates(GenTree** pTree, fgWalkData* data) { GenTree* tree = *pTree; if (tree->gtOper == GT_CALL) { assert((tree->gtFlags & GTF_CALL_INLINE_CANDIDATE) == 0); } else { assert(tree->gtOper != GT_RET_EXPR); } return WALK_CONTINUE; } #endif // DEBUG void Compiler::fgInvokeInlineeCompiler(GenTreeCall* call, InlineResult* inlineResult, InlineContext** createdContext) { noway_assert(call->gtOper == GT_CALL); noway_assert((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0); noway_assert(opts.OptEnabled(CLFLG_INLINING)); // This is the InlineInfo struct representing a method to be inlined. 
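    // (The 'ici*' fields filled in below describe the call site in the inliner:
    //  the call node itself, the statement containing it, and the basic block
    //  containing that statement.)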
InlineInfo inlineInfo; memset(&inlineInfo, 0, sizeof(inlineInfo)); CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd; inlineInfo.fncHandle = fncHandle; inlineInfo.iciCall = call; inlineInfo.iciStmt = fgMorphStmt; inlineInfo.iciBlock = compCurBB; inlineInfo.thisDereferencedFirst = false; inlineInfo.retExpr = nullptr; inlineInfo.retBB = nullptr; inlineInfo.retExprClassHnd = nullptr; inlineInfo.retExprClassHndIsExact = false; inlineInfo.inlineResult = inlineResult; #ifdef FEATURE_SIMD inlineInfo.hasSIMDTypeArgLocalOrReturn = false; #endif // FEATURE_SIMD InlineCandidateInfo* inlineCandidateInfo = call->gtInlineCandidateInfo; noway_assert(inlineCandidateInfo); // Store the link to inlineCandidateInfo into inlineInfo inlineInfo.inlineCandidateInfo = inlineCandidateInfo; unsigned inlineDepth = fgCheckInlineDepthAndRecursion(&inlineInfo); if (inlineResult->IsFailure()) { #ifdef DEBUG if (verbose) { printf("Recursive or deep inline recursion detected. Will not expand this INLINECANDIDATE \n"); } #endif // DEBUG return; } // Set the trap to catch all errors (including recoverable ones from the EE) struct Param { Compiler* pThis; GenTree* call; CORINFO_METHOD_HANDLE fncHandle; InlineCandidateInfo* inlineCandidateInfo; InlineInfo* inlineInfo; } param; memset(&param, 0, sizeof(param)); param.pThis = this; param.call = call; param.fncHandle = fncHandle; param.inlineCandidateInfo = inlineCandidateInfo; param.inlineInfo = &inlineInfo; bool success = eeRunWithErrorTrap<Param>( [](Param* pParam) { // Init the local var info of the inlinee pParam->pThis->impInlineInitVars(pParam->inlineInfo); if (pParam->inlineInfo->inlineResult->IsCandidate()) { /* Clear the temp table */ memset(pParam->inlineInfo->lclTmpNum, -1, sizeof(pParam->inlineInfo->lclTmpNum)); // // Prepare the call to jitNativeCode // pParam->inlineInfo->InlinerCompiler = pParam->pThis; if (pParam->pThis->impInlineInfo == nullptr) { pParam->inlineInfo->InlineRoot = pParam->pThis; } else { pParam->inlineInfo->InlineRoot = pParam->pThis->impInlineInfo->InlineRoot; } // The inline context is part of debug info and must be created // before we start creating statements; we lazily create it as // late as possible, which is here. pParam->inlineInfo->inlineContext = pParam->inlineInfo->InlineRoot->m_inlineStrategy ->NewContext(pParam->inlineInfo->inlineCandidateInfo->inlinersContext, pParam->inlineInfo->iciStmt, pParam->inlineInfo->iciCall); pParam->inlineInfo->argCnt = pParam->inlineCandidateInfo->methInfo.args.totalILArgs(); pParam->inlineInfo->tokenLookupContextHandle = pParam->inlineCandidateInfo->exactContextHnd; JITLOG_THIS(pParam->pThis, (LL_INFO100000, "INLINER: inlineInfo.tokenLookupContextHandle for %s set to 0x%p:\n", pParam->pThis->eeGetMethodFullName(pParam->fncHandle), pParam->pThis->dspPtr(pParam->inlineInfo->tokenLookupContextHandle))); JitFlags compileFlagsForInlinee = *pParam->pThis->opts.jitFlags; // The following flags are lost when inlining. // (This is checked in Compiler::compInitOptions().) 
compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_BBINSTR); compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_PROF_ENTERLEAVE); compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_DEBUG_EnC); compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_REVERSE_PINVOKE); compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_TRACK_TRANSITIONS); compileFlagsForInlinee.Set(JitFlags::JIT_FLAG_SKIP_VERIFICATION); #ifdef DEBUG if (pParam->pThis->verbose) { printf("\nInvoking compiler for the inlinee method %s :\n", pParam->pThis->eeGetMethodFullName(pParam->fncHandle)); } #endif // DEBUG int result = jitNativeCode(pParam->fncHandle, pParam->inlineCandidateInfo->methInfo.scope, pParam->pThis->info.compCompHnd, &pParam->inlineCandidateInfo->methInfo, (void**)pParam->inlineInfo, nullptr, &compileFlagsForInlinee, pParam->inlineInfo); if (result != CORJIT_OK) { // If we haven't yet determined why this inline fails, use // a catch-all something bad happened observation. InlineResult* innerInlineResult = pParam->inlineInfo->inlineResult; if (!innerInlineResult->IsFailure()) { innerInlineResult->NoteFatal(InlineObservation::CALLSITE_COMPILATION_FAILURE); } } } }, &param); if (!success) { #ifdef DEBUG if (verbose) { printf("\nInlining failed due to an exception during invoking the compiler for the inlinee method %s.\n", eeGetMethodFullName(fncHandle)); } #endif // DEBUG // If we haven't yet determined why this inline fails, use // a catch-all something bad happened observation. if (!inlineResult->IsFailure()) { inlineResult->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR); } } *createdContext = inlineInfo.inlineContext; if (inlineResult->IsFailure()) { return; } #ifdef DEBUG if (0 && verbose) { printf("\nDone invoking compiler for the inlinee method %s\n", eeGetMethodFullName(fncHandle)); } #endif // DEBUG // If there is non-NULL return, but we haven't set the pInlineInfo->retExpr, // That means we haven't imported any BB that contains CEE_RET opcode. // (This could happen for example for a BBJ_THROW block fall through a BBJ_RETURN block which // causes the BBJ_RETURN block not to be imported at all.) // Fail the inlining attempt if (inlineCandidateInfo->fncRetType != TYP_VOID && inlineInfo.retExpr == nullptr) { #ifdef DEBUG if (verbose) { printf("\nInlining failed because pInlineInfo->retExpr is not set in the inlinee method %s.\n", eeGetMethodFullName(fncHandle)); } #endif // DEBUG inlineResult->NoteFatal(InlineObservation::CALLEE_LACKS_RETURN); return; } // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! // The inlining attempt cannot be failed starting from this point. // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! // We've successfully obtain the list of inlinee's basic blocks. // Let's insert it to inliner's basic block list. fgInsertInlineeBlocks(&inlineInfo); #ifdef DEBUG if (verbose) { printf("Successfully inlined %s (%d IL bytes) (depth %d) [%s]\n", eeGetMethodFullName(fncHandle), inlineCandidateInfo->methInfo.ILCodeSize, inlineDepth, inlineResult->ReasonString()); } if (verbose) { printf("--------------------------------------------------------------------------------------------\n"); } #endif // DEBUG #if defined(DEBUG) impInlinedCodeSize += inlineCandidateInfo->methInfo.ILCodeSize; #endif // We inlined... inlineResult->NoteSuccess(); } //------------------------------------------------------------------------ // fgInsertInlineeBlocks: incorporate statements for an inline into the // root method. 
// // Arguments: // inlineInfo -- info for the inline // // Notes: // The inlining attempt cannot be failed once this method is called. // // Adds all inlinee statements, plus any glue statements needed // either before or after the inlined call. // // Updates flow graph and assigns weights to inlinee // blocks. Currently does not attempt to read IBC data for the // inlinee. // // Updates relevant root method status flags (eg optMethodFlags) to // include information from the inlinee. // // Marks newly added statements with an appropriate inline context. void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo) { GenTreeCall* iciCall = pInlineInfo->iciCall; Statement* iciStmt = pInlineInfo->iciStmt; BasicBlock* iciBlock = pInlineInfo->iciBlock; noway_assert(iciBlock->bbStmtList != nullptr); noway_assert(iciStmt->GetRootNode() != nullptr); assert(iciStmt->GetRootNode() == iciCall); noway_assert(iciCall->gtOper == GT_CALL); #ifdef DEBUG Statement* currentDumpStmt = nullptr; if (verbose) { printf("\n\n----------- Statements (and blocks) added due to the inlining of call "); printTreeID(iciCall); printf(" -----------\n"); } #endif // DEBUG // Mark success. pInlineInfo->inlineContext->SetSucceeded(pInlineInfo); // Prepend statements Statement* stmtAfter = fgInlinePrependStatements(pInlineInfo); #ifdef DEBUG if (verbose) { currentDumpStmt = stmtAfter; printf("\nInlinee method body:"); } #endif // DEBUG BasicBlock* topBlock = iciBlock; BasicBlock* bottomBlock = nullptr; if (InlineeCompiler->fgBBcount == 1) { // When fgBBCount is 1 we will always have a non-NULL fgFirstBB // PREFAST_ASSUME(InlineeCompiler->fgFirstBB != nullptr); // DDB 91389: Don't throw away the (only) inlinee block // when its return type is not BBJ_RETURN. // In other words, we need its BBJ_ to perform the right thing. if (InlineeCompiler->fgFirstBB->bbJumpKind == BBJ_RETURN) { // Inlinee contains just one BB. So just insert its statement list to topBlock. if (InlineeCompiler->fgFirstBB->bbStmtList != nullptr) { stmtAfter = fgInsertStmtListAfter(iciBlock, stmtAfter, InlineeCompiler->fgFirstBB->firstStmt()); } // Copy inlinee bbFlags to caller bbFlags. const BasicBlockFlags inlineeBlockFlags = InlineeCompiler->fgFirstBB->bbFlags; noway_assert((inlineeBlockFlags & BBF_HAS_JMP) == 0); noway_assert((inlineeBlockFlags & BBF_KEEP_BBJ_ALWAYS) == 0); // Todo: we may want to exclude other flags here. iciBlock->bbFlags |= (inlineeBlockFlags & ~BBF_RUN_RARELY); #ifdef DEBUG if (verbose) { noway_assert(currentDumpStmt); if (currentDumpStmt != stmtAfter) { do { currentDumpStmt = currentDumpStmt->GetNextStmt(); printf("\n"); gtDispStmt(currentDumpStmt); printf("\n"); } while (currentDumpStmt != stmtAfter); } } #endif // DEBUG // Append statements to null out gc ref locals, if necessary. fgInlineAppendStatements(pInlineInfo, iciBlock, stmtAfter); goto _Done; } } // // ======= Inserting inlinee's basic blocks =============== // bottomBlock = fgNewBBafter(topBlock->bbJumpKind, topBlock, true); bottomBlock->bbRefs = 1; bottomBlock->bbJumpDest = topBlock->bbJumpDest; bottomBlock->inheritWeight(topBlock); topBlock->bbJumpKind = BBJ_NONE; // Update block flags { const BasicBlockFlags originalFlags = topBlock->bbFlags; noway_assert((originalFlags & BBF_SPLIT_NONEXIST) == 0); topBlock->bbFlags &= ~(BBF_SPLIT_LOST); bottomBlock->bbFlags |= originalFlags & BBF_SPLIT_GAINED; } // Split statements between topBlock and bottomBlock. 
} else { /* The argument is either not used or a const or lcl var */ noway_assert(!argInfo.argIsUsed || argInfo.argIsInvariant || argInfo.argIsLclVar); /* Make sure we didnt change argNode's along the way, or else subsequent uses of the arg would have worked with the bashed value */ if (argInfo.argIsInvariant) { assert(argNode->OperIsConst() || argNode->gtOper == GT_ADDR); } noway_assert((argInfo.argIsLclVar == 0) == (argNode->gtOper != GT_LCL_VAR || (argNode->gtFlags & GTF_GLOB_REF))); /* If the argument has side effects, append it */ if (argInfo.argHasSideEff) { noway_assert(argInfo.argIsUsed == false); newStmt = nullptr; bool append = true; if (argNode->gtOper == GT_OBJ || argNode->gtOper == GT_MKREFANY) { // Don't put GT_OBJ node under a GT_COMMA. // Codegen can't deal with it. // Just hang the address here in case there are side-effect. newStmt = gtNewStmt(gtUnusedValNode(argNode->AsOp()->gtOp1), callDI); } else { // In some special cases, unused args with side effects can // trigger further changes. // // (1) If the arg is a static field access and the field access // was produced by a call to EqualityComparer<T>.get_Default, the // helper call to ensure the field has a value can be suppressed. // This helper call is marked as a "Special DCE" helper during // importation, over in fgGetStaticsCCtorHelper. // // (2) NYI. If, after tunneling through GT_RET_VALs, we find that // the actual arg expression has no side effects, we can skip // appending all together. This will help jit TP a bit. // // Chase through any GT_RET_EXPRs to find the actual argument // expression. GenTree* actualArgNode = argNode->gtRetExprVal(&bbFlags); // For case (1) // // Look for the following tree shapes // prejit: (IND (ADD (CONST, CALL(special dce helper...)))) // jit : (COMMA (CALL(special dce helper...), (FIELD ...))) if (actualArgNode->gtOper == GT_COMMA) { // Look for (COMMA (CALL(special dce helper...), (FIELD ...))) GenTree* op1 = actualArgNode->AsOp()->gtOp1; GenTree* op2 = actualArgNode->AsOp()->gtOp2; if (op1->IsCall() && ((op1->AsCall()->gtCallMoreFlags & GTF_CALL_M_HELPER_SPECIAL_DCE) != 0) && (op2->gtOper == GT_FIELD) && ((op2->gtFlags & GTF_EXCEPT) == 0)) { JITDUMP("\nPerforming special dce on unused arg [%06u]:" " actual arg [%06u] helper call [%06u]\n", argNode->gtTreeID, actualArgNode->gtTreeID, op1->gtTreeID); // Drop the whole tree append = false; } } else if (actualArgNode->gtOper == GT_IND) { // Look for (IND (ADD (CONST, CALL(special dce helper...)))) GenTree* addr = actualArgNode->AsOp()->gtOp1; if (addr->gtOper == GT_ADD) { GenTree* op1 = addr->AsOp()->gtOp1; GenTree* op2 = addr->AsOp()->gtOp2; if (op1->IsCall() && ((op1->AsCall()->gtCallMoreFlags & GTF_CALL_M_HELPER_SPECIAL_DCE) != 0) && op2->IsCnsIntOrI()) { // Drop the whole tree JITDUMP("\nPerforming special dce on unused arg [%06u]:" " actual arg [%06u] helper call [%06u]\n", argNode->gtTreeID, actualArgNode->gtTreeID, op1->gtTreeID); append = false; } } } } if (!append) { assert(newStmt == nullptr); JITDUMP("Arg tree side effects were discardable, not appending anything for arg\n"); } else { // If we don't have something custom to append, // just append the arg node as an unused value. 
if (newStmt == nullptr) { newStmt = gtNewStmt(gtUnusedValNode(argNode), callDI); } fgInsertStmtAfter(block, afterStmt, newStmt); afterStmt = newStmt; #ifdef DEBUG if (verbose) { gtDispStmt(afterStmt); } #endif // DEBUG } } else if (argNode->IsBoxedValue()) { // Try to clean up any unnecessary boxing side effects // since the box itself will be ignored. gtTryRemoveBoxUpstreamEffects(argNode); } block->bbFlags |= (bbFlags & BBF_SPLIT_GAINED); } } } // Add the CCTOR check if asked for. // Note: We no longer do the optimization that is done before by staticAccessedFirstUsingHelper in the old inliner. // Therefore we might prepend redundant call to HELPER.CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE // before the inlined method body, even if a static field of this type was accessed in the inlinee // using a helper before any other observable side-effect. if (inlineInfo->inlineCandidateInfo->initClassResult & CORINFO_INITCLASS_USE_HELPER) { CORINFO_CLASS_HANDLE exactClass = eeGetClassFromContext(inlineInfo->inlineCandidateInfo->exactContextHnd); tree = fgGetSharedCCtor(exactClass); newStmt = gtNewStmt(tree, callDI); fgInsertStmtAfter(block, afterStmt, newStmt); afterStmt = newStmt; } // Insert the nullcheck statement now. if (nullcheck) { newStmt = gtNewStmt(nullcheck, callDI); fgInsertStmtAfter(block, afterStmt, newStmt); afterStmt = newStmt; } // // Now zero-init inlinee locals // CORINFO_METHOD_INFO* InlineeMethodInfo = InlineeCompiler->info.compMethodInfo; unsigned lclCnt = InlineeMethodInfo->locals.numArgs; bool bbInALoop = (block->bbFlags & BBF_BACKWARD_JUMP) != 0; bool bbIsReturn = block->bbJumpKind == BBJ_RETURN; // If the callee contains zero-init locals, we need to explicitly initialize them if we are // in a loop or if the caller doesn't have compInitMem set. Otherwise we can rely on the // normal logic in the caller to insert zero-init in the prolog if necessary. if ((lclCnt != 0) && ((InlineeMethodInfo->options & CORINFO_OPT_INIT_LOCALS) != 0) && ((bbInALoop && !bbIsReturn) || !info.compInitMem)) { #ifdef DEBUG if (verbose) { printf("\nZero init inlinee locals:\n"); } #endif // DEBUG for (unsigned lclNum = 0; lclNum < lclCnt; lclNum++) { unsigned tmpNum = inlineInfo->lclTmpNum[lclNum]; // If the local is used check whether we need to insert explicit zero initialization. if (tmpNum != BAD_VAR_NUM) { LclVarDsc* const tmpDsc = lvaGetDesc(tmpNum); if (!fgVarNeedsExplicitZeroInit(tmpNum, bbInALoop, bbIsReturn)) { JITDUMP("\nSuppressing zero-init for V%02u -- expect to zero in prolog\n", tmpNum); tmpDsc->lvSuppressedZeroInit = 1; compSuppressedZeroInit = true; continue; } var_types lclTyp = (var_types)lvaTable[tmpNum].lvType; noway_assert(lclTyp == lclVarInfo[lclNum + inlineInfo->argCnt].lclTypeInfo); if (!varTypeIsStruct(lclTyp)) { // Unsafe value cls check is not needed here since in-linee compiler instance would have // iterated over locals and marked accordingly. 
impAssignTempGen(tmpNum, gtNewZeroConNode(genActualType(lclTyp)), NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_NONE, &afterStmt, callDI, block); } else { tree = gtNewBlkOpNode(gtNewLclvNode(tmpNum, lclTyp), // Dest gtNewIconNode(0), // Value false, // isVolatile false); // not copyBlock newStmt = gtNewStmt(tree, callDI); fgInsertStmtAfter(block, afterStmt, newStmt); afterStmt = newStmt; } #ifdef DEBUG if (verbose) { gtDispStmt(afterStmt); } #endif // DEBUG } } } return afterStmt; } //------------------------------------------------------------------------ // fgInlineAppendStatements: Append statements that are needed // after the inlined call. // // Arguments: // inlineInfo - information about the inline // block - basic block for the new statements // stmtAfter - (optional) insertion point for mid-block cases // // Notes: // If the call we're inlining is in tail position then // we skip nulling the locals, since it can interfere // with tail calls introduced by the local. void Compiler::fgInlineAppendStatements(InlineInfo* inlineInfo, BasicBlock* block, Statement* stmtAfter) { // Null out any gc ref locals if (!inlineInfo->HasGcRefLocals()) { // No ref locals, nothing to do. JITDUMP("fgInlineAppendStatements: no gc ref inline locals.\n"); return; } if (inlineInfo->iciCall->IsImplicitTailCall()) { JITDUMP("fgInlineAppendStatements: implicit tail call; skipping nulling.\n"); return; } JITDUMP("fgInlineAppendStatements: nulling out gc ref inlinee locals.\n"); Statement* callStmt = inlineInfo->iciStmt; const DebugInfo& callDI = callStmt->GetDebugInfo(); CORINFO_METHOD_INFO* InlineeMethodInfo = InlineeCompiler->info.compMethodInfo; const unsigned lclCnt = InlineeMethodInfo->locals.numArgs; InlLclVarInfo* lclVarInfo = inlineInfo->lclVarInfo; unsigned gcRefLclCnt = inlineInfo->numberOfGcRefLocals; const unsigned argCnt = inlineInfo->argCnt; for (unsigned lclNum = 0; lclNum < lclCnt; lclNum++) { // Is the local a gc ref type? Need to look at the // inline info for this since we will not have local // temps for unused inlinee locals. const var_types lclTyp = lclVarInfo[argCnt + lclNum].lclTypeInfo; if (!varTypeIsGC(lclTyp)) { // Nope, nothing to null out. continue; } // Ensure we're examining just the right number of locals. assert(gcRefLclCnt > 0); gcRefLclCnt--; // Fetch the temp for this inline local const unsigned tmpNum = inlineInfo->lclTmpNum[lclNum]; // Is the local used at all? if (tmpNum == BAD_VAR_NUM) { // Nope, nothing to null out. continue; } // Local was used, make sure the type is consistent. assert(lvaTable[tmpNum].lvType == lclTyp); // Does the local we're about to null out appear in the return // expression? If so we somehow messed up and didn't properly // spill the return value. See impInlineFetchLocal. GenTree* retExpr = inlineInfo->retExpr; if (retExpr != nullptr) { const bool interferesWithReturn = gtHasRef(inlineInfo->retExpr, tmpNum); noway_assert(!interferesWithReturn); } // Assign null to the local. GenTree* nullExpr = gtNewTempAssign(tmpNum, gtNewZeroConNode(lclTyp)); Statement* nullStmt = gtNewStmt(nullExpr, callDI); if (stmtAfter == nullptr) { fgInsertStmtAtBeg(block, nullStmt); } else { fgInsertStmtAfter(block, stmtAfter, nullStmt); } stmtAfter = nullStmt; #ifdef DEBUG if (verbose) { gtDispStmt(nullStmt); } #endif // DEBUG } // There should not be any GC ref locals left to null out. assert(gcRefLclCnt == 0); } //------------------------------------------------------------------------ // fgNeedReturnSpillTemp: Answers does the inlinee need to spill all returns // as a temp. 
// // Return Value: // true if the inlinee has to spill return exprs. bool Compiler::fgNeedReturnSpillTemp() { assert(compIsForInlining()); return (lvaInlineeReturnSpillTemp != BAD_VAR_NUM); }
-1
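The inliner code in the record above splits the call-site block's statement list into a top half and a bottom half around the inlined call and then rewires the head and tail links of each half. Below is a minimal sketch of just that list-splitting step, assuming hypothetical simplified Stmt and StmtList types rather than the JIT's real Statement and BasicBlock classes, whose prev-link bookkeeping is more involved.

// Minimal sketch (hypothetical simplified types, not the JIT's Statement or
// BasicBlock classes): splitting a doubly linked statement list after a given
// statement, mirroring the four cases handled when the inliner breaks the
// call-site block into a top half and a bottom half.
#include <cassert>
#include <cstdio>

struct Stmt
{
    int   id;
    Stmt* prev;
    Stmt* next;
};

struct StmtList
{
    Stmt* head;
    Stmt* tail;
};

// Statements up to and including 'splitAfter' stay in 'top'; the rest move to
// 'bottom'. Pass nullptr for 'splitAfter' to move everything to 'bottom'.
void SplitAfter(StmtList& top, StmtList& bottom, Stmt* splitAfter)
{
    if (top.head == nullptr)
    {
        // Case 1: the block was empty to begin with; both halves stay empty.
        assert(splitAfter == nullptr);
        return;
    }
    if (splitAfter == nullptr)
    {
        // Case 2: split before the first statement; everything moves down.
        bottom = top;
        top    = {nullptr, nullptr};
        return;
    }
    if (splitAfter->next == nullptr)
    {
        // Case 3: split after the last statement; everything stays on top.
        return;
    }
    // Case 4: a genuine middle split; break the link and fix up both ends.
    bottom.head       = splitAfter->next;
    bottom.tail       = top.tail;
    bottom.head->prev = nullptr;
    top.tail          = splitAfter;
    top.tail->next    = nullptr;
}

int main()
{
    Stmt a{1, nullptr, nullptr}, b{2, nullptr, nullptr}, c{3, nullptr, nullptr};
    a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;

    StmtList top{&a, &c};
    StmtList bottom{nullptr, nullptr};

    SplitAfter(top, bottom, &a); // split right after the first statement
    std::printf("top ends at %d, bottom starts at %d\n", top.tail->id, bottom.head->id);
    return 0;
}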
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types but in the code base, we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`, and that doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types but in the code base, we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`, and that doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/vm/comthreadpool.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Header: COMThreadPool.h ** ** Purpose: Native methods on System.ThreadPool ** and its inner classes ** ** ===========================================================*/ #ifndef _COMTHREADPOOL_H #define _COMTHREADPOOL_H #include "delegateinfo.h" #include "nativeoverlapped.h" class ThreadPoolNative { public: static FCDECL4(INT32, GetNextConfigUInt32Value, INT32 configVariableIndex, UINT32 *configValueRef, BOOL *isBooleanRef, LPCWSTR *appContextConfigNameRef); static FCDECL1(FC_BOOL_RET, CorCanSetMinIOCompletionThreads, DWORD ioCompletionThreads); static FCDECL1(FC_BOOL_RET, CorCanSetMaxIOCompletionThreads, DWORD ioCompletionThreads); static FCDECL2(FC_BOOL_RET, CorSetMaxThreads, DWORD workerThreads, DWORD completionPortThreads); static FCDECL2(VOID, CorGetMaxThreads, DWORD* workerThreads, DWORD* completionPortThreads); static FCDECL2(FC_BOOL_RET, CorSetMinThreads, DWORD workerThreads, DWORD completionPortThreads); static FCDECL2(VOID, CorGetMinThreads, DWORD* workerThreads, DWORD* completionPortThreads); static FCDECL2(VOID, CorGetAvailableThreads, DWORD* workerThreads, DWORD* completionPortThreads); static FCDECL0(INT32, GetThreadCount); static FCDECL0(INT64, GetPendingUnmanagedWorkItemCount); static FCDECL0(VOID, NotifyRequestProgress); static FCDECL0(FC_BOOL_RET, NotifyRequestComplete); static FCDECL0(FC_BOOL_RET, GetEnableWorkerTracking); static FCDECL1(void, ReportThreadStatus, CLR_BOOL isWorking); static FCDECL5(LPVOID, CorRegisterWaitForSingleObject, Object* waitObjectUNSAFE, Object* stateUNSAFE, UINT32 timeout, CLR_BOOL executeOnlyOnce, Object* registeredWaitObjectUNSAFE); static FCDECL1(FC_BOOL_RET, CorPostQueuedCompletionStatus, LPOVERLAPPED lpOverlapped); static FCDECL2(FC_BOOL_RET, CorUnregisterWait, LPVOID WaitHandle, Object * objectToNotify); static FCDECL1(void, CorWaitHandleCleanupNative, LPVOID WaitHandle); static FCDECL1(FC_BOOL_RET, CorBindIoCompletionCallback, HANDLE fileHandle); }; extern "C" INT64 QCALLTYPE ThreadPool_GetCompletedWorkItemCount(); extern "C" BOOL QCALLTYPE ThreadPool_RequestWorkerThread(); extern "C" BOOL QCALLTYPE ThreadPool_PerformGateActivities(INT32 cpuUtilization); extern "C" HANDLE QCALLTYPE AppDomainTimer_Create(INT32 dueTime, INT32 timerId); extern "C" BOOL QCALLTYPE AppDomainTimer_Change(HANDLE hTimer, INT32 dueTime); extern "C" BOOL QCALLTYPE AppDomainTimer_Delete(HANDLE hTimer); VOID QueueUserWorkItemManagedCallback(PVOID pArg); void WINAPI BindIoCompletionCallbackStub(DWORD ErrorCode, DWORD numBytesTransferred, LPOVERLAPPED lpOverlapped); void SetAsyncResultProperties( OVERLAPPEDDATAREF overlapped, DWORD dwErrorCode, DWORD dwNumBytes); #endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Header: COMThreadPool.h ** ** Purpose: Native methods on System.ThreadPool ** and its inner classes ** ** ===========================================================*/ #ifndef _COMTHREADPOOL_H #define _COMTHREADPOOL_H #include "delegateinfo.h" #include "nativeoverlapped.h" class ThreadPoolNative { public: static FCDECL4(INT32, GetNextConfigUInt32Value, INT32 configVariableIndex, UINT32 *configValueRef, BOOL *isBooleanRef, LPCWSTR *appContextConfigNameRef); static FCDECL1(FC_BOOL_RET, CorCanSetMinIOCompletionThreads, DWORD ioCompletionThreads); static FCDECL1(FC_BOOL_RET, CorCanSetMaxIOCompletionThreads, DWORD ioCompletionThreads); static FCDECL2(FC_BOOL_RET, CorSetMaxThreads, DWORD workerThreads, DWORD completionPortThreads); static FCDECL2(VOID, CorGetMaxThreads, DWORD* workerThreads, DWORD* completionPortThreads); static FCDECL2(FC_BOOL_RET, CorSetMinThreads, DWORD workerThreads, DWORD completionPortThreads); static FCDECL2(VOID, CorGetMinThreads, DWORD* workerThreads, DWORD* completionPortThreads); static FCDECL2(VOID, CorGetAvailableThreads, DWORD* workerThreads, DWORD* completionPortThreads); static FCDECL0(INT32, GetThreadCount); static FCDECL0(INT64, GetPendingUnmanagedWorkItemCount); static FCDECL0(VOID, NotifyRequestProgress); static FCDECL0(FC_BOOL_RET, NotifyRequestComplete); static FCDECL0(FC_BOOL_RET, GetEnableWorkerTracking); static FCDECL1(void, ReportThreadStatus, CLR_BOOL isWorking); static FCDECL5(LPVOID, CorRegisterWaitForSingleObject, Object* waitObjectUNSAFE, Object* stateUNSAFE, UINT32 timeout, CLR_BOOL executeOnlyOnce, Object* registeredWaitObjectUNSAFE); static FCDECL1(FC_BOOL_RET, CorPostQueuedCompletionStatus, LPOVERLAPPED lpOverlapped); static FCDECL2(FC_BOOL_RET, CorUnregisterWait, LPVOID WaitHandle, Object * objectToNotify); static FCDECL1(void, CorWaitHandleCleanupNative, LPVOID WaitHandle); static FCDECL1(FC_BOOL_RET, CorBindIoCompletionCallback, HANDLE fileHandle); }; extern "C" INT64 QCALLTYPE ThreadPool_GetCompletedWorkItemCount(); extern "C" BOOL QCALLTYPE ThreadPool_RequestWorkerThread(); extern "C" BOOL QCALLTYPE ThreadPool_PerformGateActivities(INT32 cpuUtilization); extern "C" HANDLE QCALLTYPE AppDomainTimer_Create(INT32 dueTime, INT32 timerId); extern "C" BOOL QCALLTYPE AppDomainTimer_Change(HANDLE hTimer, INT32 dueTime); extern "C" BOOL QCALLTYPE AppDomainTimer_Delete(HANDLE hTimer); VOID QueueUserWorkItemManagedCallback(PVOID pArg); void WINAPI BindIoCompletionCallbackStub(DWORD ErrorCode, DWORD numBytesTransferred, LPOVERLAPPED lpOverlapped); void SetAsyncResultProperties( OVERLAPPEDDATAREF overlapped, DWORD dwErrorCode, DWORD dwNumBytes); #endif
-1
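The PR description above comes down to one pattern: on Arm64 the JIT must treat SIMD types as available even when COMPlus_FeatureSIMD=0 clears the featureSIMD flag, so ABI-related decisions should go through supportSIMDTypes() rather than the raw flag. The sketch below is illustrative only, with a made-up CompilerConfig type standing in for the real Compiler class.

// Illustrative sketch only, not the actual Compiler code: decision points of
// the kind the PR describes should key off a query that is unconditionally
// true on Arm64 (supportSIMDTypes()) instead of the raw featureSIMD flag,
// which a user can clear with COMPlus_FeatureSIMD=0.
#include <cstdio>

struct CompilerConfig
{
    bool featureSIMD; // user-configurable; COMPlus_FeatureSIMD=0 clears it

    bool supportSIMDTypes() const
    {
#if defined(__aarch64__) || defined(_M_ARM64)
        return true;  // Arm64 ABI handling always needs SIMD types
#else
        return featureSIMD;
#endif
    }
};

int main()
{
    CompilerConfig cfg{false}; // simulate COMPlus_FeatureSIMD=0

    // Before: gating ABI-related work on the raw flag can skip it on Arm64.
    if (cfg.featureSIMD)
    {
        std::printf("flag-gated path\n");
    }

    // After: the query keeps the path alive on Arm64 even with the flag off.
    if (cfg.supportSIMDTypes())
    {
        std::printf("supportSIMDTypes()-gated path\n");
    }
    return 0;
}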
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types but in the code base, we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`, and that doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types but in the code base, we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`, and that doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/native/public/mono/metadata/appdomain.h
/** * \file * AppDomain functions * * Author: * Dietmar Maurer ([email protected]) * * (C) 2001 Ximian, Inc. */ #ifndef _MONO_METADATA_APPDOMAIN_H_ #define _MONO_METADATA_APPDOMAIN_H_ #include <mono/metadata/details/appdomain-types.h> MONO_BEGIN_DECLS #define MONO_API_FUNCTION(ret,name,args) MONO_API ret name args; #include <mono/metadata/details/appdomain-functions.h> #undef MONO_API_FUNCTION MONO_END_DECLS #endif /* _MONO_METADATA_APPDOMAIN_H_ */
/** * \file * AppDomain functions * * Author: * Dietmar Maurer ([email protected]) * * (C) 2001 Ximian, Inc. */ #ifndef _MONO_METADATA_APPDOMAIN_H_ #define _MONO_METADATA_APPDOMAIN_H_ #include <mono/metadata/details/appdomain-types.h> MONO_BEGIN_DECLS #define MONO_API_FUNCTION(ret,name,args) MONO_API ret name args; #include <mono/metadata/details/appdomain-functions.h> #undef MONO_API_FUNCTION MONO_END_DECLS #endif /* _MONO_METADATA_APPDOMAIN_H_ */
-1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types but in the code base, we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`, and that doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types but in the code base, we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`, and that doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/pal/src/libunwind/include/libunwind-ia64.h
/* libunwind - a platform-independent unwind library Copyright (C) 2001-2004 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef LIBUNWIND_H #define LIBUNWIND_H #include <inttypes.h> #include <ucontext.h> #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif #ifdef ia64 /* This works around a bug in Intel's ECC v7.0 which defines "ia64" as "1". */ # undef ia64 #endif #ifdef __hpux /* On HP-UX, there is no hope of supporting UNW_LOCAL_ONLY, because it's impossible to obtain the address of the members in the sigcontext structure. */ # undef UNW_LOCAL_ONLY # define UNW_GENERIC_ONLY #endif #define UNW_TARGET ia64 #define UNW_TARGET_IA64 1 #define _U_TDEP_QP_TRUE 0 /* see libunwind-dynamic.h */ /* This needs to be big enough to accommodate "struct cursor", while leaving some slack for future expansion. Changing this value will require recompiling all users of this library. Stack allocation is relatively cheap and unwind-state copying is relatively rare, so we want to err on making it rather too big than too small. */ #define UNW_TDEP_CURSOR_LEN 511 /* If this bit is it indicates that the procedure saved all of ar.bsp, ar.bspstore, and ar.rnat. If, additionally, ar.bsp != saved ar.bsp, then this procedure has performed a register-backing-store switch. */ #define UNW_PI_FLAG_IA64_RBS_SWITCH_BIT (UNW_PI_FLAG_FIRST_TDEP_BIT + 0) #define UNW_PI_FLAG_IA64_RBS_SWITCH (1 << UNW_PI_FLAG_IA64_RBS_SWITCH_BIT) typedef uint64_t unw_word_t; typedef int64_t unw_sword_t; /* On IA-64, we want to access the contents of floating-point registers as a pair of "words", but to ensure 16-byte alignment, we make it a union that contains a "long double". This will do the Right Thing on all known IA-64 platforms, including HP-UX. */ typedef union { struct { unw_word_t bits[2]; } raw; long double dummy; /* dummy to force 16-byte alignment */ } unw_tdep_fpreg_t; typedef struct { /* no ia64-specific auxiliary proc-info */ } unw_tdep_proc_info_t; typedef enum { /* Note: general registers are excepted to start with index 0. This convention facilitates architecture-independent implementation of the C++ exception handling ABI. See _Unwind_SetGR() and _Unwind_GetGR() for details. 
*/ UNW_IA64_GR = 0, /* general registers (r0..r127) */ UNW_IA64_GP = UNW_IA64_GR + 1, UNW_IA64_TP = UNW_IA64_GR + 13, UNW_IA64_NAT = UNW_IA64_GR + 128, /* NaT registers (nat0..nat127) */ UNW_IA64_FR = UNW_IA64_NAT + 128, /* fp registers (f0..f127) */ UNW_IA64_AR = UNW_IA64_FR + 128, /* application registers (ar0..r127) */ UNW_IA64_AR_RSC = UNW_IA64_AR + 16, UNW_IA64_AR_BSP = UNW_IA64_AR + 17, UNW_IA64_AR_BSPSTORE = UNW_IA64_AR + 18, UNW_IA64_AR_RNAT = UNW_IA64_AR + 19, UNW_IA64_AR_CSD = UNW_IA64_AR + 25, UNW_IA64_AR_26 = UNW_IA64_AR + 26, UNW_IA64_AR_SSD = UNW_IA64_AR_26, UNW_IA64_AR_CCV = UNW_IA64_AR + 32, UNW_IA64_AR_UNAT = UNW_IA64_AR + 36, UNW_IA64_AR_FPSR = UNW_IA64_AR + 40, UNW_IA64_AR_PFS = UNW_IA64_AR + 64, UNW_IA64_AR_LC = UNW_IA64_AR + 65, UNW_IA64_AR_EC = UNW_IA64_AR + 66, UNW_IA64_BR = UNW_IA64_AR + 128, /* branch registers (b0..p7) */ UNW_IA64_RP = UNW_IA64_BR + 0, /* return pointer (rp) */ UNW_IA64_PR = UNW_IA64_BR + 8, /* predicate registers (p0..p63) */ UNW_IA64_CFM, /* frame info: */ UNW_IA64_BSP, UNW_IA64_IP, UNW_IA64_SP, UNW_TDEP_LAST_REG = UNW_IA64_SP, UNW_TDEP_IP = UNW_IA64_IP, UNW_TDEP_SP = UNW_IA64_SP, UNW_TDEP_EH = UNW_IA64_GR + 15 } ia64_regnum_t; #define UNW_TDEP_NUM_EH_REGS 4 /* r15-r18 are exception args */ typedef struct unw_tdep_save_loc { /* Additional target-dependent info on a save location. On IA-64, we use this to provide the bit number in which a NaT bit gets saved. */ uint8_t nat_bitnr; /* Padding reserved for future use. */ uint8_t reserved[7]; } unw_tdep_save_loc_t; /* On IA-64, we can directly use ucontext_t as the unwind context. */ typedef ucontext_t unw_tdep_context_t; #define unw_tdep_is_fpreg(r) ((unsigned) ((r) - UNW_IA64_FR) < 128) #include "libunwind-dynamic.h" #include "libunwind-common.h" #ifdef __hpux /* In theory, we could use _Uia64_getcontext() on HP-UX as well, but the benefit of doing so would be marginal given that it can't support UNW_LOCAL_ONLY. */ # define unw_tdep_getcontext getcontext #else # define unw_tdep_getcontext UNW_ARCH_OBJ (getcontext) extern int unw_tdep_getcontext (unw_tdep_context_t *); #endif /* This is a helper routine to search an ia64 unwind table. If the address-space argument AS points to something other than the local address-space, the memory for the unwind-info will be allocated with malloc(), and should be free()d during the put_unwind_info() callback. This routine is signal-safe for the local-address-space case ONLY. */ #define unw_search_ia64_unwind_table UNW_OBJ(search_unwind_table) extern int unw_search_ia64_unwind_table (unw_addr_space_t, unw_word_t, unw_dyn_info_t *, unw_proc_info_t *, int, void *); /* This is a helper routine which the get_dyn_info_list_addr() callback can use to locate the special dynamic-info list entry in an IA-64 unwind table. If the entry exists in the table, the list-address is returned. In all other cases, 0 is returned. */ extern unw_word_t _Uia64_find_dyn_list (unw_addr_space_t, unw_dyn_info_t *, void *); /* This is a helper routine to obtain the kernel-unwind info. It is signal-safe. */ extern int _Uia64_get_kernel_table (unw_dyn_info_t *); #if defined(__cplusplus) || defined(c_plusplus) } #endif #endif /* LIBUNWIND_H */
/* libunwind - a platform-independent unwind library Copyright (C) 2001-2004 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef LIBUNWIND_H #define LIBUNWIND_H #include <inttypes.h> #include <ucontext.h> #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif #ifdef ia64 /* This works around a bug in Intel's ECC v7.0 which defines "ia64" as "1". */ # undef ia64 #endif #ifdef __hpux /* On HP-UX, there is no hope of supporting UNW_LOCAL_ONLY, because it's impossible to obtain the address of the members in the sigcontext structure. */ # undef UNW_LOCAL_ONLY # define UNW_GENERIC_ONLY #endif #define UNW_TARGET ia64 #define UNW_TARGET_IA64 1 #define _U_TDEP_QP_TRUE 0 /* see libunwind-dynamic.h */ /* This needs to be big enough to accommodate "struct cursor", while leaving some slack for future expansion. Changing this value will require recompiling all users of this library. Stack allocation is relatively cheap and unwind-state copying is relatively rare, so we want to err on making it rather too big than too small. */ #define UNW_TDEP_CURSOR_LEN 511 /* If this bit is it indicates that the procedure saved all of ar.bsp, ar.bspstore, and ar.rnat. If, additionally, ar.bsp != saved ar.bsp, then this procedure has performed a register-backing-store switch. */ #define UNW_PI_FLAG_IA64_RBS_SWITCH_BIT (UNW_PI_FLAG_FIRST_TDEP_BIT + 0) #define UNW_PI_FLAG_IA64_RBS_SWITCH (1 << UNW_PI_FLAG_IA64_RBS_SWITCH_BIT) typedef uint64_t unw_word_t; typedef int64_t unw_sword_t; /* On IA-64, we want to access the contents of floating-point registers as a pair of "words", but to ensure 16-byte alignment, we make it a union that contains a "long double". This will do the Right Thing on all known IA-64 platforms, including HP-UX. */ typedef union { struct { unw_word_t bits[2]; } raw; long double dummy; /* dummy to force 16-byte alignment */ } unw_tdep_fpreg_t; typedef struct { /* no ia64-specific auxiliary proc-info */ } unw_tdep_proc_info_t; typedef enum { /* Note: general registers are excepted to start with index 0. This convention facilitates architecture-independent implementation of the C++ exception handling ABI. See _Unwind_SetGR() and _Unwind_GetGR() for details. 
*/ UNW_IA64_GR = 0, /* general registers (r0..r127) */ UNW_IA64_GP = UNW_IA64_GR + 1, UNW_IA64_TP = UNW_IA64_GR + 13, UNW_IA64_NAT = UNW_IA64_GR + 128, /* NaT registers (nat0..nat127) */ UNW_IA64_FR = UNW_IA64_NAT + 128, /* fp registers (f0..f127) */ UNW_IA64_AR = UNW_IA64_FR + 128, /* application registers (ar0..r127) */ UNW_IA64_AR_RSC = UNW_IA64_AR + 16, UNW_IA64_AR_BSP = UNW_IA64_AR + 17, UNW_IA64_AR_BSPSTORE = UNW_IA64_AR + 18, UNW_IA64_AR_RNAT = UNW_IA64_AR + 19, UNW_IA64_AR_CSD = UNW_IA64_AR + 25, UNW_IA64_AR_26 = UNW_IA64_AR + 26, UNW_IA64_AR_SSD = UNW_IA64_AR_26, UNW_IA64_AR_CCV = UNW_IA64_AR + 32, UNW_IA64_AR_UNAT = UNW_IA64_AR + 36, UNW_IA64_AR_FPSR = UNW_IA64_AR + 40, UNW_IA64_AR_PFS = UNW_IA64_AR + 64, UNW_IA64_AR_LC = UNW_IA64_AR + 65, UNW_IA64_AR_EC = UNW_IA64_AR + 66, UNW_IA64_BR = UNW_IA64_AR + 128, /* branch registers (b0..p7) */ UNW_IA64_RP = UNW_IA64_BR + 0, /* return pointer (rp) */ UNW_IA64_PR = UNW_IA64_BR + 8, /* predicate registers (p0..p63) */ UNW_IA64_CFM, /* frame info: */ UNW_IA64_BSP, UNW_IA64_IP, UNW_IA64_SP, UNW_TDEP_LAST_REG = UNW_IA64_SP, UNW_TDEP_IP = UNW_IA64_IP, UNW_TDEP_SP = UNW_IA64_SP, UNW_TDEP_EH = UNW_IA64_GR + 15 } ia64_regnum_t; #define UNW_TDEP_NUM_EH_REGS 4 /* r15-r18 are exception args */ typedef struct unw_tdep_save_loc { /* Additional target-dependent info on a save location. On IA-64, we use this to provide the bit number in which a NaT bit gets saved. */ uint8_t nat_bitnr; /* Padding reserved for future use. */ uint8_t reserved[7]; } unw_tdep_save_loc_t; /* On IA-64, we can directly use ucontext_t as the unwind context. */ typedef ucontext_t unw_tdep_context_t; #define unw_tdep_is_fpreg(r) ((unsigned) ((r) - UNW_IA64_FR) < 128) #include "libunwind-dynamic.h" #include "libunwind-common.h" #ifdef __hpux /* In theory, we could use _Uia64_getcontext() on HP-UX as well, but the benefit of doing so would be marginal given that it can't support UNW_LOCAL_ONLY. */ # define unw_tdep_getcontext getcontext #else # define unw_tdep_getcontext UNW_ARCH_OBJ (getcontext) extern int unw_tdep_getcontext (unw_tdep_context_t *); #endif /* This is a helper routine to search an ia64 unwind table. If the address-space argument AS points to something other than the local address-space, the memory for the unwind-info will be allocated with malloc(), and should be free()d during the put_unwind_info() callback. This routine is signal-safe for the local-address-space case ONLY. */ #define unw_search_ia64_unwind_table UNW_OBJ(search_unwind_table) extern int unw_search_ia64_unwind_table (unw_addr_space_t, unw_word_t, unw_dyn_info_t *, unw_proc_info_t *, int, void *); /* This is a helper routine which the get_dyn_info_list_addr() callback can use to locate the special dynamic-info list entry in an IA-64 unwind table. If the entry exists in the table, the list-address is returned. In all other cases, 0 is returned. */ extern unw_word_t _Uia64_find_dyn_list (unw_addr_space_t, unw_dyn_info_t *, void *); /* This is a helper routine to obtain the kernel-unwind info. It is signal-safe. */ extern int _Uia64_get_kernel_table (unw_dyn_info_t *); #if defined(__cplusplus) || defined(c_plusplus) } #endif #endif /* LIBUNWIND_H */
-1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types but in the code base, we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`, and that doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types but in the code base, we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`, and that doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/pal/tests/palsuite/c_runtime/wcstod/test2/test2.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test2.c ** ** Purpose: Tests wcstod with overflows ** ** **===================================================================*/ #include <palsuite.h> PALTEST(c_runtime_wcstod_test2_paltest_wcstod_test2, "c_runtime/wcstod/test2/paltest_wcstod_test2") { /* Representation of positive infinty for a IEEE 64-bit double */ INT64 PosInifity = (INT64)(0x7ff00000) << 32; double HugeVal = *(double*) &PosInifity; char *PosStr = "1E+10000"; char *NegStr = "-1E+10000"; WCHAR *wideStr; double result; if (PAL_Initialize(argc,argv)) { return FAIL; } wideStr = convert(PosStr); result = wcstod(wideStr, NULL); free(wideStr); if (result != HugeVal) { Fail("ERROR: wcstod interpreted \"%s\" as %g instead of %g\n", PosStr, result, HugeVal); } wideStr = convert(NegStr); result = wcstod(wideStr, NULL); free(wideStr); if (result != -HugeVal) { Fail("ERROR: wcstod interpreted \"%s\" as %g instead of %g\n", NegStr, result, -HugeVal); } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test2.c ** ** Purpose: Tests wcstod with overflows ** ** **===================================================================*/ #include <palsuite.h> PALTEST(c_runtime_wcstod_test2_paltest_wcstod_test2, "c_runtime/wcstod/test2/paltest_wcstod_test2") { /* Representation of positive infinty for a IEEE 64-bit double */ INT64 PosInifity = (INT64)(0x7ff00000) << 32; double HugeVal = *(double*) &PosInifity; char *PosStr = "1E+10000"; char *NegStr = "-1E+10000"; WCHAR *wideStr; double result; if (PAL_Initialize(argc,argv)) { return FAIL; } wideStr = convert(PosStr); result = wcstod(wideStr, NULL); free(wideStr); if (result != HugeVal) { Fail("ERROR: wcstod interpreted \"%s\" as %g instead of %g\n", PosStr, result, HugeVal); } wideStr = convert(NegStr); result = wcstod(wideStr, NULL); free(wideStr); if (result != -HugeVal) { Fail("ERROR: wcstod interpreted \"%s\" as %g instead of %g\n", NegStr, result, -HugeVal); } PAL_Terminate(); return PASS; }
-1
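The wcstod test in the record above leans on the standard overflow contract for strtod/wcstod: a decimal value outside double range converts to plus or minus HUGE_VAL, with errno set to ERANGE. A small standalone sketch of that behavior, independent of the PAL harness:

// Standalone check of the contract the test relies on: converting a decimal
// value outside double range yields plus or minus HUGE_VAL and sets errno to
// ERANGE (per the C standard's strtod/wcstod overflow rules).
#include <cerrno>
#include <cmath>
#include <cstdio>
#include <cwchar>

int main()
{
    errno = 0;
    double pos = std::wcstod(L"1E+10000", nullptr);
    std::printf("positive overflow: %g, equals HUGE_VAL: %d, errno==ERANGE: %d\n",
                pos, pos == HUGE_VAL, errno == ERANGE);

    errno = 0;
    double neg = std::wcstod(L"-1E+10000", nullptr);
    std::printf("negative overflow: %g, equals -HUGE_VAL: %d\n", neg, neg == -HUGE_VAL);
    return 0;
}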
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types but in the code base, we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`, and that doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types but in the code base, we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`, and that doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/native/libs/System.Security.Cryptography.Native.Android/pal_evp.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once #include "pal_jni.h" #define EVP_MAX_MD_SIZE 64 PALEXPORT int32_t CryptoNative_EvpMdSize(intptr_t md); PALEXPORT int32_t CryptoNative_GetMaxMdSize(void); PALEXPORT intptr_t CryptoNative_EvpMd5(void); PALEXPORT intptr_t CryptoNative_EvpSha1(void); PALEXPORT intptr_t CryptoNative_EvpSha256(void); PALEXPORT intptr_t CryptoNative_EvpSha384(void); PALEXPORT intptr_t CryptoNative_EvpSha512(void); PALEXPORT int32_t CryptoNative_EvpDigestOneShot(intptr_t type, void* source, int32_t sourceSize, uint8_t* md, uint32_t* mdSize); PALEXPORT jobject CryptoNative_EvpMdCtxCreate(intptr_t type); PALEXPORT int32_t CryptoNative_EvpDigestReset(jobject ctx, intptr_t type); PALEXPORT int32_t CryptoNative_EvpDigestUpdate(jobject ctx, void* d, int32_t cnt); PALEXPORT int32_t CryptoNative_EvpDigestFinalEx(jobject ctx, uint8_t* md, uint32_t* s); PALEXPORT int32_t CryptoNative_EvpDigestCurrent(jobject ctx, uint8_t* md, uint32_t* s); PALEXPORT void CryptoNative_EvpMdCtxDestroy(jobject ctx);
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once #include "pal_jni.h" #define EVP_MAX_MD_SIZE 64 PALEXPORT int32_t CryptoNative_EvpMdSize(intptr_t md); PALEXPORT int32_t CryptoNative_GetMaxMdSize(void); PALEXPORT intptr_t CryptoNative_EvpMd5(void); PALEXPORT intptr_t CryptoNative_EvpSha1(void); PALEXPORT intptr_t CryptoNative_EvpSha256(void); PALEXPORT intptr_t CryptoNative_EvpSha384(void); PALEXPORT intptr_t CryptoNative_EvpSha512(void); PALEXPORT int32_t CryptoNative_EvpDigestOneShot(intptr_t type, void* source, int32_t sourceSize, uint8_t* md, uint32_t* mdSize); PALEXPORT jobject CryptoNative_EvpMdCtxCreate(intptr_t type); PALEXPORT int32_t CryptoNative_EvpDigestReset(jobject ctx, intptr_t type); PALEXPORT int32_t CryptoNative_EvpDigestUpdate(jobject ctx, void* d, int32_t cnt); PALEXPORT int32_t CryptoNative_EvpDigestFinalEx(jobject ctx, uint8_t* md, uint32_t* s); PALEXPORT int32_t CryptoNative_EvpDigestCurrent(jobject ctx, uint8_t* md, uint32_t* s); PALEXPORT void CryptoNative_EvpMdCtxDestroy(jobject ctx);
-1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types but in the code base, we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`, and that doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types but in the code base, we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`, and that doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/pal/tests/palsuite/c_runtime/ferror/test2/test2.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test2.c ** ** Purpose: Open a read-only file and attempt to write some data to it. ** Check to ensure that an ferror occurs. ** ** Depends: ** fopen ** fwrite ** fclose ** ** ** **===================================================================*/ #include <palsuite.h> PALTEST(c_runtime_ferror_test2_paltest_ferror_test2, "c_runtime/ferror/test2/paltest_ferror_test2") { const char filename[] = "testfile"; FILE * fp = NULL; int result; if (PAL_Initialize(argc, argv)) { return FAIL; } /* Open a file in READONLY mode */ if((fp = fopen(filename, "r")) == NULL) { Fail("Unable to open a file for reading."); } /* Attempt to write 14 characters to the file. */ if((result = fwrite("This is a test",1,14,fp)) != 0) { Fail("ERROR: %d characters written. 0 characters should " "have been written, since this file is read-only.", result); } if(ferror(fp) == 0) { Fail("ERROR: ferror should have generated an error when " "write was called on a read-only file. But, it " "retured 0, indicating no error.\n"); } /* Close the file. */ if(fclose(fp) != 0) { Fail("ERROR: fclose failed when trying to close a file pointer. " "This test depends on fclose working properly."); } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test2.c ** ** Purpose: Open a read-only file and attempt to write some data to it. ** Check to ensure that an ferror occurs. ** ** Depends: ** fopen ** fwrite ** fclose ** ** ** **===================================================================*/ #include <palsuite.h> PALTEST(c_runtime_ferror_test2_paltest_ferror_test2, "c_runtime/ferror/test2/paltest_ferror_test2") { const char filename[] = "testfile"; FILE * fp = NULL; int result; if (PAL_Initialize(argc, argv)) { return FAIL; } /* Open a file in READONLY mode */ if((fp = fopen(filename, "r")) == NULL) { Fail("Unable to open a file for reading."); } /* Attempt to write 14 characters to the file. */ if((result = fwrite("This is a test",1,14,fp)) != 0) { Fail("ERROR: %d characters written. 0 characters should " "have been written, since this file is read-only.", result); } if(ferror(fp) == 0) { Fail("ERROR: ferror should have generated an error when " "write was called on a read-only file. But, it " "retured 0, indicating no error.\n"); } /* Close the file. */ if(fclose(fp) != 0) { Fail("ERROR: fclose failed when trying to close a file pointer. " "This test depends on fclose working properly."); } PAL_Terminate(); return PASS; }
-1
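The ferror test in the record above depends on a failed fwrite to a read-only stream leaving the stream's error indicator set. The standalone sketch below shows that behavior as the test assumes it; the file name is arbitrary, the demo creates its own input file instead of the harness's testfile, and the exact return value of the failed fwrite is whatever the local C runtime reports.

// Standalone illustration of the behavior the PAL test checks: writing to a
// stream opened read-only fails, and ferror() reports the error state until
// clearerr() resets it.
#include <cstdio>

int main()
{
    const char* name = "ferror_demo.txt";

    // Create a small file first so the read-only open below succeeds.
    std::FILE* w = std::fopen(name, "w");
    if (w == nullptr) return 1;
    std::fputs("existing contents\n", w);
    std::fclose(w);

    std::FILE* r = std::fopen(name, "r");
    if (r == nullptr) return 1;

    size_t written = std::fwrite("This is a test", 1, 14, r);
    std::printf("fwrite returned %zu, ferror=%d\n", written, std::ferror(r) != 0);

    std::clearerr(r); // the error indicator can be reset explicitly
    std::printf("after clearerr, ferror=%d\n", std::ferror(r) != 0);

    std::fclose(r);
    std::remove(name);
    return 0;
}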
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types but in the code base, we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`, and that doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types but in the code base, we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`, and that doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/tests/Interop/COM/NativeClients/Dispatch/ClientTests.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <xplatform.h> #include <cassert> #include <Server.Contracts.h> #define COM_CLIENT #include <Servers.h> #define THROW_IF_FAILED(exp) { hr = exp; if (FAILED(hr)) { ::printf("FAILURE: 0x%08x = %s\n", hr, #exp); throw hr; } } #define THROW_FAIL_IF_FALSE(exp) { if (!(exp)) { ::printf("FALSE: %s\n", #exp); throw E_FAIL; } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <xplatform.h> #include <cassert> #include <Server.Contracts.h> #define COM_CLIENT #include <Servers.h> #define THROW_IF_FAILED(exp) { hr = exp; if (FAILED(hr)) { ::printf("FAILURE: 0x%08x = %s\n", hr, #exp); throw hr; } } #define THROW_FAIL_IF_FALSE(exp) { if (!(exp)) { ::printf("FALSE: %s\n", #exp); throw E_FAIL; } }
-1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types but in the code base, we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`, and that doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types but in the code base, we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`, and that doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/tests/Interop/PInvoke/Generics/GenericsNative.SpanL.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <stdio.h> #include <stdint.h> #include <xplatform.h> #include <platformdefines.h> struct ByReferenceL { intptr_t value; }; struct SpanL { ByReferenceL pointer; int32_t length; }; static SpanL SpanLValue = { }; extern "C" DLL_EXPORT SpanL STDMETHODCALLTYPE GetSpanL(int64_t e00) { throw "P/Invoke for Span<long> should be unsupported."; } extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetSpanLOut(int64_t e00, SpanL* pValue) { throw "P/Invoke for Span<long> should be unsupported."; } extern "C" DLL_EXPORT const SpanL* STDMETHODCALLTYPE GetSpanLPtr(int64_t e00) { throw "P/Invoke for Span<long> should be unsupported."; } extern "C" DLL_EXPORT SpanL STDMETHODCALLTYPE AddSpanL(SpanL lhs, SpanL rhs) { throw "P/Invoke for Span<long> should be unsupported."; } extern "C" DLL_EXPORT SpanL STDMETHODCALLTYPE AddSpanLs(const SpanL* pValues, uint32_t count) { throw "P/Invoke for Span<long> should be unsupported."; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <stdio.h> #include <stdint.h> #include <xplatform.h> #include <platformdefines.h> struct ByReferenceL { intptr_t value; }; struct SpanL { ByReferenceL pointer; int32_t length; }; static SpanL SpanLValue = { }; extern "C" DLL_EXPORT SpanL STDMETHODCALLTYPE GetSpanL(int64_t e00) { throw "P/Invoke for Span<long> should be unsupported."; } extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetSpanLOut(int64_t e00, SpanL* pValue) { throw "P/Invoke for Span<long> should be unsupported."; } extern "C" DLL_EXPORT const SpanL* STDMETHODCALLTYPE GetSpanLPtr(int64_t e00) { throw "P/Invoke for Span<long> should be unsupported."; } extern "C" DLL_EXPORT SpanL STDMETHODCALLTYPE AddSpanL(SpanL lhs, SpanL rhs) { throw "P/Invoke for Span<long> should be unsupported."; } extern "C" DLL_EXPORT SpanL STDMETHODCALLTYPE AddSpanLs(const SpanL* pValues, uint32_t count) { throw "P/Invoke for Span<long> should be unsupported."; }
-1
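The PR description for this record says that checks of the `featureSIMD` flag were replaced with calls to `supportSIMDTypes()` so that Arm64 always takes the SIMD-aware path, since `COMPlus_FeatureSIMD=0` must not disable ABI handling there. The following is a hedged, self-contained C++ sketch of that substitution pattern; the `Compiler` class, its fields, and `classifyStructArg` are invented for illustration and do not mirror the actual RyuJIT sources.

#include <cstdio>

class Compiler
{
    bool featureSIMD; // may be false when COMPlus_FeatureSIMD=0 is set
    bool isArm64;

public:
    Compiler(bool simdFlag, bool arm64) : featureSIMD(simdFlag), isArm64(arm64) {}

    // On Arm64, SIMD support is required for ABI handling regardless of the
    // featureSIMD configuration flag, so the query ignores the flag there.
    bool supportSIMDTypes() const { return isArm64 || featureSIMD; }

    void classifyStructArg() const
    {
        // Before (per the description): if (featureSIMD) { ... }
        // After: query supportSIMDTypes() instead, so Arm64 always qualifies.
        if (supportSIMDTypes())
            std::printf("SIMD-aware ABI classification\n");
        else
            std::printf("scalar-only classification\n");
    }
};

int main()
{
    Compiler arm64WithFlagOff(/* simdFlag */ false, /* arm64 */ true);
    arm64WithFlagOff.classifyStructArg(); // still takes the SIMD-aware path
    return 0;
}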
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types, but the code base sometimes relied on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work for Arm64. On Arm64, SIMD features are always needed to support ABI handling. Modified all such usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but the code base sometimes relied on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work for Arm64. On Arm64, SIMD features are always needed to support ABI handling. Modified all such usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/tools/superpmi/superpmi-shim-simple/icorjitcompiler.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "standardpch.h" #include "icorjitcompiler.h" #include "icorjitinfo.h" void interceptor_ICJC::setTargetOS(CORINFO_OS os) { original_ICorJitCompiler->setTargetOS(os); } CorJitResult interceptor_ICJC::compileMethod(ICorJitInfo* comp, /* IN */ struct CORINFO_METHOD_INFO* info, /* IN */ unsigned /* code:CorJitFlag */ flags, /* IN */ uint8_t** nativeEntry, /* OUT */ uint32_t* nativeSizeOfCode /* OUT */ ) { interceptor_ICJI our_ICorJitInfo; our_ICorJitInfo.original_ICorJitInfo = comp; CorJitResult temp = original_ICorJitCompiler->compileMethod(&our_ICorJitInfo, info, flags, nativeEntry, nativeSizeOfCode); return temp; } void interceptor_ICJC::ProcessShutdownWork(ICorStaticInfo* info) { original_ICorJitCompiler->ProcessShutdownWork(info); } void interceptor_ICJC::getVersionIdentifier(GUID* versionIdentifier /* OUT */) { original_ICorJitCompiler->getVersionIdentifier(versionIdentifier); } unsigned interceptor_ICJC::getMaxIntrinsicSIMDVectorLength(CORJIT_FLAGS cpuCompileFlags) { return original_ICorJitCompiler->getMaxIntrinsicSIMDVectorLength(cpuCompileFlags); }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "standardpch.h" #include "icorjitcompiler.h" #include "icorjitinfo.h" void interceptor_ICJC::setTargetOS(CORINFO_OS os) { original_ICorJitCompiler->setTargetOS(os); } CorJitResult interceptor_ICJC::compileMethod(ICorJitInfo* comp, /* IN */ struct CORINFO_METHOD_INFO* info, /* IN */ unsigned /* code:CorJitFlag */ flags, /* IN */ uint8_t** nativeEntry, /* OUT */ uint32_t* nativeSizeOfCode /* OUT */ ) { interceptor_ICJI our_ICorJitInfo; our_ICorJitInfo.original_ICorJitInfo = comp; CorJitResult temp = original_ICorJitCompiler->compileMethod(&our_ICorJitInfo, info, flags, nativeEntry, nativeSizeOfCode); return temp; } void interceptor_ICJC::ProcessShutdownWork(ICorStaticInfo* info) { original_ICorJitCompiler->ProcessShutdownWork(info); } void interceptor_ICJC::getVersionIdentifier(GUID* versionIdentifier /* OUT */) { original_ICorJitCompiler->getVersionIdentifier(versionIdentifier); } unsigned interceptor_ICJC::getMaxIntrinsicSIMDVectorLength(CORJIT_FLAGS cpuCompileFlags) { return original_ICorJitCompiler->getMaxIntrinsicSIMDVectorLength(cpuCompileFlags); }
-1
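The superpmi-shim-simple source in this record forwards every ICorJitCompiler call to the wrapped compiler and wraps the incoming ICorJitInfo before delegating compileMethod. A stripped-down C++ sketch of that interception-and-forwarding idea follows; all interface and class names here are invented stand-ins, not the real CorInfo/JIT interfaces.

#include <cstdio>

// Hypothetical stand-ins for the JIT interfaces; not the real CorInfo types.
struct IJitInfo     { virtual void log(const char* msg) = 0; virtual ~IJitInfo() = default; };
struct IJitCompiler { virtual int compile(IJitInfo* info) = 0; virtual ~IJitCompiler() = default; };

// Wrapper that observes calls before delegating to the wrapped object.
struct InterceptingJitInfo : IJitInfo
{
    IJitInfo* original = nullptr;
    void log(const char* msg) override
    {
        std::printf("[shim] %s\n", msg); // observation point
        original->log(msg);
    }
};

struct InterceptingJitCompiler : IJitCompiler
{
    IJitCompiler* original = nullptr;
    int compile(IJitInfo* info) override
    {
        InterceptingJitInfo wrapped;
        wrapped.original = info;            // wrap the callback interface
        return original->compile(&wrapped); // forward to the real compiler
    }
};

// Trivial concrete implementations so the sketch runs standalone.
struct RealJitInfo : IJitInfo { void log(const char* msg) override { std::printf("%s\n", msg); } };
struct RealJitCompiler : IJitCompiler { int compile(IJitInfo* info) override { info->log("compiling"); return 0; } };

int main()
{
    RealJitInfo info;
    RealJitCompiler real;
    InterceptingJitCompiler shim;
    shim.original = &real;
    return shim.compile(&info);
}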
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types, but the code base sometimes relied on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work for Arm64. On Arm64, SIMD features are always needed to support ABI handling. Modified all such usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but the code base sometimes relied on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work for Arm64. On Arm64, SIMD features are always needed to support ABI handling. Modified all such usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/debug/inc/common.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef DEBUGGER_COMMON_H #define DEBUGGER_COMMON_H // // Conversions between pointers and CORDB_ADDRESS // These are 3gb safe - we use zero-extension for CORDB_ADDRESS. // Note that this is a different semantics from CLRDATA_ADDRESS which is sign-extended. // // @dbgtodo : This confuses the host and target address spaces. Ideally we'd have // conversions between PTR types (eg. DPTR) and CORDB_ADDRESS, and not need conversions // from host pointer types to CORDB_ADDRESS. // #if defined(TARGET_X86) || defined(TARGET_ARM) inline CORDB_ADDRESS PTR_TO_CORDB_ADDRESS(const void* ptr) { SUPPORTS_DAC; // Cast a void* to a ULONG is not 64-bit safe and triggers compiler warning C3411. // But this is x86 only, so we know it's ok. Use PtrToUlong to do the conversion // without invoking the error. return (CORDB_ADDRESS)(PtrToUlong(ptr)); } inline CORDB_ADDRESS PTR_TO_CORDB_ADDRESS(UINT_PTR ptr) { SUPPORTS_DAC; // PtrToUlong return (CORDB_ADDRESS)(ULONG)(ptr); } #else #define PTR_TO_CORDB_ADDRESS(_ptr) (CORDB_ADDRESS)(ULONG_PTR)(_ptr) #endif //TARGET_X86 || TARGET_ARM #define CORDB_ADDRESS_TO_PTR(_cordb_addr) ((LPVOID)(SIZE_T)(_cordb_addr)) // Determine if an exception record is for a CLR debug event, and get the payload. CORDB_ADDRESS IsEventDebuggerNotification(const EXCEPTION_RECORD * pRecord, CORDB_ADDRESS pClrBaseAddress); #if defined(FEATURE_DBGIPC_TRANSPORT_DI) || defined(FEATURE_DBGIPC_TRANSPORT_VM) struct DebuggerIPCEvent; void InitEventForDebuggerNotification(DEBUG_EVENT * pDebugEvent, CORDB_ADDRESS pClrBaseAddress, DebuggerIPCEvent * pIPCEvent); #endif // (FEATURE_DBGIPC_TRANSPORT_DI || FEATURE_DBGIPC_TRANSPORT_VM) void GetPidDecoratedName(_Out_writes_z_(cBufSizeInChars) WCHAR * pBuf, int cBufSizeInChars, const WCHAR * pPrefix, DWORD pid); // // This macro is used in CORDbgCopyThreadContext(). // // CORDbgCopyThreadContext() does an intelligent copy // from pSrc to pDst, respecting the ContextFlags of both contexts. // #define CopyContextChunk(_t, _f, _end, _flag) \ { \ LOG((LF_CORDB, LL_INFO1000000, \ "CP::CTC: copying " #_flag ":" FMT_ADDR "<---" FMT_ADDR "(%d)\n", \ DBG_ADDR(_t), DBG_ADDR(_f), ((UINT_PTR)(_end) - (UINT_PTR)_t))); \ memcpy((_t), (_f), ((UINT_PTR)(_end) - (UINT_PTR)(_t))); \ } // // CORDbgCopyThreadContext() does an intelligent copy from pSrc to pDst, // respecting the ContextFlags of both contexts. // struct DebuggerREGDISPLAY; extern void CORDbgCopyThreadContext(DT_CONTEXT* pDst, const DT_CONTEXT* pSrc); extern void CORDbgSetDebuggerREGDISPLAYFromContext(DebuggerREGDISPLAY *pDRD, DT_CONTEXT* pContext); //--------------------------------------------------------------------------------------- // // Return the size of the CONTEXT required for the specified context flags. // // Arguments: // flags - this is the equivalent of the ContextFlags field of a CONTEXT // // Return Value: // size of the CONTEXT required // // Notes: // On WIN64 platforms this function will always return sizeof(CONTEXT). // inline ULONG32 ContextSizeForFlags(ULONG32 flags) { #if defined(CONTEXT_EXTENDED_REGISTERS) && defined(TARGET_X86) // Older platforms didn't have extended registers in // the context definition so only enforce that size // if the extended register flag is set. 
if ((flags & CONTEXT_EXTENDED_REGISTERS) != CONTEXT_EXTENDED_REGISTERS) { return offsetof(T_CONTEXT, ExtendedRegisters); } else #endif // TARGET_X86 { return sizeof(T_CONTEXT); } } //--------------------------------------------------------------------------------------- // // Given the size of a buffer and the context flags, check whether the buffer is sufficient large // to hold the CONTEXT. // // Arguments: // size - size of a buffer // flags - this is the equivalent of the ContextFlags field of a CONTEXT // // Return Value: // TRUE if the buffer is large enough to hold the CONTEXT // inline BOOL CheckContextSizeForFlags(ULONG32 size, ULONG32 flags) { return (size >= ContextSizeForFlags(flags)); } //--------------------------------------------------------------------------------------- // // Given the size of a buffer and the BYTE array representation of a CONTEXT, // check whether the buffer is sufficient large to hold the CONTEXT. // // Arguments: // size - size of a buffer // flags - this is the equivalent of the ContextFlags field of a CONTEXT // // Return Value: // TRUE if the buffer is large enough to hold the CONTEXT // inline BOOL CheckContextSizeForBuffer(ULONG32 size, const BYTE * pbBuffer) { return ( ( size >= (offsetof(T_CONTEXT, ContextFlags) + sizeof(ULONG32)) ) && CheckContextSizeForFlags(size, (reinterpret_cast<const T_CONTEXT *>(pbBuffer))->ContextFlags) ); } /* ------------------------------------------------------------------------- * * Constant declarations * ------------------------------------------------------------------------- */ enum { NULL_THREAD_ID = -1, NULL_PROCESS_ID = -1 }; /* ------------------------------------------------------------------------- * * Macros * ------------------------------------------------------------------------- */ // // CANNOT USE IsBad*Ptr() methods here. They are *banned* APIs because of various // reasons (see http://winweb/wincet/bannedapis.htm). // #define VALIDATE_POINTER_TO_OBJECT(ptr, type) \ if ((ptr) == NULL) \ { \ return E_INVALIDARG; \ } #define VALIDATE_POINTER_TO_OBJECT_OR_NULL(ptr, type) // // CANNOT USE IsBad*Ptr() methods here. They are *banned* APIs because of various // reasons (see http://winweb/wincet/bannedapis.htm). // #define VALIDATE_POINTER_TO_OBJECT_ARRAY(ptr, type, cElt, fRead, fWrite) \ if ((ptr) == NULL) \ { \ return E_INVALIDARG; \ } #define VALIDATE_POINTER_TO_OBJECT_ARRAY_OR_NULL(ptr, type,cElt,fRead,fWrite) /* ------------------------------------------------------------------------- * * Function Prototypes * ------------------------------------------------------------------------- */ // Linear search through an array of NativeVarInfos, to find // the variable of index dwIndex, valid at the given ip. // // returns CORDBG_E_IL_VAR_NOT_AVAILABLE if the variable isn't // valid at the given ip. // // This should be inlined HRESULT FindNativeInfoInILVariableArray(DWORD dwIndex, SIZE_T ip, ICorDebugInfo::NativeVarInfo **ppNativeInfo, unsigned int nativeInfoCount, ICorDebugInfo::NativeVarInfo *nativeInfo); // struct DebuggerILToNativeMap: Holds the IL to Native offset map // Great pains are taken to ensure that this each entry corresponds to the // first IL instruction in a source line. It isn't actually a mapping // of _every_ IL instruction in a method, just those for source lines. // SIZE_T ilOffset: IL offset of a source line. // SIZE_T nativeStartOffset: Offset within the method where the native // instructions corresponding to the IL offset begin. 
// SIZE_T nativeEndOffset: Offset within the method where the native // instructions corresponding to the IL offset end. // // Note: any changes to this struct need to be reflected in // COR_DEBUG_IL_TO_NATIVE_MAP in CorDebug.idl. These structs must // match exactly. // struct DebuggerILToNativeMap { ULONG ilOffset; ULONG nativeStartOffset; ULONG nativeEndOffset; ICorDebugInfo::SourceTypes source; }; void ExportILToNativeMap(ULONG32 cMap, COR_DEBUG_IL_TO_NATIVE_MAP mapExt[], struct DebuggerILToNativeMap mapInt[], SIZE_T sizeOfCode); #include <primitives.h> // ---------------------------------------------------------------------------- // IsPatchInRequestedRange // // Description: // This function checks if a patch falls (fully or partially) in the requested range of memory. // // Arguments: // * requestedAddr - the address of the memory range // * requestedSize - the size of the memory range // * patchAddr - the address of the patch // * pPRD - the opcode of the patch // // Return Value: // Return TRUE if the patch is fully or partially in the requested memory range. // // Notes: // Currently this function is called both from the RS (via code:CordbProcess.ReadMemory and // code:CordbProcess.WriteMemory) and from DAC. When we DACize the two functions mentioned above, // this function should be called from DAC only, and we should use a MemoryRange here. // inline bool IsPatchInRequestedRange(CORDB_ADDRESS requestedAddr, SIZE_T requestedSize, CORDB_ADDRESS patchAddr) { SUPPORTS_DAC; if (requestedAddr == 0) return false; // Note that patchEnd points to the byte immediately AFTER the patch, so patchEnd is NOT // part of the patch. CORDB_ADDRESS patchEnd = GetPatchEndAddr(patchAddr); // We have three cases: // 1) the entire patch is in the requested range // 2) the beginning of the requested range is covered by the patch // 3) the end of the requested range is covered by the patch // // Note that on x86, since the break instruction only takes up one byte, the following condition // degenerates to case 1 only. return (((requestedAddr <= patchAddr) && (patchEnd <= (requestedAddr + requestedSize))) || ((patchAddr <= requestedAddr) && (requestedAddr < patchEnd)) || ((patchAddr <= (requestedAddr + requestedSize - 1)) && ((requestedAddr + requestedSize - 1) < patchEnd))); } inline CORDB_ADDRESS ALIGN_ADDRESS( CORDB_ADDRESS val, CORDB_ADDRESS alignment ) { LIMITED_METHOD_DAC_CONTRACT; // alignment must be a power of 2 for this implementation to work (need modulo otherwise) _ASSERTE( 0 == (alignment & (alignment - 1)) ); CORDB_ADDRESS result = (val + (alignment - 1)) & ~(alignment - 1); _ASSERTE( result >= val ); // check for overflow return result; } #include "dacprivate.h" // for MSLAYOUT #include "dumpcommon.h" #endif //DEBUGGER_COMMON_H
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef DEBUGGER_COMMON_H #define DEBUGGER_COMMON_H // // Conversions between pointers and CORDB_ADDRESS // These are 3gb safe - we use zero-extension for CORDB_ADDRESS. // Note that this is a different semantics from CLRDATA_ADDRESS which is sign-extended. // // @dbgtodo : This confuses the host and target address spaces. Ideally we'd have // conversions between PTR types (eg. DPTR) and CORDB_ADDRESS, and not need conversions // from host pointer types to CORDB_ADDRESS. // #if defined(TARGET_X86) || defined(TARGET_ARM) inline CORDB_ADDRESS PTR_TO_CORDB_ADDRESS(const void* ptr) { SUPPORTS_DAC; // Cast a void* to a ULONG is not 64-bit safe and triggers compiler warning C3411. // But this is x86 only, so we know it's ok. Use PtrToUlong to do the conversion // without invoking the error. return (CORDB_ADDRESS)(PtrToUlong(ptr)); } inline CORDB_ADDRESS PTR_TO_CORDB_ADDRESS(UINT_PTR ptr) { SUPPORTS_DAC; // PtrToUlong return (CORDB_ADDRESS)(ULONG)(ptr); } #else #define PTR_TO_CORDB_ADDRESS(_ptr) (CORDB_ADDRESS)(ULONG_PTR)(_ptr) #endif //TARGET_X86 || TARGET_ARM #define CORDB_ADDRESS_TO_PTR(_cordb_addr) ((LPVOID)(SIZE_T)(_cordb_addr)) // Determine if an exception record is for a CLR debug event, and get the payload. CORDB_ADDRESS IsEventDebuggerNotification(const EXCEPTION_RECORD * pRecord, CORDB_ADDRESS pClrBaseAddress); #if defined(FEATURE_DBGIPC_TRANSPORT_DI) || defined(FEATURE_DBGIPC_TRANSPORT_VM) struct DebuggerIPCEvent; void InitEventForDebuggerNotification(DEBUG_EVENT * pDebugEvent, CORDB_ADDRESS pClrBaseAddress, DebuggerIPCEvent * pIPCEvent); #endif // (FEATURE_DBGIPC_TRANSPORT_DI || FEATURE_DBGIPC_TRANSPORT_VM) void GetPidDecoratedName(_Out_writes_z_(cBufSizeInChars) WCHAR * pBuf, int cBufSizeInChars, const WCHAR * pPrefix, DWORD pid); // // This macro is used in CORDbgCopyThreadContext(). // // CORDbgCopyThreadContext() does an intelligent copy // from pSrc to pDst, respecting the ContextFlags of both contexts. // #define CopyContextChunk(_t, _f, _end, _flag) \ { \ LOG((LF_CORDB, LL_INFO1000000, \ "CP::CTC: copying " #_flag ":" FMT_ADDR "<---" FMT_ADDR "(%d)\n", \ DBG_ADDR(_t), DBG_ADDR(_f), ((UINT_PTR)(_end) - (UINT_PTR)_t))); \ memcpy((_t), (_f), ((UINT_PTR)(_end) - (UINT_PTR)(_t))); \ } // // CORDbgCopyThreadContext() does an intelligent copy from pSrc to pDst, // respecting the ContextFlags of both contexts. // struct DebuggerREGDISPLAY; extern void CORDbgCopyThreadContext(DT_CONTEXT* pDst, const DT_CONTEXT* pSrc); extern void CORDbgSetDebuggerREGDISPLAYFromContext(DebuggerREGDISPLAY *pDRD, DT_CONTEXT* pContext); //--------------------------------------------------------------------------------------- // // Return the size of the CONTEXT required for the specified context flags. // // Arguments: // flags - this is the equivalent of the ContextFlags field of a CONTEXT // // Return Value: // size of the CONTEXT required // // Notes: // On WIN64 platforms this function will always return sizeof(CONTEXT). // inline ULONG32 ContextSizeForFlags(ULONG32 flags) { #if defined(CONTEXT_EXTENDED_REGISTERS) && defined(TARGET_X86) // Older platforms didn't have extended registers in // the context definition so only enforce that size // if the extended register flag is set. 
if ((flags & CONTEXT_EXTENDED_REGISTERS) != CONTEXT_EXTENDED_REGISTERS) { return offsetof(T_CONTEXT, ExtendedRegisters); } else #endif // TARGET_X86 { return sizeof(T_CONTEXT); } } //--------------------------------------------------------------------------------------- // // Given the size of a buffer and the context flags, check whether the buffer is sufficient large // to hold the CONTEXT. // // Arguments: // size - size of a buffer // flags - this is the equivalent of the ContextFlags field of a CONTEXT // // Return Value: // TRUE if the buffer is large enough to hold the CONTEXT // inline BOOL CheckContextSizeForFlags(ULONG32 size, ULONG32 flags) { return (size >= ContextSizeForFlags(flags)); } //--------------------------------------------------------------------------------------- // // Given the size of a buffer and the BYTE array representation of a CONTEXT, // check whether the buffer is sufficient large to hold the CONTEXT. // // Arguments: // size - size of a buffer // flags - this is the equivalent of the ContextFlags field of a CONTEXT // // Return Value: // TRUE if the buffer is large enough to hold the CONTEXT // inline BOOL CheckContextSizeForBuffer(ULONG32 size, const BYTE * pbBuffer) { return ( ( size >= (offsetof(T_CONTEXT, ContextFlags) + sizeof(ULONG32)) ) && CheckContextSizeForFlags(size, (reinterpret_cast<const T_CONTEXT *>(pbBuffer))->ContextFlags) ); } /* ------------------------------------------------------------------------- * * Constant declarations * ------------------------------------------------------------------------- */ enum { NULL_THREAD_ID = -1, NULL_PROCESS_ID = -1 }; /* ------------------------------------------------------------------------- * * Macros * ------------------------------------------------------------------------- */ // // CANNOT USE IsBad*Ptr() methods here. They are *banned* APIs because of various // reasons (see http://winweb/wincet/bannedapis.htm). // #define VALIDATE_POINTER_TO_OBJECT(ptr, type) \ if ((ptr) == NULL) \ { \ return E_INVALIDARG; \ } #define VALIDATE_POINTER_TO_OBJECT_OR_NULL(ptr, type) // // CANNOT USE IsBad*Ptr() methods here. They are *banned* APIs because of various // reasons (see http://winweb/wincet/bannedapis.htm). // #define VALIDATE_POINTER_TO_OBJECT_ARRAY(ptr, type, cElt, fRead, fWrite) \ if ((ptr) == NULL) \ { \ return E_INVALIDARG; \ } #define VALIDATE_POINTER_TO_OBJECT_ARRAY_OR_NULL(ptr, type,cElt,fRead,fWrite) /* ------------------------------------------------------------------------- * * Function Prototypes * ------------------------------------------------------------------------- */ // Linear search through an array of NativeVarInfos, to find // the variable of index dwIndex, valid at the given ip. // // returns CORDBG_E_IL_VAR_NOT_AVAILABLE if the variable isn't // valid at the given ip. // // This should be inlined HRESULT FindNativeInfoInILVariableArray(DWORD dwIndex, SIZE_T ip, ICorDebugInfo::NativeVarInfo **ppNativeInfo, unsigned int nativeInfoCount, ICorDebugInfo::NativeVarInfo *nativeInfo); // struct DebuggerILToNativeMap: Holds the IL to Native offset map // Great pains are taken to ensure that this each entry corresponds to the // first IL instruction in a source line. It isn't actually a mapping // of _every_ IL instruction in a method, just those for source lines. // SIZE_T ilOffset: IL offset of a source line. // SIZE_T nativeStartOffset: Offset within the method where the native // instructions corresponding to the IL offset begin. 
// SIZE_T nativeEndOffset: Offset within the method where the native // instructions corresponding to the IL offset end. // // Note: any changes to this struct need to be reflected in // COR_DEBUG_IL_TO_NATIVE_MAP in CorDebug.idl. These structs must // match exactly. // struct DebuggerILToNativeMap { ULONG ilOffset; ULONG nativeStartOffset; ULONG nativeEndOffset; ICorDebugInfo::SourceTypes source; }; void ExportILToNativeMap(ULONG32 cMap, COR_DEBUG_IL_TO_NATIVE_MAP mapExt[], struct DebuggerILToNativeMap mapInt[], SIZE_T sizeOfCode); #include <primitives.h> // ---------------------------------------------------------------------------- // IsPatchInRequestedRange // // Description: // This function checks if a patch falls (fully or partially) in the requested range of memory. // // Arguments: // * requestedAddr - the address of the memory range // * requestedSize - the size of the memory range // * patchAddr - the address of the patch // * pPRD - the opcode of the patch // // Return Value: // Return TRUE if the patch is fully or partially in the requested memory range. // // Notes: // Currently this function is called both from the RS (via code:CordbProcess.ReadMemory and // code:CordbProcess.WriteMemory) and from DAC. When we DACize the two functions mentioned above, // this function should be called from DAC only, and we should use a MemoryRange here. // inline bool IsPatchInRequestedRange(CORDB_ADDRESS requestedAddr, SIZE_T requestedSize, CORDB_ADDRESS patchAddr) { SUPPORTS_DAC; if (requestedAddr == 0) return false; // Note that patchEnd points to the byte immediately AFTER the patch, so patchEnd is NOT // part of the patch. CORDB_ADDRESS patchEnd = GetPatchEndAddr(patchAddr); // We have three cases: // 1) the entire patch is in the requested range // 2) the beginning of the requested range is covered by the patch // 3) the end of the requested range is covered by the patch // // Note that on x86, since the break instruction only takes up one byte, the following condition // degenerates to case 1 only. return (((requestedAddr <= patchAddr) && (patchEnd <= (requestedAddr + requestedSize))) || ((patchAddr <= requestedAddr) && (requestedAddr < patchEnd)) || ((patchAddr <= (requestedAddr + requestedSize - 1)) && ((requestedAddr + requestedSize - 1) < patchEnd))); } inline CORDB_ADDRESS ALIGN_ADDRESS( CORDB_ADDRESS val, CORDB_ADDRESS alignment ) { LIMITED_METHOD_DAC_CONTRACT; // alignment must be a power of 2 for this implementation to work (need modulo otherwise) _ASSERTE( 0 == (alignment & (alignment - 1)) ); CORDB_ADDRESS result = (val + (alignment - 1)) & ~(alignment - 1); _ASSERTE( result >= val ); // check for overflow return result; } #include "dacprivate.h" // for MSLAYOUT #include "dumpcommon.h" #endif //DEBUGGER_COMMON_H
-1
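The debug/inc/common.h header in this record computes the CONTEXT size required for a given set of context flags (ContextSizeForFlags) and then checks that a caller-supplied buffer is large enough (CheckContextSizeForFlags). The sketch below reproduces that size-from-flags idea with a simplified, hypothetical context layout; MiniContext and the CTX_* flags are made up for the example and are not the real x86 CONTEXT definition.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical flags and layout, loosely shaped like a thread context.
constexpr uint32_t CTX_BASE     = 0x1;
constexpr uint32_t CTX_EXTENDED = 0x2;

struct MiniContext
{
    uint32_t ContextFlags;
    uint64_t Regs[16];      // "base" register portion
    uint8_t  Extended[512]; // logically present only when CTX_EXTENDED is set
};

// Like ContextSizeForFlags: the required size stops before the extended area
// unless the extended-register flag is set.
inline size_t MiniContextSizeForFlags(uint32_t flags)
{
    if ((flags & CTX_EXTENDED) != CTX_EXTENDED)
        return offsetof(MiniContext, Extended);
    return sizeof(MiniContext);
}

// Like CheckContextSizeForFlags: is the buffer big enough for those flags?
inline bool MiniCheckContextSizeForFlags(size_t bufferSize, uint32_t flags)
{
    return bufferSize >= MiniContextSizeForFlags(flags);
}

int main()
{
    std::printf("base-only needs %zu bytes, extended needs %zu bytes\n",
                MiniContextSizeForFlags(CTX_BASE),
                MiniContextSizeForFlags(CTX_BASE | CTX_EXTENDED));
    std::printf("1024-byte buffer ok for extended flags? %s\n",
                MiniCheckContextSizeForFlags(1024, CTX_BASE | CTX_EXTENDED) ? "yes" : "no");
    return 0;
}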
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types, but the code base sometimes relied on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work for Arm64. On Arm64, SIMD features are always needed to support ABI handling. Modified all such usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but the code base sometimes relied on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work for Arm64. On Arm64, SIMD features are always needed to support ABI handling. Modified all such usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/mono/mono/metadata/gc-internals.h
/** * \file * Internal GC interface * * Author: Paolo Molaro <[email protected]> * * (C) 2002 Ximian, Inc. * Copyright 2012 Xamarin Inc (http://www.xamarin.com) * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #ifndef __MONO_METADATA_GC_INTERNAL_H__ #define __MONO_METADATA_GC_INTERNAL_H__ #include <glib.h> #include <mono/metadata/gc_wrapper.h> #include <mono/metadata/object-internals.h> #include <mono/metadata/threads-types.h> #include <mono/sgen/gc-internal-agnostic.h> #include <mono/metadata/icalls.h> #include <mono/utils/mono-compiler.h> /* Register a memory area as a conservatively scanned GC root */ #define MONO_GC_REGISTER_ROOT_PINNING(x,src,key,msg) mono_gc_register_root ((char*)&(x), sizeof(x), MONO_GC_DESCRIPTOR_NULL, (src), (key), (msg)) #define MONO_GC_UNREGISTER_ROOT(x) mono_gc_deregister_root ((char*)&(x)) /* * The lowest bit is used to mark pinned handles by netcore's GCHandle class. These macros * are used to convert between the old int32 representation to a netcore compatible pointer * representation. */ #define MONO_GC_HANDLE_TO_UINT(ptr) ((guint32)((size_t)(ptr) >> 1)) #define MONO_GC_HANDLE_FROM_UINT(i) ((MonoGCHandle)((size_t)(i) << 1)) /* * Return a GC descriptor for an array containing N pointers to memory allocated * by mono_gc_alloc_fixed (). */ /* For SGEN, the result of alloc_fixed () is not GC tracked memory */ #define MONO_GC_ROOT_DESCR_FOR_FIXED(n) (mono_gc_is_moving () ? mono_gc_make_root_descr_all_refs (0) : MONO_GC_DESCRIPTOR_NULL) /* Register a memory location holding a single object reference as a GC root */ #define MONO_GC_REGISTER_ROOT_SINGLE(x,src,key,msg) do { \ g_assert (sizeof (x) == sizeof (MonoObject*)); \ mono_gc_register_root ((char*)&(x), sizeof(MonoObject*), mono_gc_make_root_descr_all_refs (1), (src), (key),(msg)); \ } while (0) /* * This is used for fields which point to objects which are kept alive by other references * when using Boehm. */ #define MONO_GC_REGISTER_ROOT_IF_MOVING(x,src,key,msg) do { \ if (mono_gc_is_moving ()) \ MONO_GC_REGISTER_ROOT_SINGLE(x,src,key,msg); \ } while (0) #define MONO_GC_UNREGISTER_ROOT_IF_MOVING(x) do { \ if (mono_gc_is_moving ()) \ MONO_GC_UNREGISTER_ROOT (x); \ } while (0) /* useful until we keep track of gc-references in corlib etc. */ #define IS_GC_REFERENCE(class,t) (mono_gc_is_moving () ? FALSE : ((t)->type == MONO_TYPE_U && (class)->image == mono_defaults.corlib)) void mono_object_register_finalizer (MonoObject *obj); void mono_object_register_finalizer_handle (MonoObjectHandle obj); extern void mono_gc_init (void); MONO_COMPONENT_API extern void mono_gc_base_init (void); extern void mono_gc_base_cleanup (void); extern void mono_gc_init_icalls (void); /* * Return whenever the current thread is registered with the GC (i.e. started * by the GC pthread wrappers on unix. */ extern gboolean mono_gc_is_gc_thread (void); MONO_COMPONENT_API extern gboolean mono_gc_is_finalizer_internal_thread (MonoInternalThread *thread); extern void mono_gc_set_stack_end (void *stack_end); /* only valid after the RECLAIM_START GC event and before RECLAIM_END * Not exported in public headers, but can be linked to (unsupported). */ gboolean mono_object_is_alive (MonoObject* obj); MONO_COMPONENT_API gboolean mono_gc_is_finalizer_thread (MonoThread *thread); void mono_gchandle_set_target (MonoGCHandle gchandle, MonoObject *obj); /*Ephemeron functionality. 
Sgen only*/ gboolean mono_gc_ephemeron_array_add (MonoObject *obj); /* User defined marking function */ /* It should work like this: * foreach (ref in GC references in the are structure pointed to by ADDR) * mark_func (ref) */ typedef void (*MonoGCMarkFunc) (MonoObject **addr, void *gc_data); typedef void (*MonoGCRootMarkFunc) (void *addr, MonoGCMarkFunc mark_func, void *gc_data); /* Create a descriptor with a user defined marking function */ MonoGCDescriptor mono_gc_make_root_descr_user (MonoGCRootMarkFunc marker); /* Return whenever user defined marking functions are supported */ gboolean mono_gc_user_markers_supported (void); /* desc is the result from mono_gc_make_descr*. A NULL value means * all the words might contain GC pointers. * The memory is non-moving and it will be explicitly deallocated. * size bytes will be available from the returned address (ie, descr * must not be stored in the returned memory) */ MonoObject* mono_gc_alloc_fixed (size_t size, MonoGCDescriptor descr, MonoGCRootSource source, void *key, const char *msg); // C++ callers outside of metadata (mini/tasklets.c) must use mono_gc_alloc_fixed_no_descriptor // instead of mono_gc_alloc_fixed, or else compile twice -- boehm and sgen. MonoObject* mono_gc_alloc_fixed_no_descriptor (size_t size, MonoGCRootSource source, void *key, const char *msg); void mono_gc_free_fixed (void* addr); typedef void (*FinalizerThreadCallback) (gpointer user_data); MonoObject* mono_gc_alloc_pinned_obj (MonoVTable *vtable, size_t size); MonoObjectHandle mono_gc_alloc_handle_pinned_obj (MonoVTable *vtable, gsize size); MonoObject* mono_gc_alloc_obj (MonoVTable *vtable, size_t size); MonoObjectHandle mono_gc_alloc_handle_obj (MonoVTable *vtable, gsize size); MonoArray* mono_gc_alloc_vector (MonoVTable *vtable, size_t size, uintptr_t max_length); MonoArray* mono_gc_alloc_pinned_vector (MonoVTable *vtable, size_t size, uintptr_t max_length); MonoArrayHandle mono_gc_alloc_handle_vector (MonoVTable *vtable, gsize size, gsize max_length); MonoArray* mono_gc_alloc_array (MonoVTable *vtable, size_t size, uintptr_t max_length, uintptr_t bounds_size); MonoArrayHandle mono_gc_alloc_handle_array (MonoVTable *vtable, gsize size, gsize max_length, gsize bounds_size); MonoString* mono_gc_alloc_string (MonoVTable *vtable, size_t size, gint32 len); MonoStringHandle mono_gc_alloc_handle_string (MonoVTable *vtable, gsize size, gint32 len); MonoObject* mono_gc_alloc_mature (MonoVTable *vtable, size_t size); MonoGCDescriptor mono_gc_make_descr_for_string (gsize *bitmap, int numbits); MonoObjectHandle mono_gc_alloc_handle_mature (MonoVTable *vtable, gsize size); void mono_gc_register_obj_with_weak_fields (void *obj); void mono_gc_register_object_with_weak_fields (MonoObjectHandle obj); typedef void (*MonoFinalizationProc)(gpointer, gpointer); // same as SGenFinalizationProc, GC_finalization_proc void mono_gc_register_for_finalization (MonoObject *obj, MonoFinalizationProc user_data); void mono_gc_add_memory_pressure (gint64 value); MONO_API int mono_gc_register_root (char *start, size_t size, MonoGCDescriptor descr, MonoGCRootSource source, void *key, const char *msg); MONO_COMPONENT_API void mono_gc_deregister_root (char* addr); void mono_gc_finalize_domain (MonoDomain *domain); void mono_gc_run_finalize (void *obj, void *data); void mono_gc_clear_domain (MonoDomain * domain); /* Signal early termination of finalizer processing inside the gc */ void mono_gc_suspend_finalizers (void); /* * Register a root which can only be written using a write barrier. 
* Writes to the root must be done using a write barrier (MONO_ROOT_SETREF). * If the root uses an user defined mark routine, the writes are not required to be * to the area between START and START+SIZE. * The write barrier allows the GC to avoid scanning this root at each collection, so it * is more efficient. * FIXME: Add an API for clearing remset entries if a root with a user defined * mark routine is deleted. */ int mono_gc_register_root_wbarrier (char *start, size_t size, MonoGCDescriptor descr, MonoGCRootSource source, void *key, const char *msg); void mono_gc_wbarrier_set_root (gpointer ptr, MonoObject *value); /* Set a field of a root registered using mono_gc_register_root_wbarrier () */ #define MONO_ROOT_SETREF(s,fieldname,value) do { \ mono_gc_wbarrier_set_root (&((s)->fieldname), (MonoObject*)value); \ } while (0) /* fast allocation support */ typedef enum { // Regular fast path allocator. MANAGED_ALLOCATOR_REGULAR, // Managed allocator that just calls into the runtime. MANAGED_ALLOCATOR_SLOW_PATH, // Managed allocator that works like the regular one but also calls into the profiler. MANAGED_ALLOCATOR_PROFILER, } ManagedAllocatorVariant; int mono_gc_get_aligned_size_for_allocator (int size); MonoMethod* mono_gc_get_managed_allocator (MonoClass *klass, gboolean for_box, gboolean known_instance_size); MonoMethod* mono_gc_get_managed_array_allocator (MonoClass *klass); MonoMethod *mono_gc_get_managed_allocator_by_type (int atype, ManagedAllocatorVariant variant); guint32 mono_gc_get_managed_allocator_types (void); /* Return a short string identifying the GC, indented to be saved in AOT images */ const char *mono_gc_get_gc_name (void); /* Fast write barriers */ MonoMethod* mono_gc_get_specific_write_barrier (gboolean is_concurrent); MonoMethod* mono_gc_get_write_barrier (void); /* Fast valuetype copy */ /* WARNING: [dest, dest + size] must be within the bounds of a single type, otherwise the GC will lose remset entries */ G_EXTERN_C void mono_gc_wbarrier_range_copy (gpointer dest, gconstpointer src, int size); typedef void (*MonoRangeCopyFunction)(gpointer, gconstpointer, int size); MonoRangeCopyFunction mono_gc_get_range_copy_func (void); /* * Functions supplied by the runtime and called by the GC. Currently only used * by SGEN. */ typedef struct { /* * Function called during thread startup/attach to allocate thread-local data * needed by the other functions. */ gpointer (*thread_attach_func) (void); /* * Function called during thread deatch to free the data allocated by * thread_attach_func. */ void (*thread_detach_func) (gpointer user_data); /* * Function called from every thread when suspending for GC. It can save * data needed for marking from thread stacks. user_data is the data returned * by attach_func. This might called with GC locks held and the word stopped, * so it shouldn't do any synchronization etc. */ void (*thread_suspend_func) (gpointer user_data, void *sigcontext, MonoContext *ctx); /* * Function called to mark from thread stacks. user_data is the data returned * by attach_func. This is called twice, with the word stopped: * - in the first pass, it should mark areas of the stack using * conservative marking by calling mono_gc_conservatively_scan_area (). * - in the second pass, it should mark the remaining areas of the stack * using precise marking by calling mono_gc_scan_object (). 
*/ void (*thread_mark_func) (gpointer user_data, guint8 *stack_start, guint8 *stack_end, gboolean precise, void *gc_data); /* * Function called for debugging to get the current managed method for * tracking the provenances of objects. */ gpointer (*get_provenance_func) (void); /* * Same as thread_mark_func, mark the intepreter frames. */ void (*interp_mark_func) (gpointer thread_info, GcScanFunc func, gpointer gc_data, gboolean precise); } MonoGCCallbacks; /* Set the callback functions callable by the GC */ void mono_gc_set_gc_callbacks (MonoGCCallbacks *callbacks); MonoGCCallbacks *mono_gc_get_gc_callbacks (void); /* Functions callable from the thread mark func */ /* Scan the memory area between START and END conservatively */ void mono_gc_conservatively_scan_area (void *start, void *end); /* Scan OBJ, returning its new address */ void *mono_gc_scan_object (void *obj, void *gc_data); /* Return the suspend signal number used by the GC to suspend threads, or -1 if not applicable. */ int mono_gc_get_suspend_signal (void); /* Return the suspend signal number used by the GC to suspend threads, or -1 if not applicable. */ int mono_gc_get_restart_signal (void); /* * Return a human readable description of the GC in malloc-ed memory. */ char* mono_gc_get_description (void); /* * Configure the GC to desktop mode */ void mono_gc_set_desktop_mode (void); /* * Return whenever this GC can move objects */ MONO_COMPONENT_API gboolean mono_gc_is_moving (void); typedef void* (*MonoGCLockedCallbackFunc) (void *data); void* mono_gc_invoke_with_gc_lock (MonoGCLockedCallbackFunc func, void *data); int mono_gc_get_los_limit (void); guint64 mono_gc_get_allocated_bytes_for_current_thread (void); guint64 mono_gc_get_total_allocated_bytes (MonoBoolean precise); void mono_gc_get_gcmemoryinfo ( gint64 *high_memory_load_threshold_bytes, gint64 *memory_load_bytes, gint64 *total_available_memory_bytes, gint64 *total_committed_bytes, gint64 *heap_size_bytes, gint64 *fragmented_bytes); void mono_gc_get_gctimeinfo ( guint64 *time_last_gc_100ns, guint64 *time_since_last_gc_100ns, guint64 *time_max_gc_100ns); guint8* mono_gc_get_card_table (int *shift_bits, gpointer *card_mask); guint8* mono_gc_get_target_card_table (int *shift_bits, target_mgreg_t *card_mask); gboolean mono_gc_card_table_nursery_check (void); void* mono_gc_get_nursery (int *shift_bits, size_t *size); // Don't use directly; set/unset MONO_THREAD_INFO_FLAGS_NO_GC instead. 
void mono_gc_skip_thread_changing (gboolean skip); void mono_gc_skip_thread_changed (gboolean skip); #ifndef HOST_WIN32 int mono_gc_pthread_create (pthread_t *new_thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg); #endif /* * Return whenever GC is disabled */ gboolean mono_gc_is_disabled (void); /* * Return whenever this is the null GC */ gboolean mono_gc_is_null (void); void mono_gc_set_string_length (MonoString *str, gint32 new_length); #if defined(__MACH__) void mono_gc_register_mach_exception_thread (pthread_t thread); pthread_t mono_gc_get_mach_exception_thread (void); #endif gboolean mono_gc_precise_stack_mark_enabled (void); typedef struct _RefQueueEntry RefQueueEntry; struct _RefQueueEntry { void *dis_link; MonoGCHandle gchandle; MonoDomain *domain; void *user_data; RefQueueEntry *next; }; struct _MonoReferenceQueue { RefQueueEntry *queue; mono_reference_queue_callback callback; MonoReferenceQueue *next; gboolean should_be_deleted; }; enum { MONO_GC_FINALIZER_EXTENSION_VERSION = 1, }; typedef struct { int version; gboolean (*is_class_finalization_aware) (MonoClass *klass); void (*object_queued_for_finalization) (MonoObject *object); } MonoGCFinalizerCallbacks; MONO_API void mono_gc_register_finalizer_callbacks (MonoGCFinalizerCallbacks *callbacks); #ifdef HOST_WIN32 BOOL APIENTRY mono_gc_dllmain (HMODULE module_handle, DWORD reason, LPVOID reserved); #endif MonoVTable *mono_gc_get_vtable (MonoObject *obj); guint mono_gc_get_vtable_bits (MonoClass *klass); void mono_gc_register_altstack (gpointer stack, gint32 stack_size, gpointer altstack, gint32 altstack_size); gboolean mono_gc_is_critical_method (MonoMethod *method); G_EXTERN_C // due to THREAD_INFO_TYPE varying gpointer mono_gc_thread_attach (THREAD_INFO_TYPE *info); G_EXTERN_C // due to THREAD_INFO_TYPE varying void mono_gc_thread_detach (THREAD_INFO_TYPE *info); G_EXTERN_C // due to THREAD_INFO_TYPE varying void mono_gc_thread_detach_with_lock (THREAD_INFO_TYPE *info); G_EXTERN_C // due to THREAD_INFO_TYPE varying gboolean mono_gc_thread_in_critical_region (THREAD_INFO_TYPE *info); /* If set, print debugging messages around finalizers. */ extern gboolean mono_log_finalizers; /* If set, do not run finalizers. */ extern gboolean mono_do_not_finalize; /* List of names of classes not to finalize. */ extern gchar **mono_do_not_finalize_class_names; /* * Unified runtime stop/restart world, SGEN Only. * Will take and release the LOCK_GC. */ MONO_COMPONENT_API void mono_stop_world (MonoThreadInfoFlags flags); MONO_COMPONENT_API void mono_restart_world (MonoThreadInfoFlags flags); #endif /* __MONO_METADATA_GC_INTERNAL_H__ */
/** * \file * Internal GC interface * * Author: Paolo Molaro <[email protected]> * * (C) 2002 Ximian, Inc. * Copyright 2012 Xamarin Inc (http://www.xamarin.com) * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #ifndef __MONO_METADATA_GC_INTERNAL_H__ #define __MONO_METADATA_GC_INTERNAL_H__ #include <glib.h> #include <mono/metadata/gc_wrapper.h> #include <mono/metadata/object-internals.h> #include <mono/metadata/threads-types.h> #include <mono/sgen/gc-internal-agnostic.h> #include <mono/metadata/icalls.h> #include <mono/utils/mono-compiler.h> /* Register a memory area as a conservatively scanned GC root */ #define MONO_GC_REGISTER_ROOT_PINNING(x,src,key,msg) mono_gc_register_root ((char*)&(x), sizeof(x), MONO_GC_DESCRIPTOR_NULL, (src), (key), (msg)) #define MONO_GC_UNREGISTER_ROOT(x) mono_gc_deregister_root ((char*)&(x)) /* * The lowest bit is used to mark pinned handles by netcore's GCHandle class. These macros * are used to convert between the old int32 representation to a netcore compatible pointer * representation. */ #define MONO_GC_HANDLE_TO_UINT(ptr) ((guint32)((size_t)(ptr) >> 1)) #define MONO_GC_HANDLE_FROM_UINT(i) ((MonoGCHandle)((size_t)(i) << 1)) /* * Return a GC descriptor for an array containing N pointers to memory allocated * by mono_gc_alloc_fixed (). */ /* For SGEN, the result of alloc_fixed () is not GC tracked memory */ #define MONO_GC_ROOT_DESCR_FOR_FIXED(n) (mono_gc_is_moving () ? mono_gc_make_root_descr_all_refs (0) : MONO_GC_DESCRIPTOR_NULL) /* Register a memory location holding a single object reference as a GC root */ #define MONO_GC_REGISTER_ROOT_SINGLE(x,src,key,msg) do { \ g_assert (sizeof (x) == sizeof (MonoObject*)); \ mono_gc_register_root ((char*)&(x), sizeof(MonoObject*), mono_gc_make_root_descr_all_refs (1), (src), (key),(msg)); \ } while (0) /* * This is used for fields which point to objects which are kept alive by other references * when using Boehm. */ #define MONO_GC_REGISTER_ROOT_IF_MOVING(x,src,key,msg) do { \ if (mono_gc_is_moving ()) \ MONO_GC_REGISTER_ROOT_SINGLE(x,src,key,msg); \ } while (0) #define MONO_GC_UNREGISTER_ROOT_IF_MOVING(x) do { \ if (mono_gc_is_moving ()) \ MONO_GC_UNREGISTER_ROOT (x); \ } while (0) /* useful until we keep track of gc-references in corlib etc. */ #define IS_GC_REFERENCE(class,t) (mono_gc_is_moving () ? FALSE : ((t)->type == MONO_TYPE_U && (class)->image == mono_defaults.corlib)) void mono_object_register_finalizer (MonoObject *obj); void mono_object_register_finalizer_handle (MonoObjectHandle obj); extern void mono_gc_init (void); MONO_COMPONENT_API extern void mono_gc_base_init (void); extern void mono_gc_base_cleanup (void); extern void mono_gc_init_icalls (void); /* * Return whenever the current thread is registered with the GC (i.e. started * by the GC pthread wrappers on unix. */ extern gboolean mono_gc_is_gc_thread (void); MONO_COMPONENT_API extern gboolean mono_gc_is_finalizer_internal_thread (MonoInternalThread *thread); extern void mono_gc_set_stack_end (void *stack_end); /* only valid after the RECLAIM_START GC event and before RECLAIM_END * Not exported in public headers, but can be linked to (unsupported). */ gboolean mono_object_is_alive (MonoObject* obj); MONO_COMPONENT_API gboolean mono_gc_is_finalizer_thread (MonoThread *thread); void mono_gchandle_set_target (MonoGCHandle gchandle, MonoObject *obj); /*Ephemeron functionality. 
Sgen only*/ gboolean mono_gc_ephemeron_array_add (MonoObject *obj); /* User defined marking function */ /* It should work like this: * foreach (ref in GC references in the are structure pointed to by ADDR) * mark_func (ref) */ typedef void (*MonoGCMarkFunc) (MonoObject **addr, void *gc_data); typedef void (*MonoGCRootMarkFunc) (void *addr, MonoGCMarkFunc mark_func, void *gc_data); /* Create a descriptor with a user defined marking function */ MonoGCDescriptor mono_gc_make_root_descr_user (MonoGCRootMarkFunc marker); /* Return whenever user defined marking functions are supported */ gboolean mono_gc_user_markers_supported (void); /* desc is the result from mono_gc_make_descr*. A NULL value means * all the words might contain GC pointers. * The memory is non-moving and it will be explicitly deallocated. * size bytes will be available from the returned address (ie, descr * must not be stored in the returned memory) */ MonoObject* mono_gc_alloc_fixed (size_t size, MonoGCDescriptor descr, MonoGCRootSource source, void *key, const char *msg); // C++ callers outside of metadata (mini/tasklets.c) must use mono_gc_alloc_fixed_no_descriptor // instead of mono_gc_alloc_fixed, or else compile twice -- boehm and sgen. MonoObject* mono_gc_alloc_fixed_no_descriptor (size_t size, MonoGCRootSource source, void *key, const char *msg); void mono_gc_free_fixed (void* addr); typedef void (*FinalizerThreadCallback) (gpointer user_data); MonoObject* mono_gc_alloc_pinned_obj (MonoVTable *vtable, size_t size); MonoObjectHandle mono_gc_alloc_handle_pinned_obj (MonoVTable *vtable, gsize size); MonoObject* mono_gc_alloc_obj (MonoVTable *vtable, size_t size); MonoObjectHandle mono_gc_alloc_handle_obj (MonoVTable *vtable, gsize size); MonoArray* mono_gc_alloc_vector (MonoVTable *vtable, size_t size, uintptr_t max_length); MonoArray* mono_gc_alloc_pinned_vector (MonoVTable *vtable, size_t size, uintptr_t max_length); MonoArrayHandle mono_gc_alloc_handle_vector (MonoVTable *vtable, gsize size, gsize max_length); MonoArray* mono_gc_alloc_array (MonoVTable *vtable, size_t size, uintptr_t max_length, uintptr_t bounds_size); MonoArrayHandle mono_gc_alloc_handle_array (MonoVTable *vtable, gsize size, gsize max_length, gsize bounds_size); MonoString* mono_gc_alloc_string (MonoVTable *vtable, size_t size, gint32 len); MonoStringHandle mono_gc_alloc_handle_string (MonoVTable *vtable, gsize size, gint32 len); MonoObject* mono_gc_alloc_mature (MonoVTable *vtable, size_t size); MonoGCDescriptor mono_gc_make_descr_for_string (gsize *bitmap, int numbits); MonoObjectHandle mono_gc_alloc_handle_mature (MonoVTable *vtable, gsize size); void mono_gc_register_obj_with_weak_fields (void *obj); void mono_gc_register_object_with_weak_fields (MonoObjectHandle obj); typedef void (*MonoFinalizationProc)(gpointer, gpointer); // same as SGenFinalizationProc, GC_finalization_proc void mono_gc_register_for_finalization (MonoObject *obj, MonoFinalizationProc user_data); void mono_gc_add_memory_pressure (gint64 value); MONO_API int mono_gc_register_root (char *start, size_t size, MonoGCDescriptor descr, MonoGCRootSource source, void *key, const char *msg); MONO_COMPONENT_API void mono_gc_deregister_root (char* addr); void mono_gc_finalize_domain (MonoDomain *domain); void mono_gc_run_finalize (void *obj, void *data); void mono_gc_clear_domain (MonoDomain * domain); /* Signal early termination of finalizer processing inside the gc */ void mono_gc_suspend_finalizers (void); /* * Register a root which can only be written using a write barrier. 
* Writes to the root must be done using a write barrier (MONO_ROOT_SETREF). * If the root uses an user defined mark routine, the writes are not required to be * to the area between START and START+SIZE. * The write barrier allows the GC to avoid scanning this root at each collection, so it * is more efficient. * FIXME: Add an API for clearing remset entries if a root with a user defined * mark routine is deleted. */ int mono_gc_register_root_wbarrier (char *start, size_t size, MonoGCDescriptor descr, MonoGCRootSource source, void *key, const char *msg); void mono_gc_wbarrier_set_root (gpointer ptr, MonoObject *value); /* Set a field of a root registered using mono_gc_register_root_wbarrier () */ #define MONO_ROOT_SETREF(s,fieldname,value) do { \ mono_gc_wbarrier_set_root (&((s)->fieldname), (MonoObject*)value); \ } while (0) /* fast allocation support */ typedef enum { // Regular fast path allocator. MANAGED_ALLOCATOR_REGULAR, // Managed allocator that just calls into the runtime. MANAGED_ALLOCATOR_SLOW_PATH, // Managed allocator that works like the regular one but also calls into the profiler. MANAGED_ALLOCATOR_PROFILER, } ManagedAllocatorVariant; int mono_gc_get_aligned_size_for_allocator (int size); MonoMethod* mono_gc_get_managed_allocator (MonoClass *klass, gboolean for_box, gboolean known_instance_size); MonoMethod* mono_gc_get_managed_array_allocator (MonoClass *klass); MonoMethod *mono_gc_get_managed_allocator_by_type (int atype, ManagedAllocatorVariant variant); guint32 mono_gc_get_managed_allocator_types (void); /* Return a short string identifying the GC, indented to be saved in AOT images */ const char *mono_gc_get_gc_name (void); /* Fast write barriers */ MonoMethod* mono_gc_get_specific_write_barrier (gboolean is_concurrent); MonoMethod* mono_gc_get_write_barrier (void); /* Fast valuetype copy */ /* WARNING: [dest, dest + size] must be within the bounds of a single type, otherwise the GC will lose remset entries */ G_EXTERN_C void mono_gc_wbarrier_range_copy (gpointer dest, gconstpointer src, int size); typedef void (*MonoRangeCopyFunction)(gpointer, gconstpointer, int size); MonoRangeCopyFunction mono_gc_get_range_copy_func (void); /* * Functions supplied by the runtime and called by the GC. Currently only used * by SGEN. */ typedef struct { /* * Function called during thread startup/attach to allocate thread-local data * needed by the other functions. */ gpointer (*thread_attach_func) (void); /* * Function called during thread deatch to free the data allocated by * thread_attach_func. */ void (*thread_detach_func) (gpointer user_data); /* * Function called from every thread when suspending for GC. It can save * data needed for marking from thread stacks. user_data is the data returned * by attach_func. This might called with GC locks held and the word stopped, * so it shouldn't do any synchronization etc. */ void (*thread_suspend_func) (gpointer user_data, void *sigcontext, MonoContext *ctx); /* * Function called to mark from thread stacks. user_data is the data returned * by attach_func. This is called twice, with the word stopped: * - in the first pass, it should mark areas of the stack using * conservative marking by calling mono_gc_conservatively_scan_area (). * - in the second pass, it should mark the remaining areas of the stack * using precise marking by calling mono_gc_scan_object (). 
*/ void (*thread_mark_func) (gpointer user_data, guint8 *stack_start, guint8 *stack_end, gboolean precise, void *gc_data); /* * Function called for debugging to get the current managed method for * tracking the provenances of objects. */ gpointer (*get_provenance_func) (void); /* * Same as thread_mark_func, mark the intepreter frames. */ void (*interp_mark_func) (gpointer thread_info, GcScanFunc func, gpointer gc_data, gboolean precise); } MonoGCCallbacks; /* Set the callback functions callable by the GC */ void mono_gc_set_gc_callbacks (MonoGCCallbacks *callbacks); MonoGCCallbacks *mono_gc_get_gc_callbacks (void); /* Functions callable from the thread mark func */ /* Scan the memory area between START and END conservatively */ void mono_gc_conservatively_scan_area (void *start, void *end); /* Scan OBJ, returning its new address */ void *mono_gc_scan_object (void *obj, void *gc_data); /* Return the suspend signal number used by the GC to suspend threads, or -1 if not applicable. */ int mono_gc_get_suspend_signal (void); /* Return the suspend signal number used by the GC to suspend threads, or -1 if not applicable. */ int mono_gc_get_restart_signal (void); /* * Return a human readable description of the GC in malloc-ed memory. */ char* mono_gc_get_description (void); /* * Configure the GC to desktop mode */ void mono_gc_set_desktop_mode (void); /* * Return whenever this GC can move objects */ MONO_COMPONENT_API gboolean mono_gc_is_moving (void); typedef void* (*MonoGCLockedCallbackFunc) (void *data); void* mono_gc_invoke_with_gc_lock (MonoGCLockedCallbackFunc func, void *data); int mono_gc_get_los_limit (void); guint64 mono_gc_get_allocated_bytes_for_current_thread (void); guint64 mono_gc_get_total_allocated_bytes (MonoBoolean precise); void mono_gc_get_gcmemoryinfo ( gint64 *high_memory_load_threshold_bytes, gint64 *memory_load_bytes, gint64 *total_available_memory_bytes, gint64 *total_committed_bytes, gint64 *heap_size_bytes, gint64 *fragmented_bytes); void mono_gc_get_gctimeinfo ( guint64 *time_last_gc_100ns, guint64 *time_since_last_gc_100ns, guint64 *time_max_gc_100ns); guint8* mono_gc_get_card_table (int *shift_bits, gpointer *card_mask); guint8* mono_gc_get_target_card_table (int *shift_bits, target_mgreg_t *card_mask); gboolean mono_gc_card_table_nursery_check (void); void* mono_gc_get_nursery (int *shift_bits, size_t *size); // Don't use directly; set/unset MONO_THREAD_INFO_FLAGS_NO_GC instead. 
void mono_gc_skip_thread_changing (gboolean skip); void mono_gc_skip_thread_changed (gboolean skip); #ifndef HOST_WIN32 int mono_gc_pthread_create (pthread_t *new_thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg); #endif /* * Return whenever GC is disabled */ gboolean mono_gc_is_disabled (void); /* * Return whenever this is the null GC */ gboolean mono_gc_is_null (void); void mono_gc_set_string_length (MonoString *str, gint32 new_length); #if defined(__MACH__) void mono_gc_register_mach_exception_thread (pthread_t thread); pthread_t mono_gc_get_mach_exception_thread (void); #endif gboolean mono_gc_precise_stack_mark_enabled (void); typedef struct _RefQueueEntry RefQueueEntry; struct _RefQueueEntry { void *dis_link; MonoGCHandle gchandle; MonoDomain *domain; void *user_data; RefQueueEntry *next; }; struct _MonoReferenceQueue { RefQueueEntry *queue; mono_reference_queue_callback callback; MonoReferenceQueue *next; gboolean should_be_deleted; }; enum { MONO_GC_FINALIZER_EXTENSION_VERSION = 1, }; typedef struct { int version; gboolean (*is_class_finalization_aware) (MonoClass *klass); void (*object_queued_for_finalization) (MonoObject *object); } MonoGCFinalizerCallbacks; MONO_API void mono_gc_register_finalizer_callbacks (MonoGCFinalizerCallbacks *callbacks); #ifdef HOST_WIN32 BOOL APIENTRY mono_gc_dllmain (HMODULE module_handle, DWORD reason, LPVOID reserved); #endif MonoVTable *mono_gc_get_vtable (MonoObject *obj); guint mono_gc_get_vtable_bits (MonoClass *klass); void mono_gc_register_altstack (gpointer stack, gint32 stack_size, gpointer altstack, gint32 altstack_size); gboolean mono_gc_is_critical_method (MonoMethod *method); G_EXTERN_C // due to THREAD_INFO_TYPE varying gpointer mono_gc_thread_attach (THREAD_INFO_TYPE *info); G_EXTERN_C // due to THREAD_INFO_TYPE varying void mono_gc_thread_detach (THREAD_INFO_TYPE *info); G_EXTERN_C // due to THREAD_INFO_TYPE varying void mono_gc_thread_detach_with_lock (THREAD_INFO_TYPE *info); G_EXTERN_C // due to THREAD_INFO_TYPE varying gboolean mono_gc_thread_in_critical_region (THREAD_INFO_TYPE *info); /* If set, print debugging messages around finalizers. */ extern gboolean mono_log_finalizers; /* If set, do not run finalizers. */ extern gboolean mono_do_not_finalize; /* List of names of classes not to finalize. */ extern gchar **mono_do_not_finalize_class_names; /* * Unified runtime stop/restart world, SGEN Only. * Will take and release the LOCK_GC. */ MONO_COMPONENT_API void mono_stop_world (MonoThreadInfoFlags flags); MONO_COMPONENT_API void mono_restart_world (MonoThreadInfoFlags flags); #endif /* __MONO_METADATA_GC_INTERNAL_H__ */
-1
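The gc-internals.h header in this record exposes macros such as MONO_GC_REGISTER_ROOT_SINGLE and MONO_GC_UNREGISTER_ROOT that hand the address of a single object-reference slot to the GC's root registry. Below is a toy C++ sketch of that register/deregister shape; RootRegistry and the TOY_* macros are invented for illustration and do not call the real Mono API.

#include <cstdio>
#include <utility>
#include <vector>

// Toy registry: a "root" is just a recorded address range the collector would scan.
struct RootRegistry
{
    std::vector<std::pair<void*, size_t>> roots;

    void register_root(void* start, size_t size, const char* msg)
    {
        roots.push_back({start, size});
        std::printf("registered root '%s' at %p (%zu bytes)\n", msg, start, size);
    }

    void deregister_root(void* start)
    {
        for (auto it = roots.begin(); it != roots.end(); ++it)
        {
            if (it->first == start) { roots.erase(it); break; }
        }
    }
};

static RootRegistry g_registry;

// Imitates the shape of MONO_GC_REGISTER_ROOT_SINGLE / MONO_GC_UNREGISTER_ROOT:
// take the address of one pointer-sized slot plus a diagnostic message.
#define TOY_REGISTER_ROOT_SINGLE(x, msg) g_registry.register_root((void*)&(x), sizeof(void*), (msg))
#define TOY_UNREGISTER_ROOT(x)           g_registry.deregister_root((void*)&(x))

static void* g_cached_object; // slot that would hold a single managed object reference

int main()
{
    TOY_REGISTER_ROOT_SINGLE(g_cached_object, "cached object");
    TOY_UNREGISTER_ROOT(g_cached_object);
    return 0;
}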
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types, but the code base sometimes relied on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work for Arm64. On Arm64, SIMD features are always needed to support ABI handling. Modified all such usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/vm/debuginfostore.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // DebugInfoStore #ifndef __DebugInfoStore_H_ #define __DebugInfoStore_H_ // Debugging information is described in CorInfo.h #include "corinfo.h" #include "nibblestream.h" //----------------------------------------------------------------------------- // Information to request Debug info. //----------------------------------------------------------------------------- class DebugInfoRequest { public: #ifdef _DEBUG // Must initialize via an Init*() function, not just a ctor. // In debug, ctor sets fields to values that will cause asserts if not initialized. DebugInfoRequest() { SUPPORTS_DAC; m_pMD = NULL; m_addrStart = NULL; } #endif // Eventually we may have many ways to initialize a request. // Init given a method desc and starting address for a native code blob. void InitFromStartingAddr(MethodDesc * pDesc, PCODE addrCode); MethodDesc * GetMD() const { LIMITED_METHOD_DAC_CONTRACT; return m_pMD; } PCODE GetStartAddress() const { LIMITED_METHOD_DAC_CONTRACT; return m_addrStart; } protected: MethodDesc * m_pMD; PCODE m_addrStart; }; //----------------------------------------------------------------------------- // A Debug-Info Store abstracts the storage of debugging information //----------------------------------------------------------------------------- // We pass the IDS an allocator which it uses to hand the data back. // pData is data the allocator may use for 'new'. // Eg, perhaps we have multiple heaps (eg, loader-heaps per appdomain). typedef BYTE* (*FP_IDS_NEW)(void * pData, size_t cBytes); //----------------------------------------------------------------------------- // Utility routines used for compression // Note that the compression is just an implementation detail of the stores, // and so these are just utility routines exposed to the stores. //----------------------------------------------------------------------------- class CompressDebugInfo { public: // Compress incoming data and write it to the provided NibbleWriter. static void CompressBoundaries( IN ULONG32 cMap, IN ICorDebugInfo::OffsetMapping *pMap, IN OUT NibbleWriter * pWriter ); static void CompressVars( IN ULONG32 cVars, IN ICorDebugInfo::NativeVarInfo *vars, IN OUT NibbleWriter * pBuffer ); // Stores the result into SBuffer (used by NGen), or in LoaderHeap (used by JIT) static PTR_BYTE CompressBoundariesAndVars( IN ICorDebugInfo::OffsetMapping * pOffsetMapping, IN ULONG iOffsetMapping, IN ICorDebugInfo::NativeVarInfo * pNativeVarInfo, IN ULONG iNativeVarInfo, IN PatchpointInfo * patchpointInfo, IN OUT SBuffer * pDebugInfoBuffer, IN LoaderHeap * pLoaderHeap ); public: // Uncompress data supplied by Compress functions. static void RestoreBoundariesAndVars( IN FP_IDS_NEW fpNew, IN void * pNewData, IN PTR_BYTE pDebugInfo, OUT ULONG32 * pcMap, // number of entries in ppMap OUT ICorDebugInfo::OffsetMapping **ppMap, // pointer to newly allocated array OUT ULONG32 *pcVars, OUT ICorDebugInfo::NativeVarInfo **ppVars, BOOL hasFlagByte ); #ifdef FEATURE_ON_STACK_REPLACEMENT static PatchpointInfo * RestorePatchpointInfo( IN PTR_BYTE pDebugInfo ); #endif #ifdef DACCESS_COMPILE static void EnumMemoryRegions(CLRDataEnumMemoryFlags flags, PTR_BYTE pDebugInfo, BOOL hasFlagByte); #endif }; //----------------------------------------------------------------------------- // Debug-Info-manager. This is like a process-wide store. // There should be only 1 instance of this and it's process-wide. 
// It will delegate to sub-stores as needed //----------------------------------------------------------------------------- class DebugInfoManager { public: static BOOL GetBoundariesAndVars( const DebugInfoRequest & request, IN FP_IDS_NEW fpNew, IN void * pNewData, OUT ULONG32 * pcMap, OUT ICorDebugInfo::OffsetMapping ** ppMap, OUT ULONG32 * pcVars, OUT ICorDebugInfo::NativeVarInfo ** ppVars); #ifdef DACCESS_COMPILE static void EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD); #endif }; #endif // __DebugInfoStore_H_
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // DebugInfoStore #ifndef __DebugInfoStore_H_ #define __DebugInfoStore_H_ // Debugging information is described in CorInfo.h #include "corinfo.h" #include "nibblestream.h" //----------------------------------------------------------------------------- // Information to request Debug info. //----------------------------------------------------------------------------- class DebugInfoRequest { public: #ifdef _DEBUG // Must initialize via an Init*() function, not just a ctor. // In debug, ctor sets fields to values that will cause asserts if not initialized. DebugInfoRequest() { SUPPORTS_DAC; m_pMD = NULL; m_addrStart = NULL; } #endif // Eventually we may have many ways to initialize a request. // Init given a method desc and starting address for a native code blob. void InitFromStartingAddr(MethodDesc * pDesc, PCODE addrCode); MethodDesc * GetMD() const { LIMITED_METHOD_DAC_CONTRACT; return m_pMD; } PCODE GetStartAddress() const { LIMITED_METHOD_DAC_CONTRACT; return m_addrStart; } protected: MethodDesc * m_pMD; PCODE m_addrStart; }; //----------------------------------------------------------------------------- // A Debug-Info Store abstracts the storage of debugging information //----------------------------------------------------------------------------- // We pass the IDS an allocator which it uses to hand the data back. // pData is data the allocator may use for 'new'. // Eg, perhaps we have multiple heaps (eg, loader-heaps per appdomain). typedef BYTE* (*FP_IDS_NEW)(void * pData, size_t cBytes); //----------------------------------------------------------------------------- // Utility routines used for compression // Note that the compression is just an implementation detail of the stores, // and so these are just utility routines exposed to the stores. //----------------------------------------------------------------------------- class CompressDebugInfo { public: // Compress incoming data and write it to the provided NibbleWriter. static void CompressBoundaries( IN ULONG32 cMap, IN ICorDebugInfo::OffsetMapping *pMap, IN OUT NibbleWriter * pWriter ); static void CompressVars( IN ULONG32 cVars, IN ICorDebugInfo::NativeVarInfo *vars, IN OUT NibbleWriter * pBuffer ); // Stores the result into SBuffer (used by NGen), or in LoaderHeap (used by JIT) static PTR_BYTE CompressBoundariesAndVars( IN ICorDebugInfo::OffsetMapping * pOffsetMapping, IN ULONG iOffsetMapping, IN ICorDebugInfo::NativeVarInfo * pNativeVarInfo, IN ULONG iNativeVarInfo, IN PatchpointInfo * patchpointInfo, IN OUT SBuffer * pDebugInfoBuffer, IN LoaderHeap * pLoaderHeap ); public: // Uncompress data supplied by Compress functions. static void RestoreBoundariesAndVars( IN FP_IDS_NEW fpNew, IN void * pNewData, IN PTR_BYTE pDebugInfo, OUT ULONG32 * pcMap, // number of entries in ppMap OUT ICorDebugInfo::OffsetMapping **ppMap, // pointer to newly allocated array OUT ULONG32 *pcVars, OUT ICorDebugInfo::NativeVarInfo **ppVars, BOOL hasFlagByte ); #ifdef FEATURE_ON_STACK_REPLACEMENT static PatchpointInfo * RestorePatchpointInfo( IN PTR_BYTE pDebugInfo ); #endif #ifdef DACCESS_COMPILE static void EnumMemoryRegions(CLRDataEnumMemoryFlags flags, PTR_BYTE pDebugInfo, BOOL hasFlagByte); #endif }; //----------------------------------------------------------------------------- // Debug-Info-manager. This is like a process-wide store. // There should be only 1 instance of this and it's process-wide. 
// It will delegate to sub-stores as needed //----------------------------------------------------------------------------- class DebugInfoManager { public: static BOOL GetBoundariesAndVars( const DebugInfoRequest & request, IN FP_IDS_NEW fpNew, IN void * pNewData, OUT ULONG32 * pcMap, OUT ICorDebugInfo::OffsetMapping ** ppMap, OUT ULONG32 * pcVars, OUT ICorDebugInfo::NativeVarInfo ** ppVars); #ifdef DACCESS_COMPILE static void EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD); #endif }; #endif // __DebugInfoStore_H_
-1
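The debuginfostore.h row above declares everything a caller needs to pull JIT debug info for a method: a DebugInfoRequest initialized from a MethodDesc and a native start address, an FP_IDS_NEW allocator callback, and DebugInfoManager::GetBoundariesAndVars. The sketch below shows that call shape; DumpDebugInfo and SimpleAlloc are hypothetical names, and the code assumes it runs inside the VM where these types are defined.

// Usage sketch only -- assumes VM-internal types (MethodDesc, PCODE, ICorDebugInfo) are in scope.
static BYTE* SimpleAlloc(void* /*pData*/, size_t cBytes)
{
    return new BYTE[cBytes];               // matches the FP_IDS_NEW signature
}

void DumpDebugInfo(MethodDesc* pMD, PCODE nativeStart)   // hypothetical helper
{
    DebugInfoRequest request;
    request.InitFromStartingAddr(pMD, nativeStart);

    ULONG32 cMap = 0, cVars = 0;
    ICorDebugInfo::OffsetMapping* pMap  = NULL;
    ICorDebugInfo::NativeVarInfo* pVars = NULL;

    if (DebugInfoManager::GetBoundariesAndVars(request, SimpleAlloc, NULL,
                                               &cMap, &pMap, &cVars, &pVars))
    {
        // cMap IL<->native boundary entries and cVars variable records are now
        // owned by the caller; they were allocated through SimpleAlloc.
    }
}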
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/native/eventpipe/ep-provider.h
#ifndef __EVENTPIPE_PROVIDER_H__ #define __EVENTPIPE_PROVIDER_H__ #include "ep-rt-config.h" #ifdef ENABLE_PERFTRACING #include "ep-types.h" #undef EP_IMPL_GETTER_SETTER #ifdef EP_IMPL_PROVIDER_GETTER_SETTER #define EP_IMPL_GETTER_SETTER #endif #include "ep-getter-setter.h" /* * EventPipeProvider. */ #if defined(EP_INLINE_GETTER_SETTER) || defined(EP_IMPL_PROVIDER_GETTER_SETTER) struct _EventPipeProvider { #else struct _EventPipeProvider_Internal { #endif // Bit vector containing the currently enabled keywords. int64_t keywords; // Bit mask of sessions for which this provider is enabled. uint64_t sessions; // The name of the provider. ep_char8_t *provider_name; ep_char16_t *provider_name_utf16; // List of every event currently associated with the provider. // New events can be added on-the-fly. ep_rt_event_list_t event_list; // The optional provider callback function. EventPipeCallback callback_func; // The optional provider callback_data free callback function. EventPipeCallbackDataFree callback_data_free_func; // The optional provider callback data pointer. void *callback_data; // The configuration object. EventPipeConfiguration *config; // The current verbosity of the provider. EventPipeEventLevel provider_level; // True if the provider has been deleted, but that deletion // has been deferred until tracing is stopped. bool delete_deferred; }; #if !defined(EP_INLINE_GETTER_SETTER) && !defined(EP_IMPL_PROVIDER_GETTER_SETTER) struct _EventPipeProvider { uint8_t _internal [sizeof (struct _EventPipeProvider_Internal)]; }; #endif EP_DEFINE_GETTER(EventPipeProvider *, provider, const ep_char8_t *, provider_name) EP_DEFINE_GETTER(EventPipeProvider *, provider, const ep_char16_t *, provider_name_utf16) EP_DEFINE_GETTER(EventPipeProvider *, provider, bool, delete_deferred) EP_DEFINE_GETTER(EventPipeProvider *, provider, uint64_t, sessions) static inline bool ep_provider_get_enabled (const EventPipeProvider *provider) { return ep_provider_get_sessions (provider) != 0; } static inline bool ep_provider_is_enabled_by_mask ( const EventPipeProvider *provider, uint64_t session_mask) { return ((ep_provider_get_sessions (provider) & session_mask) != 0); } static inline const ep_char8_t * ep_provider_get_wildcard_name_utf8 (void) { return "*"; } static inline const ep_char8_t * ep_provider_get_default_name_utf8 (void) { return "Microsoft-DotNETCore-EventPipe"; } EventPipeProvider * ep_provider_alloc ( EventPipeConfiguration *config, const ep_char8_t *provider_name, EventPipeCallback callback_func, EventPipeCallbackDataFree callback_data_free_func, void *callback_data); void ep_provider_free (EventPipeProvider * provider); // Add an event to the provider. EventPipeEvent * ep_provider_add_event ( EventPipeProvider *provider, uint32_t event_id, uint64_t keywords, uint32_t event_version, EventPipeEventLevel level, bool need_stack, const uint8_t *metadata, uint32_t metadata_len); void ep_provider_set_delete_deferred ( EventPipeProvider *provider, bool deferred); #endif /* ENABLE_PERFTRACING */ #endif /* __EVENTPIPE_PROVIDER_H__ */
#ifndef __EVENTPIPE_PROVIDER_H__ #define __EVENTPIPE_PROVIDER_H__ #include "ep-rt-config.h" #ifdef ENABLE_PERFTRACING #include "ep-types.h" #undef EP_IMPL_GETTER_SETTER #ifdef EP_IMPL_PROVIDER_GETTER_SETTER #define EP_IMPL_GETTER_SETTER #endif #include "ep-getter-setter.h" /* * EventPipeProvider. */ #if defined(EP_INLINE_GETTER_SETTER) || defined(EP_IMPL_PROVIDER_GETTER_SETTER) struct _EventPipeProvider { #else struct _EventPipeProvider_Internal { #endif // Bit vector containing the currently enabled keywords. int64_t keywords; // Bit mask of sessions for which this provider is enabled. uint64_t sessions; // The name of the provider. ep_char8_t *provider_name; ep_char16_t *provider_name_utf16; // List of every event currently associated with the provider. // New events can be added on-the-fly. ep_rt_event_list_t event_list; // The optional provider callback function. EventPipeCallback callback_func; // The optional provider callback_data free callback function. EventPipeCallbackDataFree callback_data_free_func; // The optional provider callback data pointer. void *callback_data; // The configuration object. EventPipeConfiguration *config; // The current verbosity of the provider. EventPipeEventLevel provider_level; // True if the provider has been deleted, but that deletion // has been deferred until tracing is stopped. bool delete_deferred; }; #if !defined(EP_INLINE_GETTER_SETTER) && !defined(EP_IMPL_PROVIDER_GETTER_SETTER) struct _EventPipeProvider { uint8_t _internal [sizeof (struct _EventPipeProvider_Internal)]; }; #endif EP_DEFINE_GETTER(EventPipeProvider *, provider, const ep_char8_t *, provider_name) EP_DEFINE_GETTER(EventPipeProvider *, provider, const ep_char16_t *, provider_name_utf16) EP_DEFINE_GETTER(EventPipeProvider *, provider, bool, delete_deferred) EP_DEFINE_GETTER(EventPipeProvider *, provider, uint64_t, sessions) static inline bool ep_provider_get_enabled (const EventPipeProvider *provider) { return ep_provider_get_sessions (provider) != 0; } static inline bool ep_provider_is_enabled_by_mask ( const EventPipeProvider *provider, uint64_t session_mask) { return ((ep_provider_get_sessions (provider) & session_mask) != 0); } static inline const ep_char8_t * ep_provider_get_wildcard_name_utf8 (void) { return "*"; } static inline const ep_char8_t * ep_provider_get_default_name_utf8 (void) { return "Microsoft-DotNETCore-EventPipe"; } EventPipeProvider * ep_provider_alloc ( EventPipeConfiguration *config, const ep_char8_t *provider_name, EventPipeCallback callback_func, EventPipeCallbackDataFree callback_data_free_func, void *callback_data); void ep_provider_free (EventPipeProvider * provider); // Add an event to the provider. EventPipeEvent * ep_provider_add_event ( EventPipeProvider *provider, uint32_t event_id, uint64_t keywords, uint32_t event_version, EventPipeEventLevel level, bool need_stack, const uint8_t *metadata, uint32_t metadata_len); void ep_provider_set_delete_deferred ( EventPipeProvider *provider, bool deferred); #endif /* ENABLE_PERFTRACING */ #endif /* __EVENTPIPE_PROVIDER_H__ */
-1
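The ep-provider.h row above stores provider enablement as a 64-bit session bitmask: ep_provider_get_enabled reports whether any bit is set, and ep_provider_is_enabled_by_mask ANDs the mask with a particular session's bit. The standalone snippet below repeats that check on plain integers to make the convention concrete; the session indices are made-up example values.

#include <stdint.h>
#include <stdio.h>

// Same test the inline helpers in ep-provider.h perform, applied to a bare mask.
static int is_enabled_by_mask (uint64_t sessions, uint64_t session_mask)
{
    return (sessions & session_mask) != 0;
}

int main (void)
{
    uint64_t sessions = (1ULL << 0) | (1ULL << 2);   // hypothetical: enabled for sessions 0 and 2

    printf ("enabled at all: %d\n", (int)(sessions != 0));                        // 1
    printf ("session 0:      %d\n", is_enabled_by_mask (sessions, 1ULL << 0));    // 1
    printf ("session 1:      %d\n", is_enabled_by_mask (sessions, 1ULL << 1));    // 0
    return 0;
}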
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/ildasm/dasm_sz.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef _DASM_SZ_H_ #define _DASM_SZ_H_ unsigned SizeOfValueType(mdToken tk, IMDInternalImport* pImport); unsigned SizeOfField(mdToken tk, IMDInternalImport* pImport); unsigned SizeOfField(PCCOR_SIGNATURE *ppSig, ULONG cSig, IMDInternalImport* pImport); #endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef _DASM_SZ_H_ #define _DASM_SZ_H_ unsigned SizeOfValueType(mdToken tk, IMDInternalImport* pImport); unsigned SizeOfField(mdToken tk, IMDInternalImport* pImport); unsigned SizeOfField(PCCOR_SIGNATURE *ppSig, ULONG cSig, IMDInternalImport* pImport); #endif
-1
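The PR description repeated through these rows describes one mechanical change: call sites that gated SIMD handling on the featureSIMD flag (which COMPlus_FeatureSIMD=0 can clear) now ask supportSIMDTypes(), which must keep reporting SIMD support on Arm64 because ABI handling needs it. The snippet below only illustrates that shape and is not the actual JIT source; the standalone variables and the function body are assumptions.

// Illustrative sketch (assumed implementation, standalone for readability).
static bool featureSIMD = true;      // in the JIT this reflects COMPlus_FeatureSIMD

static bool supportSIMDTypes()
{
#ifdef TARGET_ARM64
    return true;                     // Arm64 ABI handling always needs SIMD types
#else
    return featureSIMD;              // honours COMPlus_FeatureSIMD=0 on other targets
#endif
}

// Call sites described by the PR then move from
//     if (featureSIMD) { ... }
// to
//     if (supportSIMDTypes()) { ... }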
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/vm/dynamicmethod.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // #include "common.h" #include "dynamicmethod.h" #include "object.h" #include "method.hpp" #include "comdelegate.h" #include "field.h" #include "contractimpl.h" #include "nibblemapmacros.h" #include "stringliteralmap.h" #include "virtualcallstub.h" #ifndef DACCESS_COMPILE // get the method table for dynamic methods DynamicMethodTable* DomainAssembly::GetDynamicMethodTable() { CONTRACT (DynamicMethodTable*) { INSTANCE_CHECK; THROWS; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(COMPlusThrowOM()); POSTCONDITION(CheckPointer(m_pDynamicMethodTable)); } CONTRACT_END; if (!m_pDynamicMethodTable) DynamicMethodTable::CreateDynamicMethodTable(&m_pDynamicMethodTable, GetModule(), GetAppDomain()); RETURN m_pDynamicMethodTable; } void ReleaseDynamicMethodTable(DynamicMethodTable *pDynMT) { WRAPPER_NO_CONTRACT; if (pDynMT) { pDynMT->Destroy(); } } void DynamicMethodTable::CreateDynamicMethodTable(DynamicMethodTable **ppLocation, Module *pModule, AppDomain *pDomain) { CONTRACT_VOID { THROWS; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(COMPlusThrowOM()); PRECONDITION(CheckPointer(ppLocation)); PRECONDITION(CheckPointer(pModule)); POSTCONDITION(CheckPointer(*ppLocation)); } CONTRACT_END; AllocMemTracker amt; LoaderHeap* pHeap = pDomain->GetHighFrequencyHeap(); _ASSERTE(pHeap); if (*ppLocation) RETURN; DynamicMethodTable* pDynMT = (DynamicMethodTable*) amt.Track(pHeap->AllocMem(S_SIZE_T(sizeof(DynamicMethodTable)))); // Note: Memory allocated on loader heap is zero filled // memset((void*)pDynMT, 0, sizeof(DynamicMethodTable)); if (*ppLocation) RETURN; LOG((LF_BCL, LL_INFO100, "Level2 - Creating DynamicMethodTable {0x%p}...\n", pDynMT)); Holder<DynamicMethodTable*, DoNothing, ReleaseDynamicMethodTable> dynMTHolder(pDynMT); pDynMT->m_Crst.Init(CrstDynamicMT); pDynMT->m_Module = pModule; pDynMT->m_pDomain = pDomain; pDynMT->MakeMethodTable(&amt); if (*ppLocation) RETURN; if (FastInterlockCompareExchangePointer(ppLocation, pDynMT, NULL) != NULL) { LOG((LF_BCL, LL_INFO100, "Level2 - Another thread got here first - deleting DynamicMethodTable {0x%p}...\n", pDynMT)); RETURN; } dynMTHolder.SuppressRelease(); amt.SuppressRelease(); LOG((LF_BCL, LL_INFO10, "Level1 - DynamicMethodTable created {0x%p}...\n", pDynMT)); RETURN; } void DynamicMethodTable::MakeMethodTable(AllocMemTracker *pamTracker) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(COMPlusThrowOM()); } CONTRACTL_END; m_pMethodTable = CreateMinimalMethodTable(m_Module, m_pDomain->GetHighFrequencyHeap(), pamTracker); } void DynamicMethodTable::Destroy() { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; #if _DEBUG // This method should be called only for collectible types or for non-collectible ones // at the construction time when there are no DynamicMethodDesc instances added to the // DynamicMethodTable yet (from the DynamicMethodTable::CreateDynamicMethodTable in case // there were two threads racing to construct the instance for the thread that lost // the race) if (m_pMethodTable != NULL && !m_pMethodTable->GetLoaderAllocator()->IsCollectible()) { MethodTable::IntroducedMethodIterator it(m_pMethodTable); _ASSERTE(!it.IsValid()); } #endif m_Crst.Destroy(); LOG((LF_BCL, LL_INFO10, "Level1 - DynamicMethodTable destroyed {0x%p}\n", this)); } void DynamicMethodTable::AddMethodsToList() { CONTRACT_VOID { THROWS; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(COMPlusThrowOM()); } CONTRACT_END; AllocMemTracker amt; LoaderHeap* 
pHeap = m_pMethodTable->GetLoaderAllocator()->GetHighFrequencyHeap(); _ASSERTE(pHeap); // // allocate as many chunks as needed to hold the methods // MethodDescChunk* pChunk = MethodDescChunk::CreateChunk(pHeap, 0 /* one chunk of maximum size */, mcDynamic, TRUE /* fNonVtableSlot */, TRUE /* fNativeCodeSlot */, FALSE /* fComPlusCallInfo */, m_pMethodTable, &amt); if (m_DynamicMethodList) RETURN; int methodCount = pChunk->GetCount(); BYTE* pResolvers = (BYTE*)amt.Track(pHeap->AllocMem(S_SIZE_T(sizeof(LCGMethodResolver)) * S_SIZE_T(methodCount))); if (m_DynamicMethodList) RETURN; DynamicMethodDesc *pNewMD = (DynamicMethodDesc *)pChunk->GetFirstMethodDesc(); DynamicMethodDesc *pPrevMD = NULL; // now go through all the methods in the chunk and link them for(int i = 0; i < methodCount; i++) { _ASSERTE(pNewMD->GetClassification() == mcDynamic); pNewMD->SetMemberDef(0); pNewMD->SetSlot(MethodTable::NO_SLOT); // we can't ever use the slot for dynamic methods pNewMD->SetStatic(); pNewMD->InitializeFlags(DynamicMethodDesc::FlagPublic | DynamicMethodDesc::FlagStatic | DynamicMethodDesc::FlagIsLCGMethod); LCGMethodResolver* pResolver = new (pResolvers) LCGMethodResolver(); pResolver->m_pDynamicMethod = pNewMD; pResolver->m_DynamicMethodTable = this; pNewMD->m_pResolver = pResolver; pNewMD->SetTemporaryEntryPoint(m_pDomain->GetLoaderAllocator(), &amt); #ifdef _DEBUG pNewMD->m_pDebugMethodTable = m_pMethodTable; #endif if (pPrevMD) { pPrevMD->GetLCGMethodResolver()->m_next = pNewMD; } pPrevMD = pNewMD; pNewMD = (DynamicMethodDesc *)(dac_cast<TADDR>(pNewMD) + pNewMD->SizeOf()); pResolvers += sizeof(LCGMethodResolver); } if (m_DynamicMethodList) RETURN; { // publish method list and method table LockHolder lh(this); if (m_DynamicMethodList) RETURN; // publish the new method descs on the method table m_pMethodTable->GetClass()->AddChunk(pChunk); m_DynamicMethodList = (DynamicMethodDesc*)pChunk->GetFirstMethodDesc(); } amt.SuppressRelease(); } DynamicMethodDesc* DynamicMethodTable::GetDynamicMethod(BYTE *psig, DWORD sigSize, PTR_CUTF8 name) { CONTRACT (DynamicMethodDesc*) { INSTANCE_CHECK; THROWS; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(COMPlusThrowOM()); PRECONDITION(CheckPointer(psig)); PRECONDITION(sigSize > 0); POSTCONDITION(CheckPointer(RETVAL)); } CONTRACT_END; LOG((LF_BCL, LL_INFO10000, "Level4 - Getting DynamicMethod\n")); DynamicMethodDesc *pNewMD = NULL; for (;;) { { LockHolder lh(this); pNewMD = m_DynamicMethodList; if (pNewMD) { m_DynamicMethodList = pNewMD->GetLCGMethodResolver()->m_next; #ifdef _DEBUG m_Used++; #endif break; } } LOG((LF_BCL, LL_INFO1000, "Level4 - DynamicMethod unavailable\n")); // need to create more methoddescs AddMethodsToList(); } _ASSERTE(pNewMD != NULL); // Reset the method desc into pristine state // Note: Reset has THROWS contract since it may allocate jump stub. It will never throw here // since it will always reuse the existing jump stub. 
pNewMD->Reset(); LOG((LF_BCL, LL_INFO1000, "Level3 - DynamicMethod obtained {0x%p} (used %d)\n", pNewMD, m_Used)); // the store sig part of the method desc pNewMD->SetStoredMethodSig((PCCOR_SIGNATURE)psig, sigSize); // the dynamic part of the method desc pNewMD->m_pszMethodName = name; pNewMD->InitializeFlags(DynamicMethodDesc::FlagPublic | DynamicMethodDesc::FlagStatic | DynamicMethodDesc::FlagIsLCGMethod); #ifdef _DEBUG pNewMD->m_pszDebugMethodName = name; pNewMD->m_pszDebugClassName = (LPUTF8)"dynamicclass"; pNewMD->m_pszDebugMethodSignature = "DynamicMethod Signature not available"; #endif // _DEBUG #ifdef HAVE_GCCOVER pNewMD->m_GcCover = NULL; #endif pNewMD->SetNotInline(TRUE); pNewMD->GetLCGMethodResolver()->Reset(); RETURN pNewMD; } void DynamicMethodTable::LinkMethod(DynamicMethodDesc *pMethod) { CONTRACT_VOID { NOTHROW; GC_TRIGGERS; MODE_ANY; PRECONDITION(CheckPointer(pMethod)); } CONTRACT_END; LOG((LF_BCL, LL_INFO10000, "Level4 - Returning DynamicMethod to free list {0x%p} (used %d)\n", pMethod, m_Used)); { LockHolder lh(this); pMethod->GetLCGMethodResolver()->m_next = m_DynamicMethodList; m_DynamicMethodList = pMethod; #ifdef _DEBUG m_Used--; #endif } RETURN; } // // CodeHeap implementation // HeapList* HostCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, EEJitManager *pJitManager) { CONTRACT (HeapList*) { THROWS; GC_NOTRIGGER; MODE_ANY; INJECT_FAULT(COMPlusThrowOM()); POSTCONDITION((RETVAL != NULL) || !pInfo->getThrowOnOutOfMemoryWithinRange()); } CONTRACT_END; NewHolder<HostCodeHeap> pCodeHeap(new HostCodeHeap(pJitManager)); HeapList *pHp = pCodeHeap->InitializeHeapList(pInfo); if (pHp == NULL) { _ASSERTE(!pInfo->getThrowOnOutOfMemoryWithinRange()); RETURN NULL; } LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap creation {0x%p} - base addr 0x%p, size available 0x%p, nibble map ptr 0x%p\n", (HostCodeHeap*)pCodeHeap, pCodeHeap->m_pBaseAddr, pCodeHeap->m_TotalBytesAvailable, pCodeHeap->m_pHeapList->pHdrMap)); pCodeHeap.SuppressRelease(); LOG((LF_BCL, LL_INFO10, "Level1 - CodeHeap created {0x%p}\n", (HostCodeHeap*)pCodeHeap)); RETURN pHp; } HostCodeHeap::HostCodeHeap(EEJitManager *pJitManager) { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; INJECT_FAULT(COMPlusThrowOM()); } CONTRACTL_END; m_pBaseAddr = NULL; m_pLastAvailableCommittedAddr = NULL; m_TotalBytesAvailable = 0; m_ApproximateLargestBlock = 0; m_AllocationCount = 0; m_pHeapList = NULL; m_pJitManager = (PTR_EEJitManager)pJitManager; m_pFreeList = NULL; m_pAllocator = NULL; m_pNextHeapToRelease = NULL; } HostCodeHeap::~HostCodeHeap() { LIMITED_METHOD_CONTRACT; if (m_pHeapList != NULL && m_pHeapList->pHdrMap != NULL) delete[] m_pHeapList->pHdrMap; if (m_pBaseAddr) ExecutableAllocator::Instance()->Release(m_pBaseAddr); LOG((LF_BCL, LL_INFO10, "Level1 - CodeHeap destroyed {0x%p}\n", this)); } HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo) { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; size_t ReserveBlockSize = pInfo->getRequestSize(); // Add TrackAllocation, HeapList and very conservative padding to make sure we have enough for the allocation ReserveBlockSize += sizeof(TrackAllocation) + HOST_CODEHEAP_SIZE_ALIGN + 0x100; #if defined(TARGET_AMD64) || defined(TARGET_ARM64) ReserveBlockSize += JUMP_ALLOCATE_SIZE; #endif // reserve ReserveBlockSize rounded-up to VIRTUAL_ALLOC_RESERVE_GRANULARITY of memory ReserveBlockSize = ALIGN_UP(ReserveBlockSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY); if (pInfo->m_loAddr != NULL || pInfo->m_hiAddr != NULL) { m_pBaseAddr = 
(BYTE*)ExecutableAllocator::Instance()->ReserveWithinRange(ReserveBlockSize, pInfo->m_loAddr, pInfo->m_hiAddr); if (!m_pBaseAddr) { if (pInfo->getThrowOnOutOfMemoryWithinRange()) ThrowOutOfMemoryWithinRange(); return NULL; } } else { // top up the ReserveBlockSize to suggested minimum ReserveBlockSize = max(ReserveBlockSize, pInfo->getReserveSize()); m_pBaseAddr = (BYTE*)ExecutableAllocator::Instance()->Reserve(ReserveBlockSize); if (!m_pBaseAddr) ThrowOutOfMemory(); } m_pLastAvailableCommittedAddr = m_pBaseAddr; m_TotalBytesAvailable = ReserveBlockSize; m_ApproximateLargestBlock = ReserveBlockSize; m_pAllocator = pInfo->m_pAllocator; HeapList* pHp = new HeapList; TrackAllocation *pTracker = NULL; #if defined(TARGET_AMD64) || defined(TARGET_ARM64) pTracker = AllocMemory_NoThrow(0, JUMP_ALLOCATE_SIZE, sizeof(void*), 0); if (pTracker == NULL) { // This should only ever happen with fault injection _ASSERTE(g_pConfig->ShouldInjectFault(INJECTFAULT_DYNAMICCODEHEAP)); delete pHp; ThrowOutOfMemory(); } pHp->CLRPersonalityRoutine = (BYTE *)(pTracker + 1); #endif pHp->hpNext = NULL; pHp->pHeap = (PTR_CodeHeap)this; // wire it back m_pHeapList = (PTR_HeapList)pHp; LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap creation {0x%p} - size available 0x%p, private data ptr [0x%p, 0x%p]\n", (HostCodeHeap*)this, m_TotalBytesAvailable, pTracker, pTracker->size)); // It is important to exclude the CLRPersonalityRoutine from the tracked range pHp->startAddress = dac_cast<TADDR>(m_pBaseAddr) + (pTracker ? pTracker->size : 0); pHp->mapBase = ROUND_DOWN_TO_PAGE(pHp->startAddress); // round down to next lower page align pHp->pHdrMap = NULL; pHp->endAddress = pHp->startAddress; pHp->maxCodeHeapSize = m_TotalBytesAvailable - (pTracker ? pTracker->size : 0); pHp->reserveForJumpStubs = 0; #ifdef HOST_64BIT ExecutableWriterHolder<BYTE> personalityRoutineWriterHolder(pHp->CLRPersonalityRoutine, 12); emitJump(pHp->CLRPersonalityRoutine, personalityRoutineWriterHolder.GetRW(), (void *)ProcessCLRException); #endif size_t nibbleMapSize = HEAP2MAPSIZE(ROUND_UP_TO_PAGE(pHp->maxCodeHeapSize)); pHp->pHdrMap = new DWORD[nibbleMapSize / sizeof(DWORD)]; ZeroMemory(pHp->pHdrMap, nibbleMapSize); return pHp; } HostCodeHeap::TrackAllocation* HostCodeHeap::AllocFromFreeList(size_t header, size_t size, DWORD alignment, size_t reserveForJumpStubs) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; if (m_pFreeList) { LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Alloc size corrected 0x%X for free list\n", this, size)); // walk the list looking for a block with enough capacity TrackAllocation *pCurrent = m_pFreeList; TrackAllocation *pPrevious = NULL; while (pCurrent) { BYTE* pPointer = ALIGN_UP((BYTE*)(pCurrent + 1) + header, alignment); size_t realSize = ALIGN_UP(pPointer + size, sizeof(void*)) - (BYTE*)pCurrent; if (pCurrent->size >= realSize + reserveForJumpStubs) { // found a block LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Block found, size 0x%X\n", this, pCurrent->size)); ExecutableWriterHolder<TrackAllocation> previousWriterHolder; if (pPrevious) { previousWriterHolder = ExecutableWriterHolder<TrackAllocation>(pPrevious, sizeof(TrackAllocation)); } ExecutableWriterHolder<TrackAllocation> currentWriterHolder(pCurrent, sizeof(TrackAllocation)); // The space left is not big enough for a new block, let's just // update the TrackAllocation record for the current block if (pCurrent->size - realSize < max(HOST_CODEHEAP_SIZE_ALIGN, sizeof(TrackAllocation))) { LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - 
Item removed %p, size 0x%X\n", this, pCurrent, pCurrent->size)); // remove current if (pPrevious) { previousWriterHolder.GetRW()->pNext = pCurrent->pNext; } else { m_pFreeList = pCurrent->pNext; } } else { // create a new TrackAllocation after the memory we just allocated and insert it into the free list TrackAllocation *pNewCurrent = (TrackAllocation*)((BYTE*)pCurrent + realSize); ExecutableWriterHolder<TrackAllocation> newCurrentWriterHolder(pNewCurrent, sizeof(TrackAllocation)); newCurrentWriterHolder.GetRW()->pNext = pCurrent->pNext; newCurrentWriterHolder.GetRW()->size = pCurrent->size - realSize; LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Item changed %p, new size 0x%X\n", this, pNewCurrent, pNewCurrent->size)); if (pPrevious) { previousWriterHolder.GetRW()->pNext = pNewCurrent; } else { m_pFreeList = pNewCurrent; } // We only need to update the size of the current block if we are creating a new block currentWriterHolder.GetRW()->size = realSize; } currentWriterHolder.GetRW()->pHeap = this; LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Allocation returned %p, size 0x%X - data -> %p\n", this, pCurrent, pCurrent->size, pPointer)); return pCurrent; } pPrevious = pCurrent; pCurrent = pCurrent->pNext; } } LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - No block in free list for size 0x%X\n", this, size)); return NULL; } void HostCodeHeap::AddToFreeList(TrackAllocation *pBlockToInsert, TrackAllocation *pBlockToInsertRW) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Add to FreeList [%p, 0x%X]\n", this, pBlockToInsert, pBlockToInsert->size)); // append to the list in the proper position and coalesce if needed if (m_pFreeList) { TrackAllocation *pCurrent = m_pFreeList; TrackAllocation *pPrevious = NULL; while (pCurrent) { if (pCurrent > pBlockToInsert) { // found the point of insertion pBlockToInsertRW->pNext = pCurrent; ExecutableWriterHolder<TrackAllocation> previousWriterHolder; if (pPrevious) { previousWriterHolder = ExecutableWriterHolder<TrackAllocation>(pPrevious, sizeof(TrackAllocation)); previousWriterHolder.GetRW()->pNext = pBlockToInsert; LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Insert block [%p, 0x%X] -> [%p, 0x%X] -> [%p, 0x%X]\n", this, pPrevious, pPrevious->size, pBlockToInsert, pBlockToInsert->size, pCurrent, pCurrent->size)); } else { m_pFreeList = pBlockToInsert; LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Insert block [%p, 0x%X] to head\n", this, pBlockToInsert, pBlockToInsert->size)); } // check for coalescing if ((BYTE*)pBlockToInsert + pBlockToInsert->size == (BYTE*)pCurrent) { // coalesce with next LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Coalesce block [%p, 0x%X] with [%p, 0x%X] - new size 0x%X\n", this, pBlockToInsert, pBlockToInsert->size, pCurrent, pCurrent->size, pCurrent->size + pBlockToInsert->size)); pBlockToInsertRW->pNext = pCurrent->pNext; pBlockToInsertRW->size += pCurrent->size; } if (pPrevious && (BYTE*)pPrevious + pPrevious->size == (BYTE*)pBlockToInsert) { // coalesce with previous LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Coalesce block [%p, 0x%X] with [%p, 0x%X] - new size 0x%X\n", this, pPrevious, pPrevious->size, pBlockToInsert, pBlockToInsert->size, pPrevious->size + pBlockToInsert->size)); previousWriterHolder.GetRW()->pNext = pBlockToInsert->pNext; previousWriterHolder.GetRW()->size += pBlockToInsert->size; } return; } pPrevious = pCurrent; pCurrent = pCurrent->pNext; } _ASSERTE(pPrevious && pCurrent == NULL); 
pBlockToInsertRW->pNext = NULL; // last in the list ExecutableWriterHolder<TrackAllocation> previousWriterHolder2(pPrevious, sizeof(TrackAllocation)); if ((BYTE*)pPrevious + pPrevious->size == (BYTE*)pBlockToInsert) { // coalesce with previous LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Coalesce block [%p, 0x%X] with [%p, 0x%X] - new size 0x%X\n", this, pPrevious, pPrevious->size, pBlockToInsert, pBlockToInsert->size, pPrevious->size + pBlockToInsert->size)); previousWriterHolder2.GetRW()->size += pBlockToInsert->size; } else { previousWriterHolder2.GetRW()->pNext = pBlockToInsert; LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Insert block [%p, 0x%X] to end after [%p, 0x%X]\n", this, pBlockToInsert, pBlockToInsert->size, pPrevious, pPrevious->size)); } return; } // first in the list pBlockToInsertRW->pNext = m_pFreeList; m_pFreeList = pBlockToInsert; LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Insert block [%p, 0x%X] to head\n", this, m_pFreeList, m_pFreeList->size)); } void* HostCodeHeap::AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment, size_t reserveForJumpStubs) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; _ASSERTE(header == sizeof(CodeHeader)); _ASSERTE(alignment <= HOST_CODEHEAP_SIZE_ALIGN); // The code allocator has to guarantee that there is only one entrypoint per nibble map entry. // It is guaranteed because of HostCodeHeap allocator always aligns the size up to HOST_CODEHEAP_SIZE_ALIGN, // and because the size of nibble map entries (BYTES_PER_BUCKET) is smaller than HOST_CODEHEAP_SIZE_ALIGN. // Assert the later fact here. _ASSERTE(HOST_CODEHEAP_SIZE_ALIGN >= BYTES_PER_BUCKET); header += sizeof(TrackAllocation*); TrackAllocation* pTracker = AllocMemory_NoThrow(header, size, alignment, reserveForJumpStubs); if (pTracker == NULL) return NULL; BYTE * pCode = ALIGN_UP((BYTE*)(pTracker + 1) + header, alignment); // Pointer to the TrackAllocation record is stored just before the code header CodeHeader * pHdr = (CodeHeader *)pCode - 1; ExecutableWriterHolder<TrackAllocation *> trackerWriterHolder((TrackAllocation **)(pHdr) - 1, sizeof(TrackAllocation *)); *trackerWriterHolder.GetRW() = pTracker; _ASSERTE(pCode + size <= (BYTE*)pTracker + pTracker->size); // ref count the whole heap m_AllocationCount++; LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - ref count %d\n", this, m_AllocationCount)); return pCode; } HostCodeHeap::TrackAllocation* HostCodeHeap::AllocMemory_NoThrow(size_t header, size_t size, DWORD alignment, size_t reserveForJumpStubs) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; #ifdef _DEBUG if (g_pConfig->ShouldInjectFault(INJECTFAULT_DYNAMICCODEHEAP)) { char *a = new (nothrow) char; if (a == NULL) return NULL; delete a; } #endif // _DEBUG // Skip walking the free list if the cached size of the largest block is not enough size_t totalRequiredSize = ALIGN_UP(sizeof(TrackAllocation) + header + size + (alignment - 1) + reserveForJumpStubs, sizeof(void*)); if (totalRequiredSize > m_ApproximateLargestBlock) return NULL; LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Allocation requested 0x%X\n", this, size)); TrackAllocation* pTracker = AllocFromFreeList(header, size, alignment, reserveForJumpStubs); if (!pTracker) { // walk free list to end to find available space size_t availableInFreeList = 0; TrackAllocation *pCurrentBlock = m_pFreeList; TrackAllocation *pLastBlock = NULL; while (pCurrentBlock) { pLastBlock = pCurrentBlock; pCurrentBlock = pCurrentBlock->pNext; } if (pLastBlock && 
(BYTE*)pLastBlock + pLastBlock->size == m_pLastAvailableCommittedAddr) { availableInFreeList = pLastBlock->size; } _ASSERTE(totalRequiredSize > availableInFreeList); size_t sizeToCommit = totalRequiredSize - availableInFreeList; sizeToCommit = ROUND_UP_TO_PAGE(sizeToCommit); if (m_pLastAvailableCommittedAddr + sizeToCommit <= m_pBaseAddr + m_TotalBytesAvailable) { if (NULL == ExecutableAllocator::Instance()->Commit(m_pLastAvailableCommittedAddr, sizeToCommit, true /* isExecutable */)) { LOG((LF_BCL, LL_ERROR, "CodeHeap [0x%p] - VirtualAlloc failed\n", this)); return NULL; } TrackAllocation *pBlockToInsert = (TrackAllocation*)(void*)m_pLastAvailableCommittedAddr; ExecutableWriterHolder<TrackAllocation> blockToInsertWriterHolder(pBlockToInsert, sizeof(TrackAllocation)); blockToInsertWriterHolder.GetRW()->pNext = NULL; blockToInsertWriterHolder.GetRW()->size = sizeToCommit; m_pLastAvailableCommittedAddr += sizeToCommit; AddToFreeList(pBlockToInsert, blockToInsertWriterHolder.GetRW()); pTracker = AllocFromFreeList(header, size, alignment, reserveForJumpStubs); _ASSERTE(pTracker != NULL); } else { LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - allocation failed:\n\tm_pLastAvailableCommittedAddr: 0x%X\n\tsizeToCommit: 0x%X\n\tm_pBaseAddr: 0x%X\n\tm_TotalBytesAvailable: 0x%X\n", this, m_pLastAvailableCommittedAddr, sizeToCommit, m_pBaseAddr, m_TotalBytesAvailable)); // Update largest available block size m_ApproximateLargestBlock = totalRequiredSize - 1; } } return pTracker; } #endif //!DACCESS_COMPILE #ifdef DACCESS_COMPILE void HostCodeHeap::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) { WRAPPER_NO_CONTRACT; DAC_ENUM_DTHIS(); TADDR addr = dac_cast<TADDR>(m_pBaseAddr); size_t size = dac_cast<TADDR>(m_pLastAvailableCommittedAddr) - addr; #if (_DEBUG) // Test hook: when testing on debug builds, we want an easy way to test that the while // correctly terminates in the face of ridiculous stuff from the target. if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DumpGeneration_IntentionallyCorruptDataFromTarget) == 1) { // Pretend the object is very large. size |= 0xf0000000; } #endif // (_DEBUG) while (size) { ULONG32 enumSize; if (size > 0x80000000) { enumSize = 0x80000000; } else { enumSize = (ULONG32)size; } // If we can't read the target memory, stop immediately so we don't work // with broken data. 
if (!DacEnumMemoryRegion(addr, enumSize)) break; addr += enumSize; size -= enumSize; } } #endif // DACCESS_COMPILE // static struct HostCodeHeap::TrackAllocation * HostCodeHeap::GetTrackAllocation(TADDR codeStart) { LIMITED_METHOD_CONTRACT; CodeHeader * pHdr = dac_cast<PTR_CodeHeader>(PCODEToPINSTR(codeStart)) - 1; // Pointer to the TrackAllocation record is stored just before the code header return *((TrackAllocation **)(pHdr) - 1); } HostCodeHeap* HostCodeHeap::GetCodeHeap(TADDR codeStart) { WRAPPER_NO_CONTRACT; return HostCodeHeap::GetTrackAllocation(codeStart)->pHeap; } #ifndef DACCESS_COMPILE void HostCodeHeap::FreeMemForCode(void * codeStart) { LIMITED_METHOD_CONTRACT; TrackAllocation *pTracker = HostCodeHeap::GetTrackAllocation((TADDR)codeStart); ExecutableWriterHolder<TrackAllocation> trackerWriterHolder(pTracker, sizeof(TrackAllocation)); AddToFreeList(pTracker, trackerWriterHolder.GetRW()); m_ApproximateLargestBlock += pTracker->size; m_AllocationCount--; LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap released [0x%p, vt(0x%x)] - ref count %d\n", this, *(size_t*)this, m_AllocationCount)); if (m_AllocationCount == 0) { m_pJitManager->AddToCleanupList(this); } } // // Implementation for DynamicMethodDesc declared in method.hpp // void DynamicMethodDesc::Destroy() { CONTRACTL { THROWS; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; _ASSERTE(IsDynamicMethod()); LoaderAllocator *pLoaderAllocator = GetLoaderAllocator(); LOG((LF_BCL, LL_INFO1000, "Level3 - Destroying DynamicMethod {0x%p}\n", this)); // The m_pSig and m_pszMethodName need to be destroyed after the GetLCGMethodResolver()->Destroy() call // otherwise the EEJitManager::CodeHeapIterator could return DynamicMethodDesc with these members NULLed, but // the nibble map for the corresponding code memory indicating that this DynamicMethodDesc is still alive. PCODE pSig = m_pSig; PTR_CUTF8 pszMethodName = m_pszMethodName; GetLCGMethodResolver()->Destroy(); // The current DynamicMethodDesc storage is destroyed at this point if (pszMethodName != NULL) { delete[] pszMethodName; } if (pSig != NULL) { delete[] (BYTE*)pSig; } if (pLoaderAllocator->IsCollectible()) { if (pLoaderAllocator->Release()) { GCX_PREEMP(); LoaderAllocator::GCLoaderAllocators(pLoaderAllocator); } } } // // The resolver object is reused when the method is destroyed, // this will reset its state for the next use. // void LCGMethodResolver::Reset() { m_DynamicStringLiterals = NULL; m_recordCodePointer = NULL; m_UsedIndCellList = NULL; m_pJumpStubCache = NULL; m_next = NULL; m_Code = NULL; } // // Recycle all the indcells in m_UsedIndCellList by adding them to the free list // void LCGMethodResolver::RecycleIndCells() { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; // Append the list of indirection cells used by this dynamic method to the free list IndCellList * list = m_UsedIndCellList; if (list) { BYTE * cellhead = list->indcell; BYTE * cellprev = NULL; BYTE * cellcurr = NULL; // Build a linked list of indirection cells from m_UsedIndCellList. // No need to lock newlist because this method is only called during the finalization of // DynamicResolver.DestroyScout and at that time no one else should be modifying m_UsedIndCellList. while (list) { cellcurr = list->indcell; _ASSERTE(cellcurr != NULL); if (cellprev) *((BYTE**)cellprev) = cellcurr; list = list->pNext; cellprev = cellcurr; } // Insert the linked list to the free list of the VirtualCallStubManager of the current domain. // We should use GetLoaderAllocator because that is where the ind cell was allocated. 
LoaderAllocator *pLoaderAllocator = GetDynamicMethod()->GetLoaderAllocator(); VirtualCallStubManager *pMgr = pLoaderAllocator->GetVirtualCallStubManager(); pMgr->InsertIntoRecycledIndCellList_Locked(cellhead, cellcurr); m_UsedIndCellList = NULL; } } void LCGMethodResolver::Destroy() { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; LOG((LF_BCL, LL_INFO100, "Level2 - Resolver - Destroying Resolver {0x%p}\n", this)); if (m_Code) { delete[] m_Code; m_Code = NULL; } m_CodeSize = 0; if (!m_LocalSig.IsNull()) { delete[] m_LocalSig.GetPtr(); m_LocalSig = SigPointer(); } // Get the global string literal interning map GlobalStringLiteralMap* pStringLiteralMap = SystemDomain::GetGlobalStringLiteralMapNoCreate(); // release references to all the string literals used in this Dynamic Method if (pStringLiteralMap != NULL) { // lock the global string literal interning map // we cannot use GetGlobalStringLiteralMap() here because it might throw CrstHolder gch(pStringLiteralMap->GetHashTableCrstGlobal()); // Access to m_DynamicStringLiterals doesn't need to be syncrhonized because // this can be run in only one thread: the finalizer thread. while (m_DynamicStringLiterals != NULL) { m_DynamicStringLiterals->m_pEntry->Release(); m_DynamicStringLiterals = m_DynamicStringLiterals->m_pNext; } } // Note that we need to do this before m_jitTempData is deleted RecycleIndCells(); m_jitMetaHeap.Delete(); m_jitTempData.Delete(); if (m_recordCodePointer) { #if defined(TARGET_AMD64) // Remove the unwind information (if applicable) UnwindInfoTable::UnpublishUnwindInfoForMethod((TADDR)m_recordCodePointer); #endif // defined(TARGET_AMD64) HostCodeHeap *pHeap = HostCodeHeap::GetCodeHeap((TADDR)m_recordCodePointer); LOG((LF_BCL, LL_INFO1000, "Level3 - Resolver {0x%p} - Release reference to heap {%p, vt(0x%x)} \n", this, pHeap, *(size_t*)pHeap)); pHeap->m_pJitManager->FreeCodeMemory(pHeap, m_recordCodePointer); m_recordCodePointer = NULL; } if (m_pJumpStubCache != NULL) { JumpStubBlockHeader* current = m_pJumpStubCache->m_pBlocks; while (current) { JumpStubBlockHeader* next = current->m_next; HostCodeHeap *pHeap = current->GetHostCodeHeap(); LOG((LF_BCL, LL_INFO1000, "Level3 - Resolver {0x%p} - Release reference to heap {%p, vt(0x%x)} \n", current, pHeap, *(size_t*)pHeap)); pHeap->m_pJitManager->FreeCodeMemory(pHeap, current); current = next; } m_pJumpStubCache->m_pBlocks = NULL; delete m_pJumpStubCache; m_pJumpStubCache = NULL; } if (m_managedResolver) { ::DestroyLongWeakHandle(m_managedResolver); m_managedResolver = NULL; } m_DynamicMethodTable->LinkMethod(m_pDynamicMethod); } void LCGMethodResolver::FreeCompileTimeState() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; //m_jitTempData.Delete(); } void LCGMethodResolver::GetJitContext(SecurityControlFlags * securityControlFlags, TypeHandle *typeOwner) { CONTRACTL { STANDARD_VM_CHECK; PRECONDITION(CheckPointer(securityControlFlags)); PRECONDITION(CheckPointer(typeOwner)); } CONTRACTL_END; GCX_COOP(); MethodDescCallSite getJitContext(METHOD__RESOLVER__GET_JIT_CONTEXT, m_managedResolver); OBJECTREF resolver = ObjectFromHandle(m_managedResolver); _ASSERTE(resolver); // gc root must be up the stack ARG_SLOT args[] = { ObjToArgSlot(resolver), PtrToArgSlot(securityControlFlags), }; REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)getJitContext.Call_RetOBJECTREF(args); *typeOwner = refType != NULL ? 
refType->GetType() : TypeHandle(); } ChunkAllocator* LCGMethodResolver::GetJitMetaHeap() { LIMITED_METHOD_CONTRACT; return &m_jitMetaHeap; } BYTE* LCGMethodResolver::GetCodeInfo(unsigned *pCodeSize, unsigned *pStackSize, CorInfoOptions *pOptions, unsigned *pEHSize) { STANDARD_VM_CONTRACT; _ASSERTE(pCodeSize); if (!m_Code) { GCX_COOP(); LOG((LF_BCL, LL_INFO100000, "Level5 - DM-JIT: Getting CodeInfo on resolver 0x%p...\n", this)); // get the code - Byte[] Resolver.GetCodeInfo(ref ushort stackSize, ref int EHCount) MethodDescCallSite getCodeInfo(METHOD__RESOLVER__GET_CODE_INFO, m_managedResolver); OBJECTREF resolver = ObjectFromHandle(m_managedResolver); VALIDATEOBJECTREF(resolver); // gc root must be up the stack int32_t stackSize = 0, initLocals = 0, EHSize = 0; ARG_SLOT args[] = { ObjToArgSlot(resolver), PtrToArgSlot(&stackSize), PtrToArgSlot(&initLocals), PtrToArgSlot(&EHSize), }; U1ARRAYREF dataArray = (U1ARRAYREF) getCodeInfo.Call_RetOBJECTREF(args); DWORD codeSize = dataArray->GetNumComponents(); NewArrayHolder<BYTE> code(new BYTE[codeSize]); memcpy(code, dataArray->GetDataPtr(), codeSize); m_CodeSize = codeSize; _ASSERTE(FitsIn<unsigned short>(stackSize)); m_StackSize = static_cast<unsigned short>(stackSize); m_Options = (initLocals) ? CORINFO_OPT_INIT_LOCALS : (CorInfoOptions)0; _ASSERTE(FitsIn<unsigned short>(EHSize)); m_EHSize = static_cast<unsigned short>(EHSize); m_Code = (BYTE*)code; code.SuppressRelease(); LOG((LF_BCL, LL_INFO100000, "Level5 - DM-JIT: CodeInfo {0x%p} on resolver %p\n", m_Code, this)); } *pCodeSize = m_CodeSize; if (pStackSize) *pStackSize = m_StackSize; if (pOptions) *pOptions = m_Options; if (pEHSize) *pEHSize = m_EHSize; return m_Code; } //--------------------------------------------------------------------------------------- // SigPointer LCGMethodResolver::GetLocalSig() { STANDARD_VM_CONTRACT; if (m_LocalSig.IsNull()) { GCX_COOP(); LOG((LF_BCL, LL_INFO100000, "Level5 - DM-JIT: Getting LocalSig on resolver 0x%p...\n", this)); MethodDescCallSite getLocalsSignature(METHOD__RESOLVER__GET_LOCALS_SIGNATURE, m_managedResolver); OBJECTREF resolver = ObjectFromHandle(m_managedResolver); VALIDATEOBJECTREF(resolver); // gc root must be up the stack ARG_SLOT args[] = { ObjToArgSlot(resolver) }; U1ARRAYREF dataArray = (U1ARRAYREF) getLocalsSignature.Call_RetOBJECTREF(args); DWORD localSigSize = dataArray->GetNumComponents(); NewArrayHolder<COR_SIGNATURE> localSig(new COR_SIGNATURE[localSigSize]); memcpy((void *)localSig, dataArray->GetDataPtr(), localSigSize); m_LocalSig = SigPointer((PCCOR_SIGNATURE)localSig, localSigSize); localSig.SuppressRelease(); LOG((LF_BCL, LL_INFO100000, "Level5 - DM-JIT: LocalSig {0x%p} on resolver %p\n", m_LocalSig.GetPtr(), this)); } return m_LocalSig; } // LCGMethodResolver::GetLocalSig //--------------------------------------------------------------------------------------- // OBJECTHANDLE LCGMethodResolver::ConstructStringLiteral(mdToken metaTok) { STANDARD_VM_CONTRACT; GCX_COOP(); OBJECTHANDLE string = NULL; STRINGREF strRef = GetStringLiteral(metaTok); GCPROTECT_BEGIN(strRef); if (strRef != NULL) { // Instead of storing the string literal in the appdomain specific string literal map, // we store it in the dynamic method specific string liternal list // This way we can release it when the dynamic method is collected. 
string = (OBJECTHANDLE)GetOrInternString(&strRef); } GCPROTECT_END(); return string; } //--------------------------------------------------------------------------------------- // BOOL LCGMethodResolver::IsValidStringRef(mdToken metaTok) { STANDARD_VM_CONTRACT; GCX_COOP(); return GetStringLiteral(metaTok) != NULL; } int LCGMethodResolver::GetStringLiteralLength(mdToken metaTok) { STANDARD_VM_CONTRACT; GCX_COOP(); STRINGREF str = GetStringLiteral(metaTok); if (str != NULL) { return str->GetStringLength(); } return -1; } //--------------------------------------------------------------------------------------- // STRINGREF LCGMethodResolver::GetStringLiteral( mdToken token) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; MethodDescCallSite getStringLiteral(METHOD__RESOLVER__GET_STRING_LITERAL, m_managedResolver); OBJECTREF resolver = ObjectFromHandle(m_managedResolver); VALIDATEOBJECTREF(resolver); // gc root must be up the stack ARG_SLOT args[] = { ObjToArgSlot(resolver), token, }; return getStringLiteral.Call_RetSTRINGREF(args); } // This method will get the interned string by calling GetInternedString on the // global string liternal interning map. It will also store the returned entry // in m_DynamicStringLiterals STRINGREF* LCGMethodResolver::GetOrInternString(STRINGREF *pProtectedStringRef) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; PRECONDITION(CheckPointer(pProtectedStringRef)); } CONTRACTL_END; // Get the global string literal interning map GlobalStringLiteralMap* pStringLiteralMap = SystemDomain::GetGlobalStringLiteralMap(); // Calculating the hash: EEUnicodeHashTableHelper::GetHash EEStringData StringData = EEStringData((*pProtectedStringRef)->GetStringLength(), (*pProtectedStringRef)->GetBuffer()); DWORD dwHash = pStringLiteralMap->GetHash(&StringData); // lock the global string literal interning map CrstHolder gch(pStringLiteralMap->GetHashTableCrstGlobal()); StringLiteralEntryHolder pEntry(pStringLiteralMap->GetInternedString(pProtectedStringRef, dwHash, /* bAddIfNotFound */ TRUE)); DynamicStringLiteral* pStringLiteral = (DynamicStringLiteral*)m_jitTempData.New(sizeof(DynamicStringLiteral)); pStringLiteral->m_pEntry = pEntry.Extract(); // Add to m_DynamicStringLiterals: // we don't need to check for duplicate because the string literal entries in // the global string literal map are ref counted. pStringLiteral->m_pNext = m_DynamicStringLiterals; m_DynamicStringLiterals = pStringLiteral; return pStringLiteral->m_pEntry->GetStringObject(); } // AddToUsedIndCellList adds a IndCellList link to the beginning of m_UsedIndCellList. It is called by // code:CEEInfo::getCallInfo when a indirection cell is allocated for m_pDynamicMethod. 
// All the indirection cells usded by m_pDynamicMethod will be recycled when this resolver // is finalized, see code:LCGMethodResolver::RecycleIndCells void LCGMethodResolver::AddToUsedIndCellList(BYTE * indcell) { CONTRACTL { STANDARD_VM_CHECK; PRECONDITION(CheckPointer(indcell)); } CONTRACTL_END; IndCellList * link = (IndCellList *)m_jitTempData.New(sizeof(IndCellList)); link->indcell = indcell; // Insert into m_UsedIndCellList while (true) { link->pNext = m_UsedIndCellList; if (InterlockedCompareExchangeT(&m_UsedIndCellList, link, link->pNext) == link->pNext) break; } } void LCGMethodResolver::ResolveToken(mdToken token, TypeHandle * pTH, MethodDesc ** ppMD, FieldDesc ** ppFD) { STANDARD_VM_CONTRACT; GCX_COOP(); PREPARE_SIMPLE_VIRTUAL_CALLSITE(METHOD__RESOLVER__RESOLVE_TOKEN, ObjectFromHandle(m_managedResolver)); DECLARE_ARGHOLDER_ARRAY(args, 5); args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(ObjectFromHandle(m_managedResolver)); args[ARGNUM_1] = DWORD_TO_ARGHOLDER(token); args[ARGNUM_2] = pTH; args[ARGNUM_3] = ppMD; args[ARGNUM_4] = ppFD; CALL_MANAGED_METHOD_NORET(args); _ASSERTE(*ppMD == NULL || *ppFD == NULL); if (pTH->IsNull()) { if (*ppMD != NULL) *pTH = (*ppMD)->GetMethodTable(); else if (*ppFD != NULL) *pTH = (*ppFD)->GetEnclosingMethodTable(); } _ASSERTE(!pTH->IsNull()); } //--------------------------------------------------------------------------------------- // SigPointer LCGMethodResolver::ResolveSignature( mdToken token) { STANDARD_VM_CONTRACT; GCX_COOP(); U1ARRAYREF dataArray = NULL; PREPARE_SIMPLE_VIRTUAL_CALLSITE(METHOD__RESOLVER__RESOLVE_SIGNATURE, ObjectFromHandle(m_managedResolver)); DECLARE_ARGHOLDER_ARRAY(args, 3); args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(ObjectFromHandle(m_managedResolver)); args[ARGNUM_1] = DWORD_TO_ARGHOLDER(token); args[ARGNUM_2] = DWORD_TO_ARGHOLDER(0); CALL_MANAGED_METHOD_RETREF(dataArray, U1ARRAYREF, args); if (dataArray == NULL) COMPlusThrow(kInvalidProgramException); DWORD cbSig = dataArray->GetNumComponents(); PCCOR_SIGNATURE pSig = (PCCOR_SIGNATURE)m_jitTempData.New(cbSig); memcpy((void *)pSig, dataArray->GetDataPtr(), cbSig); return SigPointer(pSig, cbSig); } // LCGMethodResolver::ResolveSignature //--------------------------------------------------------------------------------------- // SigPointer LCGMethodResolver::ResolveSignatureForVarArg( mdToken token) { STANDARD_VM_CONTRACT; GCX_COOP(); U1ARRAYREF dataArray = NULL; PREPARE_SIMPLE_VIRTUAL_CALLSITE(METHOD__RESOLVER__RESOLVE_SIGNATURE, ObjectFromHandle(m_managedResolver)); DECLARE_ARGHOLDER_ARRAY(args, 3); args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(ObjectFromHandle(m_managedResolver)); args[ARGNUM_1] = DWORD_TO_ARGHOLDER(token); args[ARGNUM_2] = DWORD_TO_ARGHOLDER(1); CALL_MANAGED_METHOD_RETREF(dataArray, U1ARRAYREF, args); if (dataArray == NULL) COMPlusThrow(kInvalidProgramException); DWORD cbSig = dataArray->GetNumComponents(); PCCOR_SIGNATURE pSig = (PCCOR_SIGNATURE)m_jitTempData.New(cbSig); memcpy((void *)pSig, dataArray->GetDataPtr(), cbSig); return SigPointer(pSig, cbSig); } // LCGMethodResolver::ResolveSignatureForVarArg //--------------------------------------------------------------------------------------- // void LCGMethodResolver::GetEHInfo(unsigned EHnumber, CORINFO_EH_CLAUSE* clause) { STANDARD_VM_CONTRACT; GCX_COOP(); // attempt to get the raw EHInfo first { U1ARRAYREF dataArray; PREPARE_SIMPLE_VIRTUAL_CALLSITE(METHOD__RESOLVER__GET_RAW_EH_INFO, ObjectFromHandle(m_managedResolver)); DECLARE_ARGHOLDER_ARRAY(args, 1); args[ARGNUM_0] = 
OBJECTREF_TO_ARGHOLDER(ObjectFromHandle(m_managedResolver)); CALL_MANAGED_METHOD_RETREF(dataArray, U1ARRAYREF, args); if (dataArray != NULL) { COR_ILMETHOD_SECT_EH* pEH = (COR_ILMETHOD_SECT_EH*)dataArray->GetDataPtr(); COR_ILMETHOD_SECT_EH_CLAUSE_FAT ehClause; const COR_ILMETHOD_SECT_EH_CLAUSE_FAT* ehInfo; ehInfo = (COR_ILMETHOD_SECT_EH_CLAUSE_FAT*)pEH->EHClause(EHnumber, &ehClause); clause->Flags = (CORINFO_EH_CLAUSE_FLAGS)ehInfo->GetFlags(); clause->TryOffset = ehInfo->GetTryOffset(); clause->TryLength = ehInfo->GetTryLength(); clause->HandlerOffset = ehInfo->GetHandlerOffset(); clause->HandlerLength = ehInfo->GetHandlerLength(); clause->ClassToken = ehInfo->GetClassToken(); clause->FilterOffset = ehInfo->GetFilterOffset(); return; } } // failed, get the info off the ilgenerator { PREPARE_SIMPLE_VIRTUAL_CALLSITE(METHOD__RESOLVER__GET_EH_INFO, ObjectFromHandle(m_managedResolver)); DECLARE_ARGHOLDER_ARRAY(args, 3); args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(ObjectFromHandle(m_managedResolver)); args[ARGNUM_1] = DWORD_TO_ARGHOLDER(EHnumber); args[ARGNUM_2] = PTR_TO_ARGHOLDER(clause); CALL_MANAGED_METHOD_NORET(args); } } #endif // !DACCESS_COMPILE // Get the associated managed resolver. This method will be called during a GC so it should not throw, trigger a GC or cause the // object in question to be validated. OBJECTREF LCGMethodResolver::GetManagedResolver() { LIMITED_METHOD_CONTRACT; return ObjectFromHandle(m_managedResolver); } // // ChunkAllocator implementation // ChunkAllocator::~ChunkAllocator() { LIMITED_METHOD_CONTRACT; Delete(); } void ChunkAllocator::Delete() { LIMITED_METHOD_CONTRACT; BYTE *next = NULL; LOG((LF_BCL, LL_INFO10, "Level1 - DM - Allocator [0x%p] - deleting...\n", this)); while (m_pData) { LOG((LF_BCL, LL_INFO10, "Level1 - DM - Allocator [0x%p] - delete block {0x%p}\n", this, m_pData)); next = ((BYTE**)m_pData)[0]; delete[] m_pData; m_pData = next; } } void* ChunkAllocator::New(size_t size) { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; // We need to align it, otherwise we might get DataMisalignedException on IA64 size = ALIGN_UP(size, sizeof(void *)); BYTE *pNewBlock = NULL; LOG((LF_BCL, LL_INFO100, "Level2 - DM - Allocator [0x%p] - allocation requested 0x%X, available 0x%X\n", this, size, (m_pData) ? 
((size_t*)m_pData)[1] : 0)); if (m_pData) { // we may have room available size_t available = ((size_t*)m_pData)[1]; if (size <= available) { LOG((LF_BCL, LL_INFO100, "Level2 - DM - Allocator [0x%p] - reusing block {0x%p}\n", this, m_pData)); ((size_t*)m_pData)[1] = available - size; pNewBlock = (m_pData + CHUNK_SIZE - available); LOG((LF_BCL, LL_INFO100, "Level2 - DM - Allocator [0x%p] - ptr -> 0x%p, available 0x%X\n", this, pNewBlock, ((size_t*)m_pData)[1])); return pNewBlock; } } // no available - need to allocate a new buffer if (size + (sizeof(void*) * 2) < CHUNK_SIZE) { // make the allocation NewArrayHolder<BYTE> newBlock(new BYTE[CHUNK_SIZE]); pNewBlock = (BYTE*)newBlock; ((size_t*)pNewBlock)[1] = CHUNK_SIZE - size - (sizeof(void*) * 2); LOG((LF_BCL, LL_INFO10, "Level1 - DM - Allocator [0x%p] - new block {0x%p}\n", this, pNewBlock)); newBlock.SuppressRelease(); } else { // request bigger than default size this is going to be a single block NewArrayHolder<BYTE> newBlock(new BYTE[size + (sizeof(void*) * 2)]); pNewBlock = (BYTE*)newBlock; ((size_t*)pNewBlock)[1] = 0; // no available bytes left LOG((LF_BCL, LL_INFO10, "Level1 - DM - Allocator [0x%p] - new BIG block {0x%p}\n", this, pNewBlock)); newBlock.SuppressRelease(); } // all we have left to do is to link the block. // We leave at the top the block with more bytes available if (m_pData) { if (((size_t*)pNewBlock)[1] > ((size_t*)m_pData)[1]) { ((BYTE**)pNewBlock)[0] = m_pData; m_pData = pNewBlock; } else { ((BYTE**)pNewBlock)[0] = ((BYTE**)m_pData)[0]; ((BYTE**)m_pData)[0] = pNewBlock; } } else { // this is the first allocation m_pData = pNewBlock; ((BYTE**)m_pData)[0] = NULL; } pNewBlock += (sizeof(void*) * 2); LOG((LF_BCL, LL_INFO100, "Level2 - DM - Allocator [0x%p] - ptr -> 0x%p, available 0x%X\n", this, pNewBlock, ((size_t*)m_pData)[1])); return pNewBlock; }
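
ChunkAllocator::New above keeps its bookkeeping in the first two pointer-sized slots of each chunk: slot 0 links to the next chunk and slot 1 holds the bytes still available, with allocations carved from the tail of the chunk. The standalone sketch below mirrors that layout so the pointer arithmetic is easier to follow; the class name, the 8 KB chunk size and the simplified linking policy (a fresh chunk always becomes the head) are assumptions for illustration, not the CLR implementation.

#include <cstddef>

// Minimal illustration of the chunk layout used above:
//   [0] (void*)  -> next chunk in the list
//   [1] (size_t) -> bytes still available at the tail of this chunk
class TinyChunkAllocator {
    static const size_t kChunkSize = 8 * 1024;            // assumed; CHUNK_SIZE in the real code
    static const size_t kHeader    = 2 * sizeof(void*);   // next pointer + available count
    unsigned char* m_head = nullptr;

public:
    ~TinyChunkAllocator()
    {
        while (m_head) {                                   // walk the chain and free every chunk
            unsigned char* next = *reinterpret_cast<unsigned char**>(m_head);
            delete[] m_head;
            m_head = next;
        }
    }

    void* New(size_t size)
    {
        size = (size + sizeof(void*) - 1) & ~(sizeof(void*) - 1);    // pointer-align, like ALIGN_UP

        if (m_head) {
            size_t* hdr = reinterpret_cast<size_t*>(m_head);
            size_t available = hdr[1];
            if (size <= available) {
                void* p = m_head + kChunkSize - available;           // carve from the front of the free tail
                hdr[1] = available - size;
                return p;
            }
        }

        // No room: start a fresh chunk; oversized requests get a dedicated block with 0 bytes left.
        size_t block = (size + kHeader > kChunkSize) ? size + kHeader : kChunkSize;
        unsigned char* chunk = new unsigned char[block];
        *reinterpret_cast<unsigned char**>(chunk) = m_head;           // slot 0: link to the old head
        reinterpret_cast<size_t*>(chunk)[1] = block - kHeader - size; // slot 1: bytes left at the tail
        m_head = chunk;
        return chunk + kHeader;                            // first allocation sits right after the header
    }
};
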
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // #include "common.h" #include "dynamicmethod.h" #include "object.h" #include "method.hpp" #include "comdelegate.h" #include "field.h" #include "contractimpl.h" #include "nibblemapmacros.h" #include "stringliteralmap.h" #include "virtualcallstub.h" #ifndef DACCESS_COMPILE // get the method table for dynamic methods DynamicMethodTable* DomainAssembly::GetDynamicMethodTable() { CONTRACT (DynamicMethodTable*) { INSTANCE_CHECK; THROWS; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(COMPlusThrowOM()); POSTCONDITION(CheckPointer(m_pDynamicMethodTable)); } CONTRACT_END; if (!m_pDynamicMethodTable) DynamicMethodTable::CreateDynamicMethodTable(&m_pDynamicMethodTable, GetModule(), GetAppDomain()); RETURN m_pDynamicMethodTable; } void ReleaseDynamicMethodTable(DynamicMethodTable *pDynMT) { WRAPPER_NO_CONTRACT; if (pDynMT) { pDynMT->Destroy(); } } void DynamicMethodTable::CreateDynamicMethodTable(DynamicMethodTable **ppLocation, Module *pModule, AppDomain *pDomain) { CONTRACT_VOID { THROWS; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(COMPlusThrowOM()); PRECONDITION(CheckPointer(ppLocation)); PRECONDITION(CheckPointer(pModule)); POSTCONDITION(CheckPointer(*ppLocation)); } CONTRACT_END; AllocMemTracker amt; LoaderHeap* pHeap = pDomain->GetHighFrequencyHeap(); _ASSERTE(pHeap); if (*ppLocation) RETURN; DynamicMethodTable* pDynMT = (DynamicMethodTable*) amt.Track(pHeap->AllocMem(S_SIZE_T(sizeof(DynamicMethodTable)))); // Note: Memory allocated on loader heap is zero filled // memset((void*)pDynMT, 0, sizeof(DynamicMethodTable)); if (*ppLocation) RETURN; LOG((LF_BCL, LL_INFO100, "Level2 - Creating DynamicMethodTable {0x%p}...\n", pDynMT)); Holder<DynamicMethodTable*, DoNothing, ReleaseDynamicMethodTable> dynMTHolder(pDynMT); pDynMT->m_Crst.Init(CrstDynamicMT); pDynMT->m_Module = pModule; pDynMT->m_pDomain = pDomain; pDynMT->MakeMethodTable(&amt); if (*ppLocation) RETURN; if (FastInterlockCompareExchangePointer(ppLocation, pDynMT, NULL) != NULL) { LOG((LF_BCL, LL_INFO100, "Level2 - Another thread got here first - deleting DynamicMethodTable {0x%p}...\n", pDynMT)); RETURN; } dynMTHolder.SuppressRelease(); amt.SuppressRelease(); LOG((LF_BCL, LL_INFO10, "Level1 - DynamicMethodTable created {0x%p}...\n", pDynMT)); RETURN; } void DynamicMethodTable::MakeMethodTable(AllocMemTracker *pamTracker) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(COMPlusThrowOM()); } CONTRACTL_END; m_pMethodTable = CreateMinimalMethodTable(m_Module, m_pDomain->GetHighFrequencyHeap(), pamTracker); } void DynamicMethodTable::Destroy() { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; #if _DEBUG // This method should be called only for collectible types or for non-collectible ones // at the construction time when there are no DynamicMethodDesc instances added to the // DynamicMethodTable yet (from the DynamicMethodTable::CreateDynamicMethodTable in case // there were two threads racing to construct the instance for the thread that lost // the race) if (m_pMethodTable != NULL && !m_pMethodTable->GetLoaderAllocator()->IsCollectible()) { MethodTable::IntroducedMethodIterator it(m_pMethodTable); _ASSERTE(!it.IsValid()); } #endif m_Crst.Destroy(); LOG((LF_BCL, LL_INFO10, "Level1 - DynamicMethodTable destroyed {0x%p}\n", this)); } void DynamicMethodTable::AddMethodsToList() { CONTRACT_VOID { THROWS; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(COMPlusThrowOM()); } CONTRACT_END; AllocMemTracker amt; LoaderHeap* 
pHeap = m_pMethodTable->GetLoaderAllocator()->GetHighFrequencyHeap(); _ASSERTE(pHeap); // // allocate as many chunks as needed to hold the methods // MethodDescChunk* pChunk = MethodDescChunk::CreateChunk(pHeap, 0 /* one chunk of maximum size */, mcDynamic, TRUE /* fNonVtableSlot */, TRUE /* fNativeCodeSlot */, FALSE /* fComPlusCallInfo */, m_pMethodTable, &amt); if (m_DynamicMethodList) RETURN; int methodCount = pChunk->GetCount(); BYTE* pResolvers = (BYTE*)amt.Track(pHeap->AllocMem(S_SIZE_T(sizeof(LCGMethodResolver)) * S_SIZE_T(methodCount))); if (m_DynamicMethodList) RETURN; DynamicMethodDesc *pNewMD = (DynamicMethodDesc *)pChunk->GetFirstMethodDesc(); DynamicMethodDesc *pPrevMD = NULL; // now go through all the methods in the chunk and link them for(int i = 0; i < methodCount; i++) { _ASSERTE(pNewMD->GetClassification() == mcDynamic); pNewMD->SetMemberDef(0); pNewMD->SetSlot(MethodTable::NO_SLOT); // we can't ever use the slot for dynamic methods pNewMD->SetStatic(); pNewMD->InitializeFlags(DynamicMethodDesc::FlagPublic | DynamicMethodDesc::FlagStatic | DynamicMethodDesc::FlagIsLCGMethod); LCGMethodResolver* pResolver = new (pResolvers) LCGMethodResolver(); pResolver->m_pDynamicMethod = pNewMD; pResolver->m_DynamicMethodTable = this; pNewMD->m_pResolver = pResolver; pNewMD->SetTemporaryEntryPoint(m_pDomain->GetLoaderAllocator(), &amt); #ifdef _DEBUG pNewMD->m_pDebugMethodTable = m_pMethodTable; #endif if (pPrevMD) { pPrevMD->GetLCGMethodResolver()->m_next = pNewMD; } pPrevMD = pNewMD; pNewMD = (DynamicMethodDesc *)(dac_cast<TADDR>(pNewMD) + pNewMD->SizeOf()); pResolvers += sizeof(LCGMethodResolver); } if (m_DynamicMethodList) RETURN; { // publish method list and method table LockHolder lh(this); if (m_DynamicMethodList) RETURN; // publish the new method descs on the method table m_pMethodTable->GetClass()->AddChunk(pChunk); m_DynamicMethodList = (DynamicMethodDesc*)pChunk->GetFirstMethodDesc(); } amt.SuppressRelease(); } DynamicMethodDesc* DynamicMethodTable::GetDynamicMethod(BYTE *psig, DWORD sigSize, PTR_CUTF8 name) { CONTRACT (DynamicMethodDesc*) { INSTANCE_CHECK; THROWS; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(COMPlusThrowOM()); PRECONDITION(CheckPointer(psig)); PRECONDITION(sigSize > 0); POSTCONDITION(CheckPointer(RETVAL)); } CONTRACT_END; LOG((LF_BCL, LL_INFO10000, "Level4 - Getting DynamicMethod\n")); DynamicMethodDesc *pNewMD = NULL; for (;;) { { LockHolder lh(this); pNewMD = m_DynamicMethodList; if (pNewMD) { m_DynamicMethodList = pNewMD->GetLCGMethodResolver()->m_next; #ifdef _DEBUG m_Used++; #endif break; } } LOG((LF_BCL, LL_INFO1000, "Level4 - DynamicMethod unavailable\n")); // need to create more methoddescs AddMethodsToList(); } _ASSERTE(pNewMD != NULL); // Reset the method desc into pristine state // Note: Reset has THROWS contract since it may allocate jump stub. It will never throw here // since it will always reuse the existing jump stub. 
pNewMD->Reset(); LOG((LF_BCL, LL_INFO1000, "Level3 - DynamicMethod obtained {0x%p} (used %d)\n", pNewMD, m_Used)); // the store sig part of the method desc pNewMD->SetStoredMethodSig((PCCOR_SIGNATURE)psig, sigSize); // the dynamic part of the method desc pNewMD->m_pszMethodName = name; pNewMD->InitializeFlags(DynamicMethodDesc::FlagPublic | DynamicMethodDesc::FlagStatic | DynamicMethodDesc::FlagIsLCGMethod); #ifdef _DEBUG pNewMD->m_pszDebugMethodName = name; pNewMD->m_pszDebugClassName = (LPUTF8)"dynamicclass"; pNewMD->m_pszDebugMethodSignature = "DynamicMethod Signature not available"; #endif // _DEBUG #ifdef HAVE_GCCOVER pNewMD->m_GcCover = NULL; #endif pNewMD->SetNotInline(TRUE); pNewMD->GetLCGMethodResolver()->Reset(); RETURN pNewMD; } void DynamicMethodTable::LinkMethod(DynamicMethodDesc *pMethod) { CONTRACT_VOID { NOTHROW; GC_TRIGGERS; MODE_ANY; PRECONDITION(CheckPointer(pMethod)); } CONTRACT_END; LOG((LF_BCL, LL_INFO10000, "Level4 - Returning DynamicMethod to free list {0x%p} (used %d)\n", pMethod, m_Used)); { LockHolder lh(this); pMethod->GetLCGMethodResolver()->m_next = m_DynamicMethodList; m_DynamicMethodList = pMethod; #ifdef _DEBUG m_Used--; #endif } RETURN; } // // CodeHeap implementation // HeapList* HostCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, EEJitManager *pJitManager) { CONTRACT (HeapList*) { THROWS; GC_NOTRIGGER; MODE_ANY; INJECT_FAULT(COMPlusThrowOM()); POSTCONDITION((RETVAL != NULL) || !pInfo->getThrowOnOutOfMemoryWithinRange()); } CONTRACT_END; NewHolder<HostCodeHeap> pCodeHeap(new HostCodeHeap(pJitManager)); HeapList *pHp = pCodeHeap->InitializeHeapList(pInfo); if (pHp == NULL) { _ASSERTE(!pInfo->getThrowOnOutOfMemoryWithinRange()); RETURN NULL; } LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap creation {0x%p} - base addr 0x%p, size available 0x%p, nibble map ptr 0x%p\n", (HostCodeHeap*)pCodeHeap, pCodeHeap->m_pBaseAddr, pCodeHeap->m_TotalBytesAvailable, pCodeHeap->m_pHeapList->pHdrMap)); pCodeHeap.SuppressRelease(); LOG((LF_BCL, LL_INFO10, "Level1 - CodeHeap created {0x%p}\n", (HostCodeHeap*)pCodeHeap)); RETURN pHp; } HostCodeHeap::HostCodeHeap(EEJitManager *pJitManager) { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; INJECT_FAULT(COMPlusThrowOM()); } CONTRACTL_END; m_pBaseAddr = NULL; m_pLastAvailableCommittedAddr = NULL; m_TotalBytesAvailable = 0; m_ApproximateLargestBlock = 0; m_AllocationCount = 0; m_pHeapList = NULL; m_pJitManager = (PTR_EEJitManager)pJitManager; m_pFreeList = NULL; m_pAllocator = NULL; m_pNextHeapToRelease = NULL; } HostCodeHeap::~HostCodeHeap() { LIMITED_METHOD_CONTRACT; if (m_pHeapList != NULL && m_pHeapList->pHdrMap != NULL) delete[] m_pHeapList->pHdrMap; if (m_pBaseAddr) ExecutableAllocator::Instance()->Release(m_pBaseAddr); LOG((LF_BCL, LL_INFO10, "Level1 - CodeHeap destroyed {0x%p}\n", this)); } HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo) { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; size_t ReserveBlockSize = pInfo->getRequestSize(); // Add TrackAllocation, HeapList and very conservative padding to make sure we have enough for the allocation ReserveBlockSize += sizeof(TrackAllocation) + HOST_CODEHEAP_SIZE_ALIGN + 0x100; #if defined(TARGET_AMD64) || defined(TARGET_ARM64) ReserveBlockSize += JUMP_ALLOCATE_SIZE; #endif // reserve ReserveBlockSize rounded-up to VIRTUAL_ALLOC_RESERVE_GRANULARITY of memory ReserveBlockSize = ALIGN_UP(ReserveBlockSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY); if (pInfo->m_loAddr != NULL || pInfo->m_hiAddr != NULL) { m_pBaseAddr = 
(BYTE*)ExecutableAllocator::Instance()->ReserveWithinRange(ReserveBlockSize, pInfo->m_loAddr, pInfo->m_hiAddr); if (!m_pBaseAddr) { if (pInfo->getThrowOnOutOfMemoryWithinRange()) ThrowOutOfMemoryWithinRange(); return NULL; } } else { // top up the ReserveBlockSize to suggested minimum ReserveBlockSize = max(ReserveBlockSize, pInfo->getReserveSize()); m_pBaseAddr = (BYTE*)ExecutableAllocator::Instance()->Reserve(ReserveBlockSize); if (!m_pBaseAddr) ThrowOutOfMemory(); } m_pLastAvailableCommittedAddr = m_pBaseAddr; m_TotalBytesAvailable = ReserveBlockSize; m_ApproximateLargestBlock = ReserveBlockSize; m_pAllocator = pInfo->m_pAllocator; HeapList* pHp = new HeapList; TrackAllocation *pTracker = NULL; #if defined(TARGET_AMD64) || defined(TARGET_ARM64) pTracker = AllocMemory_NoThrow(0, JUMP_ALLOCATE_SIZE, sizeof(void*), 0); if (pTracker == NULL) { // This should only ever happen with fault injection _ASSERTE(g_pConfig->ShouldInjectFault(INJECTFAULT_DYNAMICCODEHEAP)); delete pHp; ThrowOutOfMemory(); } pHp->CLRPersonalityRoutine = (BYTE *)(pTracker + 1); #endif pHp->hpNext = NULL; pHp->pHeap = (PTR_CodeHeap)this; // wire it back m_pHeapList = (PTR_HeapList)pHp; LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap creation {0x%p} - size available 0x%p, private data ptr [0x%p, 0x%p]\n", (HostCodeHeap*)this, m_TotalBytesAvailable, pTracker, pTracker->size)); // It is important to exclude the CLRPersonalityRoutine from the tracked range pHp->startAddress = dac_cast<TADDR>(m_pBaseAddr) + (pTracker ? pTracker->size : 0); pHp->mapBase = ROUND_DOWN_TO_PAGE(pHp->startAddress); // round down to next lower page align pHp->pHdrMap = NULL; pHp->endAddress = pHp->startAddress; pHp->maxCodeHeapSize = m_TotalBytesAvailable - (pTracker ? pTracker->size : 0); pHp->reserveForJumpStubs = 0; #ifdef HOST_64BIT ExecutableWriterHolder<BYTE> personalityRoutineWriterHolder(pHp->CLRPersonalityRoutine, 12); emitJump(pHp->CLRPersonalityRoutine, personalityRoutineWriterHolder.GetRW(), (void *)ProcessCLRException); #endif size_t nibbleMapSize = HEAP2MAPSIZE(ROUND_UP_TO_PAGE(pHp->maxCodeHeapSize)); pHp->pHdrMap = new DWORD[nibbleMapSize / sizeof(DWORD)]; ZeroMemory(pHp->pHdrMap, nibbleMapSize); return pHp; } HostCodeHeap::TrackAllocation* HostCodeHeap::AllocFromFreeList(size_t header, size_t size, DWORD alignment, size_t reserveForJumpStubs) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; if (m_pFreeList) { LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Alloc size corrected 0x%X for free list\n", this, size)); // walk the list looking for a block with enough capacity TrackAllocation *pCurrent = m_pFreeList; TrackAllocation *pPrevious = NULL; while (pCurrent) { BYTE* pPointer = ALIGN_UP((BYTE*)(pCurrent + 1) + header, alignment); size_t realSize = ALIGN_UP(pPointer + size, sizeof(void*)) - (BYTE*)pCurrent; if (pCurrent->size >= realSize + reserveForJumpStubs) { // found a block LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Block found, size 0x%X\n", this, pCurrent->size)); ExecutableWriterHolder<TrackAllocation> previousWriterHolder; if (pPrevious) { previousWriterHolder = ExecutableWriterHolder<TrackAllocation>(pPrevious, sizeof(TrackAllocation)); } ExecutableWriterHolder<TrackAllocation> currentWriterHolder(pCurrent, sizeof(TrackAllocation)); // The space left is not big enough for a new block, let's just // update the TrackAllocation record for the current block if (pCurrent->size - realSize < max(HOST_CODEHEAP_SIZE_ALIGN, sizeof(TrackAllocation))) { LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - 
Item removed %p, size 0x%X\n", this, pCurrent, pCurrent->size)); // remove current if (pPrevious) { previousWriterHolder.GetRW()->pNext = pCurrent->pNext; } else { m_pFreeList = pCurrent->pNext; } } else { // create a new TrackAllocation after the memory we just allocated and insert it into the free list TrackAllocation *pNewCurrent = (TrackAllocation*)((BYTE*)pCurrent + realSize); ExecutableWriterHolder<TrackAllocation> newCurrentWriterHolder(pNewCurrent, sizeof(TrackAllocation)); newCurrentWriterHolder.GetRW()->pNext = pCurrent->pNext; newCurrentWriterHolder.GetRW()->size = pCurrent->size - realSize; LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Item changed %p, new size 0x%X\n", this, pNewCurrent, pNewCurrent->size)); if (pPrevious) { previousWriterHolder.GetRW()->pNext = pNewCurrent; } else { m_pFreeList = pNewCurrent; } // We only need to update the size of the current block if we are creating a new block currentWriterHolder.GetRW()->size = realSize; } currentWriterHolder.GetRW()->pHeap = this; LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Allocation returned %p, size 0x%X - data -> %p\n", this, pCurrent, pCurrent->size, pPointer)); return pCurrent; } pPrevious = pCurrent; pCurrent = pCurrent->pNext; } } LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - No block in free list for size 0x%X\n", this, size)); return NULL; } void HostCodeHeap::AddToFreeList(TrackAllocation *pBlockToInsert, TrackAllocation *pBlockToInsertRW) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Add to FreeList [%p, 0x%X]\n", this, pBlockToInsert, pBlockToInsert->size)); // append to the list in the proper position and coalesce if needed if (m_pFreeList) { TrackAllocation *pCurrent = m_pFreeList; TrackAllocation *pPrevious = NULL; while (pCurrent) { if (pCurrent > pBlockToInsert) { // found the point of insertion pBlockToInsertRW->pNext = pCurrent; ExecutableWriterHolder<TrackAllocation> previousWriterHolder; if (pPrevious) { previousWriterHolder = ExecutableWriterHolder<TrackAllocation>(pPrevious, sizeof(TrackAllocation)); previousWriterHolder.GetRW()->pNext = pBlockToInsert; LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Insert block [%p, 0x%X] -> [%p, 0x%X] -> [%p, 0x%X]\n", this, pPrevious, pPrevious->size, pBlockToInsert, pBlockToInsert->size, pCurrent, pCurrent->size)); } else { m_pFreeList = pBlockToInsert; LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Insert block [%p, 0x%X] to head\n", this, pBlockToInsert, pBlockToInsert->size)); } // check for coalescing if ((BYTE*)pBlockToInsert + pBlockToInsert->size == (BYTE*)pCurrent) { // coalesce with next LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Coalesce block [%p, 0x%X] with [%p, 0x%X] - new size 0x%X\n", this, pBlockToInsert, pBlockToInsert->size, pCurrent, pCurrent->size, pCurrent->size + pBlockToInsert->size)); pBlockToInsertRW->pNext = pCurrent->pNext; pBlockToInsertRW->size += pCurrent->size; } if (pPrevious && (BYTE*)pPrevious + pPrevious->size == (BYTE*)pBlockToInsert) { // coalesce with previous LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Coalesce block [%p, 0x%X] with [%p, 0x%X] - new size 0x%X\n", this, pPrevious, pPrevious->size, pBlockToInsert, pBlockToInsert->size, pPrevious->size + pBlockToInsert->size)); previousWriterHolder.GetRW()->pNext = pBlockToInsert->pNext; previousWriterHolder.GetRW()->size += pBlockToInsert->size; } return; } pPrevious = pCurrent; pCurrent = pCurrent->pNext; } _ASSERTE(pPrevious && pCurrent == NULL); 
pBlockToInsertRW->pNext = NULL; // last in the list ExecutableWriterHolder<TrackAllocation> previousWriterHolder2(pPrevious, sizeof(TrackAllocation)); if ((BYTE*)pPrevious + pPrevious->size == (BYTE*)pBlockToInsert) { // coalesce with previous LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Coalesce block [%p, 0x%X] with [%p, 0x%X] - new size 0x%X\n", this, pPrevious, pPrevious->size, pBlockToInsert, pBlockToInsert->size, pPrevious->size + pBlockToInsert->size)); previousWriterHolder2.GetRW()->size += pBlockToInsert->size; } else { previousWriterHolder2.GetRW()->pNext = pBlockToInsert; LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Insert block [%p, 0x%X] to end after [%p, 0x%X]\n", this, pBlockToInsert, pBlockToInsert->size, pPrevious, pPrevious->size)); } return; } // first in the list pBlockToInsertRW->pNext = m_pFreeList; m_pFreeList = pBlockToInsert; LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Insert block [%p, 0x%X] to head\n", this, m_pFreeList, m_pFreeList->size)); } void* HostCodeHeap::AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment, size_t reserveForJumpStubs) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; _ASSERTE(header == sizeof(CodeHeader)); _ASSERTE(alignment <= HOST_CODEHEAP_SIZE_ALIGN); // The code allocator has to guarantee that there is only one entrypoint per nibble map entry. // It is guaranteed because of HostCodeHeap allocator always aligns the size up to HOST_CODEHEAP_SIZE_ALIGN, // and because the size of nibble map entries (BYTES_PER_BUCKET) is smaller than HOST_CODEHEAP_SIZE_ALIGN. // Assert the later fact here. _ASSERTE(HOST_CODEHEAP_SIZE_ALIGN >= BYTES_PER_BUCKET); header += sizeof(TrackAllocation*); TrackAllocation* pTracker = AllocMemory_NoThrow(header, size, alignment, reserveForJumpStubs); if (pTracker == NULL) return NULL; BYTE * pCode = ALIGN_UP((BYTE*)(pTracker + 1) + header, alignment); // Pointer to the TrackAllocation record is stored just before the code header CodeHeader * pHdr = (CodeHeader *)pCode - 1; ExecutableWriterHolder<TrackAllocation *> trackerWriterHolder((TrackAllocation **)(pHdr) - 1, sizeof(TrackAllocation *)); *trackerWriterHolder.GetRW() = pTracker; _ASSERTE(pCode + size <= (BYTE*)pTracker + pTracker->size); // ref count the whole heap m_AllocationCount++; LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - ref count %d\n", this, m_AllocationCount)); return pCode; } HostCodeHeap::TrackAllocation* HostCodeHeap::AllocMemory_NoThrow(size_t header, size_t size, DWORD alignment, size_t reserveForJumpStubs) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; #ifdef _DEBUG if (g_pConfig->ShouldInjectFault(INJECTFAULT_DYNAMICCODEHEAP)) { char *a = new (nothrow) char; if (a == NULL) return NULL; delete a; } #endif // _DEBUG // Skip walking the free list if the cached size of the largest block is not enough size_t totalRequiredSize = ALIGN_UP(sizeof(TrackAllocation) + header + size + (alignment - 1) + reserveForJumpStubs, sizeof(void*)); if (totalRequiredSize > m_ApproximateLargestBlock) return NULL; LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Allocation requested 0x%X\n", this, size)); TrackAllocation* pTracker = AllocFromFreeList(header, size, alignment, reserveForJumpStubs); if (!pTracker) { // walk free list to end to find available space size_t availableInFreeList = 0; TrackAllocation *pCurrentBlock = m_pFreeList; TrackAllocation *pLastBlock = NULL; while (pCurrentBlock) { pLastBlock = pCurrentBlock; pCurrentBlock = pCurrentBlock->pNext; } if (pLastBlock && 
(BYTE*)pLastBlock + pLastBlock->size == m_pLastAvailableCommittedAddr) { availableInFreeList = pLastBlock->size; } _ASSERTE(totalRequiredSize > availableInFreeList); size_t sizeToCommit = totalRequiredSize - availableInFreeList; sizeToCommit = ROUND_UP_TO_PAGE(sizeToCommit); if (m_pLastAvailableCommittedAddr + sizeToCommit <= m_pBaseAddr + m_TotalBytesAvailable) { if (NULL == ExecutableAllocator::Instance()->Commit(m_pLastAvailableCommittedAddr, sizeToCommit, true /* isExecutable */)) { LOG((LF_BCL, LL_ERROR, "CodeHeap [0x%p] - VirtualAlloc failed\n", this)); return NULL; } TrackAllocation *pBlockToInsert = (TrackAllocation*)(void*)m_pLastAvailableCommittedAddr; ExecutableWriterHolder<TrackAllocation> blockToInsertWriterHolder(pBlockToInsert, sizeof(TrackAllocation)); blockToInsertWriterHolder.GetRW()->pNext = NULL; blockToInsertWriterHolder.GetRW()->size = sizeToCommit; m_pLastAvailableCommittedAddr += sizeToCommit; AddToFreeList(pBlockToInsert, blockToInsertWriterHolder.GetRW()); pTracker = AllocFromFreeList(header, size, alignment, reserveForJumpStubs); _ASSERTE(pTracker != NULL); } else { LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - allocation failed:\n\tm_pLastAvailableCommittedAddr: 0x%X\n\tsizeToCommit: 0x%X\n\tm_pBaseAddr: 0x%X\n\tm_TotalBytesAvailable: 0x%X\n", this, m_pLastAvailableCommittedAddr, sizeToCommit, m_pBaseAddr, m_TotalBytesAvailable)); // Update largest available block size m_ApproximateLargestBlock = totalRequiredSize - 1; } } return pTracker; } #endif //!DACCESS_COMPILE #ifdef DACCESS_COMPILE void HostCodeHeap::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) { WRAPPER_NO_CONTRACT; DAC_ENUM_DTHIS(); TADDR addr = dac_cast<TADDR>(m_pBaseAddr); size_t size = dac_cast<TADDR>(m_pLastAvailableCommittedAddr) - addr; #if (_DEBUG) // Test hook: when testing on debug builds, we want an easy way to test that the while // correctly terminates in the face of ridiculous stuff from the target. if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DumpGeneration_IntentionallyCorruptDataFromTarget) == 1) { // Pretend the object is very large. size |= 0xf0000000; } #endif // (_DEBUG) while (size) { ULONG32 enumSize; if (size > 0x80000000) { enumSize = 0x80000000; } else { enumSize = (ULONG32)size; } // If we can't read the target memory, stop immediately so we don't work // with broken data. 
if (!DacEnumMemoryRegion(addr, enumSize)) break; addr += enumSize; size -= enumSize; } } #endif // DACCESS_COMPILE // static struct HostCodeHeap::TrackAllocation * HostCodeHeap::GetTrackAllocation(TADDR codeStart) { LIMITED_METHOD_CONTRACT; CodeHeader * pHdr = dac_cast<PTR_CodeHeader>(PCODEToPINSTR(codeStart)) - 1; // Pointer to the TrackAllocation record is stored just before the code header return *((TrackAllocation **)(pHdr) - 1); } HostCodeHeap* HostCodeHeap::GetCodeHeap(TADDR codeStart) { WRAPPER_NO_CONTRACT; return HostCodeHeap::GetTrackAllocation(codeStart)->pHeap; } #ifndef DACCESS_COMPILE void HostCodeHeap::FreeMemForCode(void * codeStart) { LIMITED_METHOD_CONTRACT; TrackAllocation *pTracker = HostCodeHeap::GetTrackAllocation((TADDR)codeStart); ExecutableWriterHolder<TrackAllocation> trackerWriterHolder(pTracker, sizeof(TrackAllocation)); AddToFreeList(pTracker, trackerWriterHolder.GetRW()); m_ApproximateLargestBlock += pTracker->size; m_AllocationCount--; LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap released [0x%p, vt(0x%x)] - ref count %d\n", this, *(size_t*)this, m_AllocationCount)); if (m_AllocationCount == 0) { m_pJitManager->AddToCleanupList(this); } } // // Implementation for DynamicMethodDesc declared in method.hpp // void DynamicMethodDesc::Destroy() { CONTRACTL { THROWS; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; _ASSERTE(IsDynamicMethod()); LoaderAllocator *pLoaderAllocator = GetLoaderAllocator(); LOG((LF_BCL, LL_INFO1000, "Level3 - Destroying DynamicMethod {0x%p}\n", this)); // The m_pSig and m_pszMethodName need to be destroyed after the GetLCGMethodResolver()->Destroy() call // otherwise the EEJitManager::CodeHeapIterator could return DynamicMethodDesc with these members NULLed, but // the nibble map for the corresponding code memory indicating that this DynamicMethodDesc is still alive. PCODE pSig = m_pSig; PTR_CUTF8 pszMethodName = m_pszMethodName; GetLCGMethodResolver()->Destroy(); // The current DynamicMethodDesc storage is destroyed at this point if (pszMethodName != NULL) { delete[] pszMethodName; } if (pSig != NULL) { delete[] (BYTE*)pSig; } if (pLoaderAllocator->IsCollectible()) { if (pLoaderAllocator->Release()) { GCX_PREEMP(); LoaderAllocator::GCLoaderAllocators(pLoaderAllocator); } } } // // The resolver object is reused when the method is destroyed, // this will reset its state for the next use. // void LCGMethodResolver::Reset() { m_DynamicStringLiterals = NULL; m_recordCodePointer = NULL; m_UsedIndCellList = NULL; m_pJumpStubCache = NULL; m_next = NULL; m_Code = NULL; } // // Recycle all the indcells in m_UsedIndCellList by adding them to the free list // void LCGMethodResolver::RecycleIndCells() { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; // Append the list of indirection cells used by this dynamic method to the free list IndCellList * list = m_UsedIndCellList; if (list) { BYTE * cellhead = list->indcell; BYTE * cellprev = NULL; BYTE * cellcurr = NULL; // Build a linked list of indirection cells from m_UsedIndCellList. // No need to lock newlist because this method is only called during the finalization of // DynamicResolver.DestroyScout and at that time no one else should be modifying m_UsedIndCellList. while (list) { cellcurr = list->indcell; _ASSERTE(cellcurr != NULL); if (cellprev) *((BYTE**)cellprev) = cellcurr; list = list->pNext; cellprev = cellcurr; } // Insert the linked list to the free list of the VirtualCallStubManager of the current domain. // We should use GetLoaderAllocator because that is where the ind cell was allocated. 
LoaderAllocator *pLoaderAllocator = GetDynamicMethod()->GetLoaderAllocator(); VirtualCallStubManager *pMgr = pLoaderAllocator->GetVirtualCallStubManager(); pMgr->InsertIntoRecycledIndCellList_Locked(cellhead, cellcurr); m_UsedIndCellList = NULL; } } void LCGMethodResolver::Destroy() { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; LOG((LF_BCL, LL_INFO100, "Level2 - Resolver - Destroying Resolver {0x%p}\n", this)); if (m_Code) { delete[] m_Code; m_Code = NULL; } m_CodeSize = 0; if (!m_LocalSig.IsNull()) { delete[] m_LocalSig.GetPtr(); m_LocalSig = SigPointer(); } // Get the global string literal interning map GlobalStringLiteralMap* pStringLiteralMap = SystemDomain::GetGlobalStringLiteralMapNoCreate(); // release references to all the string literals used in this Dynamic Method if (pStringLiteralMap != NULL) { // lock the global string literal interning map // we cannot use GetGlobalStringLiteralMap() here because it might throw CrstHolder gch(pStringLiteralMap->GetHashTableCrstGlobal()); // Access to m_DynamicStringLiterals doesn't need to be syncrhonized because // this can be run in only one thread: the finalizer thread. while (m_DynamicStringLiterals != NULL) { m_DynamicStringLiterals->m_pEntry->Release(); m_DynamicStringLiterals = m_DynamicStringLiterals->m_pNext; } } // Note that we need to do this before m_jitTempData is deleted RecycleIndCells(); m_jitMetaHeap.Delete(); m_jitTempData.Delete(); if (m_recordCodePointer) { #if defined(TARGET_AMD64) // Remove the unwind information (if applicable) UnwindInfoTable::UnpublishUnwindInfoForMethod((TADDR)m_recordCodePointer); #endif // defined(TARGET_AMD64) HostCodeHeap *pHeap = HostCodeHeap::GetCodeHeap((TADDR)m_recordCodePointer); LOG((LF_BCL, LL_INFO1000, "Level3 - Resolver {0x%p} - Release reference to heap {%p, vt(0x%x)} \n", this, pHeap, *(size_t*)pHeap)); pHeap->m_pJitManager->FreeCodeMemory(pHeap, m_recordCodePointer); m_recordCodePointer = NULL; } if (m_pJumpStubCache != NULL) { JumpStubBlockHeader* current = m_pJumpStubCache->m_pBlocks; while (current) { JumpStubBlockHeader* next = current->m_next; HostCodeHeap *pHeap = current->GetHostCodeHeap(); LOG((LF_BCL, LL_INFO1000, "Level3 - Resolver {0x%p} - Release reference to heap {%p, vt(0x%x)} \n", current, pHeap, *(size_t*)pHeap)); pHeap->m_pJitManager->FreeCodeMemory(pHeap, current); current = next; } m_pJumpStubCache->m_pBlocks = NULL; delete m_pJumpStubCache; m_pJumpStubCache = NULL; } if (m_managedResolver) { ::DestroyLongWeakHandle(m_managedResolver); m_managedResolver = NULL; } m_DynamicMethodTable->LinkMethod(m_pDynamicMethod); } void LCGMethodResolver::FreeCompileTimeState() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; //m_jitTempData.Delete(); } void LCGMethodResolver::GetJitContext(SecurityControlFlags * securityControlFlags, TypeHandle *typeOwner) { CONTRACTL { STANDARD_VM_CHECK; PRECONDITION(CheckPointer(securityControlFlags)); PRECONDITION(CheckPointer(typeOwner)); } CONTRACTL_END; GCX_COOP(); MethodDescCallSite getJitContext(METHOD__RESOLVER__GET_JIT_CONTEXT, m_managedResolver); OBJECTREF resolver = ObjectFromHandle(m_managedResolver); _ASSERTE(resolver); // gc root must be up the stack ARG_SLOT args[] = { ObjToArgSlot(resolver), PtrToArgSlot(securityControlFlags), }; REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)getJitContext.Call_RetOBJECTREF(args); *typeOwner = refType != NULL ? 
refType->GetType() : TypeHandle(); } ChunkAllocator* LCGMethodResolver::GetJitMetaHeap() { LIMITED_METHOD_CONTRACT; return &m_jitMetaHeap; } BYTE* LCGMethodResolver::GetCodeInfo(unsigned *pCodeSize, unsigned *pStackSize, CorInfoOptions *pOptions, unsigned *pEHSize) { STANDARD_VM_CONTRACT; _ASSERTE(pCodeSize); if (!m_Code) { GCX_COOP(); LOG((LF_BCL, LL_INFO100000, "Level5 - DM-JIT: Getting CodeInfo on resolver 0x%p...\n", this)); // get the code - Byte[] Resolver.GetCodeInfo(ref ushort stackSize, ref int EHCount) MethodDescCallSite getCodeInfo(METHOD__RESOLVER__GET_CODE_INFO, m_managedResolver); OBJECTREF resolver = ObjectFromHandle(m_managedResolver); VALIDATEOBJECTREF(resolver); // gc root must be up the stack int32_t stackSize = 0, initLocals = 0, EHSize = 0; ARG_SLOT args[] = { ObjToArgSlot(resolver), PtrToArgSlot(&stackSize), PtrToArgSlot(&initLocals), PtrToArgSlot(&EHSize), }; U1ARRAYREF dataArray = (U1ARRAYREF) getCodeInfo.Call_RetOBJECTREF(args); DWORD codeSize = dataArray->GetNumComponents(); NewArrayHolder<BYTE> code(new BYTE[codeSize]); memcpy(code, dataArray->GetDataPtr(), codeSize); m_CodeSize = codeSize; _ASSERTE(FitsIn<unsigned short>(stackSize)); m_StackSize = static_cast<unsigned short>(stackSize); m_Options = (initLocals) ? CORINFO_OPT_INIT_LOCALS : (CorInfoOptions)0; _ASSERTE(FitsIn<unsigned short>(EHSize)); m_EHSize = static_cast<unsigned short>(EHSize); m_Code = (BYTE*)code; code.SuppressRelease(); LOG((LF_BCL, LL_INFO100000, "Level5 - DM-JIT: CodeInfo {0x%p} on resolver %p\n", m_Code, this)); } *pCodeSize = m_CodeSize; if (pStackSize) *pStackSize = m_StackSize; if (pOptions) *pOptions = m_Options; if (pEHSize) *pEHSize = m_EHSize; return m_Code; } //--------------------------------------------------------------------------------------- // SigPointer LCGMethodResolver::GetLocalSig() { STANDARD_VM_CONTRACT; if (m_LocalSig.IsNull()) { GCX_COOP(); LOG((LF_BCL, LL_INFO100000, "Level5 - DM-JIT: Getting LocalSig on resolver 0x%p...\n", this)); MethodDescCallSite getLocalsSignature(METHOD__RESOLVER__GET_LOCALS_SIGNATURE, m_managedResolver); OBJECTREF resolver = ObjectFromHandle(m_managedResolver); VALIDATEOBJECTREF(resolver); // gc root must be up the stack ARG_SLOT args[] = { ObjToArgSlot(resolver) }; U1ARRAYREF dataArray = (U1ARRAYREF) getLocalsSignature.Call_RetOBJECTREF(args); DWORD localSigSize = dataArray->GetNumComponents(); NewArrayHolder<COR_SIGNATURE> localSig(new COR_SIGNATURE[localSigSize]); memcpy((void *)localSig, dataArray->GetDataPtr(), localSigSize); m_LocalSig = SigPointer((PCCOR_SIGNATURE)localSig, localSigSize); localSig.SuppressRelease(); LOG((LF_BCL, LL_INFO100000, "Level5 - DM-JIT: LocalSig {0x%p} on resolver %p\n", m_LocalSig.GetPtr(), this)); } return m_LocalSig; } // LCGMethodResolver::GetLocalSig //--------------------------------------------------------------------------------------- // OBJECTHANDLE LCGMethodResolver::ConstructStringLiteral(mdToken metaTok) { STANDARD_VM_CONTRACT; GCX_COOP(); OBJECTHANDLE string = NULL; STRINGREF strRef = GetStringLiteral(metaTok); GCPROTECT_BEGIN(strRef); if (strRef != NULL) { // Instead of storing the string literal in the appdomain specific string literal map, // we store it in the dynamic method specific string liternal list // This way we can release it when the dynamic method is collected. 
string = (OBJECTHANDLE)GetOrInternString(&strRef); } GCPROTECT_END(); return string; } //--------------------------------------------------------------------------------------- // BOOL LCGMethodResolver::IsValidStringRef(mdToken metaTok) { STANDARD_VM_CONTRACT; GCX_COOP(); return GetStringLiteral(metaTok) != NULL; } int LCGMethodResolver::GetStringLiteralLength(mdToken metaTok) { STANDARD_VM_CONTRACT; GCX_COOP(); STRINGREF str = GetStringLiteral(metaTok); if (str != NULL) { return str->GetStringLength(); } return -1; } //--------------------------------------------------------------------------------------- // STRINGREF LCGMethodResolver::GetStringLiteral( mdToken token) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; MethodDescCallSite getStringLiteral(METHOD__RESOLVER__GET_STRING_LITERAL, m_managedResolver); OBJECTREF resolver = ObjectFromHandle(m_managedResolver); VALIDATEOBJECTREF(resolver); // gc root must be up the stack ARG_SLOT args[] = { ObjToArgSlot(resolver), token, }; return getStringLiteral.Call_RetSTRINGREF(args); } // This method will get the interned string by calling GetInternedString on the // global string liternal interning map. It will also store the returned entry // in m_DynamicStringLiterals STRINGREF* LCGMethodResolver::GetOrInternString(STRINGREF *pProtectedStringRef) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; PRECONDITION(CheckPointer(pProtectedStringRef)); } CONTRACTL_END; // Get the global string literal interning map GlobalStringLiteralMap* pStringLiteralMap = SystemDomain::GetGlobalStringLiteralMap(); // Calculating the hash: EEUnicodeHashTableHelper::GetHash EEStringData StringData = EEStringData((*pProtectedStringRef)->GetStringLength(), (*pProtectedStringRef)->GetBuffer()); DWORD dwHash = pStringLiteralMap->GetHash(&StringData); // lock the global string literal interning map CrstHolder gch(pStringLiteralMap->GetHashTableCrstGlobal()); StringLiteralEntryHolder pEntry(pStringLiteralMap->GetInternedString(pProtectedStringRef, dwHash, /* bAddIfNotFound */ TRUE)); DynamicStringLiteral* pStringLiteral = (DynamicStringLiteral*)m_jitTempData.New(sizeof(DynamicStringLiteral)); pStringLiteral->m_pEntry = pEntry.Extract(); // Add to m_DynamicStringLiterals: // we don't need to check for duplicate because the string literal entries in // the global string literal map are ref counted. pStringLiteral->m_pNext = m_DynamicStringLiterals; m_DynamicStringLiterals = pStringLiteral; return pStringLiteral->m_pEntry->GetStringObject(); } // AddToUsedIndCellList adds a IndCellList link to the beginning of m_UsedIndCellList. It is called by // code:CEEInfo::getCallInfo when a indirection cell is allocated for m_pDynamicMethod. 
// All the indirection cells usded by m_pDynamicMethod will be recycled when this resolver // is finalized, see code:LCGMethodResolver::RecycleIndCells void LCGMethodResolver::AddToUsedIndCellList(BYTE * indcell) { CONTRACTL { STANDARD_VM_CHECK; PRECONDITION(CheckPointer(indcell)); } CONTRACTL_END; IndCellList * link = (IndCellList *)m_jitTempData.New(sizeof(IndCellList)); link->indcell = indcell; // Insert into m_UsedIndCellList while (true) { link->pNext = m_UsedIndCellList; if (InterlockedCompareExchangeT(&m_UsedIndCellList, link, link->pNext) == link->pNext) break; } } void LCGMethodResolver::ResolveToken(mdToken token, TypeHandle * pTH, MethodDesc ** ppMD, FieldDesc ** ppFD) { STANDARD_VM_CONTRACT; GCX_COOP(); PREPARE_SIMPLE_VIRTUAL_CALLSITE(METHOD__RESOLVER__RESOLVE_TOKEN, ObjectFromHandle(m_managedResolver)); DECLARE_ARGHOLDER_ARRAY(args, 5); args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(ObjectFromHandle(m_managedResolver)); args[ARGNUM_1] = DWORD_TO_ARGHOLDER(token); args[ARGNUM_2] = pTH; args[ARGNUM_3] = ppMD; args[ARGNUM_4] = ppFD; CALL_MANAGED_METHOD_NORET(args); _ASSERTE(*ppMD == NULL || *ppFD == NULL); if (pTH->IsNull()) { if (*ppMD != NULL) *pTH = (*ppMD)->GetMethodTable(); else if (*ppFD != NULL) *pTH = (*ppFD)->GetEnclosingMethodTable(); } _ASSERTE(!pTH->IsNull()); } //--------------------------------------------------------------------------------------- // SigPointer LCGMethodResolver::ResolveSignature( mdToken token) { STANDARD_VM_CONTRACT; GCX_COOP(); U1ARRAYREF dataArray = NULL; PREPARE_SIMPLE_VIRTUAL_CALLSITE(METHOD__RESOLVER__RESOLVE_SIGNATURE, ObjectFromHandle(m_managedResolver)); DECLARE_ARGHOLDER_ARRAY(args, 3); args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(ObjectFromHandle(m_managedResolver)); args[ARGNUM_1] = DWORD_TO_ARGHOLDER(token); args[ARGNUM_2] = DWORD_TO_ARGHOLDER(0); CALL_MANAGED_METHOD_RETREF(dataArray, U1ARRAYREF, args); if (dataArray == NULL) COMPlusThrow(kInvalidProgramException); DWORD cbSig = dataArray->GetNumComponents(); PCCOR_SIGNATURE pSig = (PCCOR_SIGNATURE)m_jitTempData.New(cbSig); memcpy((void *)pSig, dataArray->GetDataPtr(), cbSig); return SigPointer(pSig, cbSig); } // LCGMethodResolver::ResolveSignature //--------------------------------------------------------------------------------------- // SigPointer LCGMethodResolver::ResolveSignatureForVarArg( mdToken token) { STANDARD_VM_CONTRACT; GCX_COOP(); U1ARRAYREF dataArray = NULL; PREPARE_SIMPLE_VIRTUAL_CALLSITE(METHOD__RESOLVER__RESOLVE_SIGNATURE, ObjectFromHandle(m_managedResolver)); DECLARE_ARGHOLDER_ARRAY(args, 3); args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(ObjectFromHandle(m_managedResolver)); args[ARGNUM_1] = DWORD_TO_ARGHOLDER(token); args[ARGNUM_2] = DWORD_TO_ARGHOLDER(1); CALL_MANAGED_METHOD_RETREF(dataArray, U1ARRAYREF, args); if (dataArray == NULL) COMPlusThrow(kInvalidProgramException); DWORD cbSig = dataArray->GetNumComponents(); PCCOR_SIGNATURE pSig = (PCCOR_SIGNATURE)m_jitTempData.New(cbSig); memcpy((void *)pSig, dataArray->GetDataPtr(), cbSig); return SigPointer(pSig, cbSig); } // LCGMethodResolver::ResolveSignatureForVarArg //--------------------------------------------------------------------------------------- // void LCGMethodResolver::GetEHInfo(unsigned EHnumber, CORINFO_EH_CLAUSE* clause) { STANDARD_VM_CONTRACT; GCX_COOP(); // attempt to get the raw EHInfo first { U1ARRAYREF dataArray; PREPARE_SIMPLE_VIRTUAL_CALLSITE(METHOD__RESOLVER__GET_RAW_EH_INFO, ObjectFromHandle(m_managedResolver)); DECLARE_ARGHOLDER_ARRAY(args, 1); args[ARGNUM_0] = 
OBJECTREF_TO_ARGHOLDER(ObjectFromHandle(m_managedResolver)); CALL_MANAGED_METHOD_RETREF(dataArray, U1ARRAYREF, args); if (dataArray != NULL) { COR_ILMETHOD_SECT_EH* pEH = (COR_ILMETHOD_SECT_EH*)dataArray->GetDataPtr(); COR_ILMETHOD_SECT_EH_CLAUSE_FAT ehClause; const COR_ILMETHOD_SECT_EH_CLAUSE_FAT* ehInfo; ehInfo = (COR_ILMETHOD_SECT_EH_CLAUSE_FAT*)pEH->EHClause(EHnumber, &ehClause); clause->Flags = (CORINFO_EH_CLAUSE_FLAGS)ehInfo->GetFlags(); clause->TryOffset = ehInfo->GetTryOffset(); clause->TryLength = ehInfo->GetTryLength(); clause->HandlerOffset = ehInfo->GetHandlerOffset(); clause->HandlerLength = ehInfo->GetHandlerLength(); clause->ClassToken = ehInfo->GetClassToken(); clause->FilterOffset = ehInfo->GetFilterOffset(); return; } } // failed, get the info off the ilgenerator { PREPARE_SIMPLE_VIRTUAL_CALLSITE(METHOD__RESOLVER__GET_EH_INFO, ObjectFromHandle(m_managedResolver)); DECLARE_ARGHOLDER_ARRAY(args, 3); args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(ObjectFromHandle(m_managedResolver)); args[ARGNUM_1] = DWORD_TO_ARGHOLDER(EHnumber); args[ARGNUM_2] = PTR_TO_ARGHOLDER(clause); CALL_MANAGED_METHOD_NORET(args); } } #endif // !DACCESS_COMPILE // Get the associated managed resolver. This method will be called during a GC so it should not throw, trigger a GC or cause the // object in question to be validated. OBJECTREF LCGMethodResolver::GetManagedResolver() { LIMITED_METHOD_CONTRACT; return ObjectFromHandle(m_managedResolver); } // // ChunkAllocator implementation // ChunkAllocator::~ChunkAllocator() { LIMITED_METHOD_CONTRACT; Delete(); } void ChunkAllocator::Delete() { LIMITED_METHOD_CONTRACT; BYTE *next = NULL; LOG((LF_BCL, LL_INFO10, "Level1 - DM - Allocator [0x%p] - deleting...\n", this)); while (m_pData) { LOG((LF_BCL, LL_INFO10, "Level1 - DM - Allocator [0x%p] - delete block {0x%p}\n", this, m_pData)); next = ((BYTE**)m_pData)[0]; delete[] m_pData; m_pData = next; } } void* ChunkAllocator::New(size_t size) { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; // We need to align it, otherwise we might get DataMisalignedException on IA64 size = ALIGN_UP(size, sizeof(void *)); BYTE *pNewBlock = NULL; LOG((LF_BCL, LL_INFO100, "Level2 - DM - Allocator [0x%p] - allocation requested 0x%X, available 0x%X\n", this, size, (m_pData) ? 
((size_t*)m_pData)[1] : 0)); if (m_pData) { // we may have room available size_t available = ((size_t*)m_pData)[1]; if (size <= available) { LOG((LF_BCL, LL_INFO100, "Level2 - DM - Allocator [0x%p] - reusing block {0x%p}\n", this, m_pData)); ((size_t*)m_pData)[1] = available - size; pNewBlock = (m_pData + CHUNK_SIZE - available); LOG((LF_BCL, LL_INFO100, "Level2 - DM - Allocator [0x%p] - ptr -> 0x%p, available 0x%X\n", this, pNewBlock, ((size_t*)m_pData)[1])); return pNewBlock; } } // no available - need to allocate a new buffer if (size + (sizeof(void*) * 2) < CHUNK_SIZE) { // make the allocation NewArrayHolder<BYTE> newBlock(new BYTE[CHUNK_SIZE]); pNewBlock = (BYTE*)newBlock; ((size_t*)pNewBlock)[1] = CHUNK_SIZE - size - (sizeof(void*) * 2); LOG((LF_BCL, LL_INFO10, "Level1 - DM - Allocator [0x%p] - new block {0x%p}\n", this, pNewBlock)); newBlock.SuppressRelease(); } else { // request bigger than default size this is going to be a single block NewArrayHolder<BYTE> newBlock(new BYTE[size + (sizeof(void*) * 2)]); pNewBlock = (BYTE*)newBlock; ((size_t*)pNewBlock)[1] = 0; // no available bytes left LOG((LF_BCL, LL_INFO10, "Level1 - DM - Allocator [0x%p] - new BIG block {0x%p}\n", this, pNewBlock)); newBlock.SuppressRelease(); } // all we have left to do is to link the block. // We leave at the top the block with more bytes available if (m_pData) { if (((size_t*)pNewBlock)[1] > ((size_t*)m_pData)[1]) { ((BYTE**)pNewBlock)[0] = m_pData; m_pData = pNewBlock; } else { ((BYTE**)pNewBlock)[0] = ((BYTE**)m_pData)[0]; ((BYTE**)m_pData)[0] = pNewBlock; } } else { // this is the first allocation m_pData = pNewBlock; ((BYTE**)m_pData)[0] = NULL; } pNewBlock += (sizeof(void*) * 2); LOG((LF_BCL, LL_INFO100, "Level2 - DM - Allocator [0x%p] - ptr -> 0x%p, available 0x%X\n", this, pNewBlock, ((size_t*)m_pData)[1])); return pNewBlock; }
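
HostCodeHeap::AddToFreeList above keeps its free list sorted by address so a returned block can be merged with the block immediately before or after it. The sketch below shows that address-ordered insert-and-coalesce pattern on its own, with plain pointers instead of ExecutableWriterHolder and no logging; it is a simplified illustration of the technique, not the runtime's code.

#include <cstddef>
#include <cstdint>

// Analogue of TrackAllocation: a header at the start of every free range.
struct FreeBlock {
    FreeBlock* next;
    size_t     size;    // size of the whole range in bytes, header included
};

// Insert 'blk' into an address-ordered free list and merge it with touching neighbours.
static void AddToFreeList(FreeBlock*& head, FreeBlock* blk)
{
    const std::uintptr_t blkAddr = reinterpret_cast<std::uintptr_t>(blk);

    FreeBlock* prev = nullptr;
    FreeBlock* cur  = head;
    while (cur && reinterpret_cast<std::uintptr_t>(cur) < blkAddr) {
        prev = cur;                       // find the first block that lies after 'blk'
        cur  = cur->next;
    }

    blk->next = cur;
    if (prev) prev->next = blk; else head = blk;

    // coalesce with the following block if the ranges touch
    if (cur && blkAddr + blk->size == reinterpret_cast<std::uintptr_t>(cur)) {
        blk->size += cur->size;
        blk->next  = cur->next;
    }

    // coalesce with the preceding block if the ranges touch
    if (prev && reinterpret_cast<std::uintptr_t>(prev) + prev->size == blkAddr) {
        prev->size += blk->size;
        prev->next  = blk->next;
    }
}
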
-1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set; that doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
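
The change described here reduces to one pattern: a user-configurable flag must not gate behaviour that the target ABI requires. The sketch below is hypothetical (the member names are modelled on the description, not taken from the RyuJIT sources) and only illustrates why the capability query has to differ from the raw flag on Arm64.

// Hypothetical illustration of the flag vs. capability distinction -- not the JIT code.
struct CompilerConfig {
    bool featureSIMD;     // tracks COMPlus_FeatureSIMD, so the user can turn it off
    bool targetIsArm64;

    // SIMD types participate in the Arm64 ABI (vector/HVA argument passing),
    // so the capability query must not simply echo the user flag there.
    bool supportSIMDTypes() const
    {
        return targetIsArm64 ? true : featureSIMD;
    }
};

// Call sites ask the capability question instead of reading the raw flag:
//   if (config.supportSIMDTypes()) { /* classify and pass vector arguments correctly */ }
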
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set; that doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/mono/mono/utils/os-event.h
/** * \file */ #ifndef _MONO_UTILS_OS_EVENT_H_ #define _MONO_UTILS_OS_EVENT_H_ #include <config.h> #include <glib.h> #include <mono/utils/mono-publib.h> #include "mono-os-mutex.h" #define MONO_INFINITE_WAIT ((guint32) 0xFFFFFFFF) #define MONO_OS_EVENT_WAIT_MAXIMUM_OBJECTS 64 typedef enum { MONO_OS_EVENT_WAIT_RET_SUCCESS_0 = 0, MONO_OS_EVENT_WAIT_RET_ALERTED = -1, MONO_OS_EVENT_WAIT_RET_TIMEOUT = -2, } MonoOSEventWaitRet; typedef struct _MonoOSEvent MonoOSEvent; typedef void (*MonoOSEventFreeCb) (MonoOSEvent*); struct _MonoOSEvent { #ifdef HOST_WIN32 gpointer handle; #else GPtrArray *conds; gboolean signalled; #endif }; MONO_API void mono_os_event_init (MonoOSEvent *event, gboolean initial); MONO_API void mono_os_event_destroy (MonoOSEvent *event); MONO_API void mono_os_event_set (MonoOSEvent *event); MONO_API void mono_os_event_reset (MonoOSEvent *event); MONO_API MonoOSEventWaitRet mono_os_event_wait_one (MonoOSEvent *event, guint32 timeout, gboolean alertable); MONO_API MonoOSEventWaitRet mono_os_event_wait_multiple (MonoOSEvent **events, gsize nevents, gboolean waitall, guint32 timeout, gboolean alertable); #endif /* _MONO_UTILS_OS_EVENT_H_ */
/** * \file */ #ifndef _MONO_UTILS_OS_EVENT_H_ #define _MONO_UTILS_OS_EVENT_H_ #include <config.h> #include <glib.h> #include <mono/utils/mono-publib.h> #include "mono-os-mutex.h" #define MONO_INFINITE_WAIT ((guint32) 0xFFFFFFFF) #define MONO_OS_EVENT_WAIT_MAXIMUM_OBJECTS 64 typedef enum { MONO_OS_EVENT_WAIT_RET_SUCCESS_0 = 0, MONO_OS_EVENT_WAIT_RET_ALERTED = -1, MONO_OS_EVENT_WAIT_RET_TIMEOUT = -2, } MonoOSEventWaitRet; typedef struct _MonoOSEvent MonoOSEvent; typedef void (*MonoOSEventFreeCb) (MonoOSEvent*); struct _MonoOSEvent { #ifdef HOST_WIN32 gpointer handle; #else GPtrArray *conds; gboolean signalled; #endif }; MONO_API void mono_os_event_init (MonoOSEvent *event, gboolean initial); MONO_API void mono_os_event_destroy (MonoOSEvent *event); MONO_API void mono_os_event_set (MonoOSEvent *event); MONO_API void mono_os_event_reset (MonoOSEvent *event); MONO_API MonoOSEventWaitRet mono_os_event_wait_one (MonoOSEvent *event, guint32 timeout, gboolean alertable); MONO_API MonoOSEventWaitRet mono_os_event_wait_multiple (MonoOSEvent **events, gsize nevents, gboolean waitall, guint32 timeout, gboolean alertable); #endif /* _MONO_UTILS_OS_EVENT_H_ */
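
os-event.h above only declares the API, so a short usage sketch may be helpful. It assumes a Mono build environment (this header plus glib) and that the `initial` argument selects the starting signalled state; that reading is inferred from the parameter name, not stated in the header.

#include <mono/utils/os-event.h>

static void
event_usage_sketch (void)
{
	MonoOSEvent ev;

	mono_os_event_init (&ev, FALSE);   /* start unsignalled (assumed meaning of 'initial') */

	mono_os_event_set (&ev);           /* signal the event; a blocked waiter would be released */

	/* wait up to 100 ms, non-alertable; the result is one of MonoOSEventWaitRet */
	MonoOSEventWaitRet ret = mono_os_event_wait_one (&ev, 100, FALSE);
	if (ret == MONO_OS_EVENT_WAIT_RET_SUCCESS_0) {
		/* the event was signalled before the timeout */
	}

	mono_os_event_destroy (&ev);
}
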
-1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set; that doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set; that doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/vm/threadpoolrequest.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //========================================================================= // // ThreadPoolRequest.h // // // This file contains definitions of classes needed to mainain per-appdomain // thread pool work requests. This is needed as unmanaged and managed work // requests are allocted, managed and dispatched in drastically different ways. // However, the scheduler need be aware of these differences, and it should // simply talk to a common interface for managing work request counts. // //========================================================================= #ifndef _THREADPOOL_REQUEST_H #define _THREADPOOL_REQUEST_H #include "util.hpp" #define TP_QUANTUM 2 #define UNUSED_THREADPOOL_INDEX (DWORD)-1 //-------------------------------------------------------------------------- //IPerAppDomainTPCount is an interface for implementing per-appdomain thread //pool state. It's implementation should include logic to maintain work-counts, //notify thread pool class when work arrives or no work is left. Finally //there is logic to dipatch work items correctly in the right domain. // //Notes: //This class was designed to support both the managed and unmanaged uses //of thread pool. The unmananged part may directly be used through com //interfaces. The differences between the actual management of counts and //dispatching of work is quite different between the two. This interface //hides these differences to the thread scheduler implemented by the thread pool //class. // class IPerAppDomainTPCount{ public: virtual void ResetState() = 0; virtual BOOL IsRequestPending() = 0; //This functions marks the beginning of requests queued for the domain. //It needs to notify the scheduler of work-arrival among other things. virtual void SetAppDomainRequestsActive() = 0; //This functions marks the end of requests queued for this domain. virtual void ClearAppDomainRequestsActive() = 0; //Clears the "active" flag if it was set, and returns whether it was set. virtual bool TakeActiveRequest() = 0; //Takes care of dispatching requests in the right domain. virtual void DispatchWorkItem(bool* foundWork, bool* wasNotRecalled) = 0; virtual void SetTPIndexUnused() = 0; virtual BOOL IsTPIndexUnused() = 0; virtual void SetTPIndex(TPIndex index) = 0; }; typedef DPTR(IPerAppDomainTPCount) PTR_IPerAppDomainTPCount; #ifdef _MSC_VER // Disable this warning - we intentionally want __declspec(align()) to insert padding for us #pragma warning(disable: 4324) // structure was padded due to __declspec(align()) #endif //-------------------------------------------------------------------------- //ManagedPerAppDomainTPCount maintains per-appdomain thread pool state. //This class maintains the count of per-appdomain work-items queued by //ThreadPool.QueueUserWorkItem. It also dispatches threads in the appdomain //correctly by setting up the right exception handling frames etc. // //Note: The counts are not accurate, and neither do they need to be. The //actual work queue is in managed (implemented in threadpool.cs). This class //just provides heuristics to the thread pool scheduler, along with //synchronization to indicate start/end of requests to the scheduler. 
class ManagedPerAppDomainTPCount : public IPerAppDomainTPCount { public: ManagedPerAppDomainTPCount(TPIndex index) {ResetState(); m_index = index;} inline void ResetState() { LIMITED_METHOD_CONTRACT; VolatileStore(&m_numRequestsPending, (LONG)0); } inline BOOL IsRequestPending() { LIMITED_METHOD_CONTRACT; LONG count = VolatileLoad(&m_numRequestsPending); return count > 0; } void SetAppDomainRequestsActive(); void ClearAppDomainRequestsActive(); bool TakeActiveRequest(); inline void SetTPIndex(TPIndex index) { LIMITED_METHOD_CONTRACT; //This function should be called during appdomain creation when no managed code //has started running yet. That implies, no requests should be pending //or dispatched to this structure yet. _ASSERTE(m_index.m_dwIndex == UNUSED_THREADPOOL_INDEX); m_index = index; } inline BOOL IsTPIndexUnused() { LIMITED_METHOD_CONTRACT; if (m_index.m_dwIndex == UNUSED_THREADPOOL_INDEX) { return TRUE; } return FALSE; } inline void SetTPIndexUnused() { WRAPPER_NO_CONTRACT; m_index.m_dwIndex = UNUSED_THREADPOOL_INDEX; } void DispatchWorkItem(bool* foundWork, bool* wasNotRecalled); private: TPIndex m_index; struct DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) { BYTE m_padding1[MAX_CACHE_LINE_SIZE - sizeof(LONG)]; // Only use with VolatileLoad+VolatileStore+FastInterlockCompareExchange LONG m_numRequestsPending; BYTE m_padding2[MAX_CACHE_LINE_SIZE]; }; }; //-------------------------------------------------------------------------- //UnManagedPerAppDomainTPCount maintains the thread pool state/counts for //unmanaged work requests. From thread pool point of view we treat unmanaged //requests as a special "appdomain". This helps in scheduling policies, and //follow same fairness policies as requests in other appdomains. class UnManagedPerAppDomainTPCount : public IPerAppDomainTPCount { public: UnManagedPerAppDomainTPCount() { LIMITED_METHOD_CONTRACT; ResetState(); } inline void ResetState() { LIMITED_METHOD_CONTRACT; m_NumRequests = 0; VolatileStore(&m_outstandingThreadRequestCount, (LONG)0); } inline BOOL IsRequestPending() { LIMITED_METHOD_CONTRACT; return VolatileLoad(&m_outstandingThreadRequestCount) != (LONG)0 ? TRUE : FALSE; } void SetAppDomainRequestsActive(); inline void ClearAppDomainRequestsActive() { LIMITED_METHOD_CONTRACT; VolatileStore(&m_outstandingThreadRequestCount, (LONG)0); } bool TakeActiveRequest(); void QueueUnmanagedWorkRequest(LPTHREAD_START_ROUTINE function, PVOID context); PVOID DeQueueUnManagedWorkRequest(bool* lastOne); void DispatchWorkItem(bool* foundWork, bool* wasNotRecalled); inline void SetTPIndexUnused() { WRAPPER_NO_CONTRACT; _ASSERT(FALSE); } inline BOOL IsTPIndexUnused() { WRAPPER_NO_CONTRACT; _ASSERT(FALSE); return FALSE; } inline void SetTPIndex(TPIndex index) { WRAPPER_NO_CONTRACT; _ASSERT(FALSE); } inline ULONG GetNumRequests() { LIMITED_METHOD_CONTRACT; return VolatileLoad(&m_NumRequests); } private: SpinLock m_lock; ULONG m_NumRequests; struct DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) { BYTE m_padding1[MAX_CACHE_LINE_SIZE - sizeof(LONG)]; // Only use with VolatileLoad+VolatileStore+FastInterlockCompareExchange LONG m_outstandingThreadRequestCount; BYTE m_padding2[MAX_CACHE_LINE_SIZE]; }; }; #ifdef _MSC_VER #pragma warning(default: 4324) // structure was padded due to __declspec(align()) #endif //-------------------------------------------------------------------------- //PerAppDomainTPCountList maintains the collection of per-appdomain thread //pool states. Per appdomain counts are added to the list during appdomain //creation inside the sdomain lock. 
The counts are reset during appdomain //unload, after all the threads in the appdomain have exited. // //This class owns the collection of per-appdomain counts, hands them out by TPIndex, and //also holds the special count used for unmanaged work requests. // //Note: The counts are not accurate, and neither do they need to be. The //actual work queue is in managed (implemented in threadpool.cs). This class //just provides heuristics to the thread pool scheduler, along with //synchronization to indicate start/end of requests to the scheduler. class PerAppDomainTPCountList{ public: static void InitAppDomainIndexList(); static void ResetAppDomainIndex(TPIndex index); static bool AreRequestsPendingInAnyAppDomains(); static LONG GetAppDomainIndexForThreadpoolDispatch(); static TPIndex AddNewTPIndex(); inline static IPerAppDomainTPCount* GetPerAppdomainCount(TPIndex index) { return dac_cast<PTR_IPerAppDomainTPCount>(s_appDomainIndexList.Get(index.m_dwIndex-1)); } inline static UnManagedPerAppDomainTPCount* GetUnmanagedTPCount() { return &s_unmanagedTPCount; } private: static DWORD FindFirstFreeTpEntry(); static BYTE s_padding[MAX_CACHE_LINE_SIZE - sizeof(LONG)]; DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) static LONG s_ADHint; DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) static UnManagedPerAppDomainTPCount s_unmanagedTPCount; //The list of all per-appdomain work-request counts. static ArrayListStatic s_appDomainIndexList; }; #endif //_THREADPOOL_REQUEST_H
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //========================================================================= // // ThreadPoolRequest.h // // // This file contains definitions of classes needed to mainain per-appdomain // thread pool work requests. This is needed as unmanaged and managed work // requests are allocted, managed and dispatched in drastically different ways. // However, the scheduler need be aware of these differences, and it should // simply talk to a common interface for managing work request counts. // //========================================================================= #ifndef _THREADPOOL_REQUEST_H #define _THREADPOOL_REQUEST_H #include "util.hpp" #define TP_QUANTUM 2 #define UNUSED_THREADPOOL_INDEX (DWORD)-1 //-------------------------------------------------------------------------- //IPerAppDomainTPCount is an interface for implementing per-appdomain thread //pool state. It's implementation should include logic to maintain work-counts, //notify thread pool class when work arrives or no work is left. Finally //there is logic to dipatch work items correctly in the right domain. // //Notes: //This class was designed to support both the managed and unmanaged uses //of thread pool. The unmananged part may directly be used through com //interfaces. The differences between the actual management of counts and //dispatching of work is quite different between the two. This interface //hides these differences to the thread scheduler implemented by the thread pool //class. // class IPerAppDomainTPCount{ public: virtual void ResetState() = 0; virtual BOOL IsRequestPending() = 0; //This functions marks the beginning of requests queued for the domain. //It needs to notify the scheduler of work-arrival among other things. virtual void SetAppDomainRequestsActive() = 0; //This functions marks the end of requests queued for this domain. virtual void ClearAppDomainRequestsActive() = 0; //Clears the "active" flag if it was set, and returns whether it was set. virtual bool TakeActiveRequest() = 0; //Takes care of dispatching requests in the right domain. virtual void DispatchWorkItem(bool* foundWork, bool* wasNotRecalled) = 0; virtual void SetTPIndexUnused() = 0; virtual BOOL IsTPIndexUnused() = 0; virtual void SetTPIndex(TPIndex index) = 0; }; typedef DPTR(IPerAppDomainTPCount) PTR_IPerAppDomainTPCount; #ifdef _MSC_VER // Disable this warning - we intentionally want __declspec(align()) to insert padding for us #pragma warning(disable: 4324) // structure was padded due to __declspec(align()) #endif //-------------------------------------------------------------------------- //ManagedPerAppDomainTPCount maintains per-appdomain thread pool state. //This class maintains the count of per-appdomain work-items queued by //ThreadPool.QueueUserWorkItem. It also dispatches threads in the appdomain //correctly by setting up the right exception handling frames etc. // //Note: The counts are not accurate, and neither do they need to be. The //actual work queue is in managed (implemented in threadpool.cs). This class //just provides heuristics to the thread pool scheduler, along with //synchronization to indicate start/end of requests to the scheduler. 
class ManagedPerAppDomainTPCount : public IPerAppDomainTPCount { public: ManagedPerAppDomainTPCount(TPIndex index) {ResetState(); m_index = index;} inline void ResetState() { LIMITED_METHOD_CONTRACT; VolatileStore(&m_numRequestsPending, (LONG)0); } inline BOOL IsRequestPending() { LIMITED_METHOD_CONTRACT; LONG count = VolatileLoad(&m_numRequestsPending); return count > 0; } void SetAppDomainRequestsActive(); void ClearAppDomainRequestsActive(); bool TakeActiveRequest(); inline void SetTPIndex(TPIndex index) { LIMITED_METHOD_CONTRACT; //This function should be called during appdomain creation when no managed code //has started running yet. That implies, no requests should be pending //or dispatched to this structure yet. _ASSERTE(m_index.m_dwIndex == UNUSED_THREADPOOL_INDEX); m_index = index; } inline BOOL IsTPIndexUnused() { LIMITED_METHOD_CONTRACT; if (m_index.m_dwIndex == UNUSED_THREADPOOL_INDEX) { return TRUE; } return FALSE; } inline void SetTPIndexUnused() { WRAPPER_NO_CONTRACT; m_index.m_dwIndex = UNUSED_THREADPOOL_INDEX; } void DispatchWorkItem(bool* foundWork, bool* wasNotRecalled); private: TPIndex m_index; struct DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) { BYTE m_padding1[MAX_CACHE_LINE_SIZE - sizeof(LONG)]; // Only use with VolatileLoad+VolatileStore+FastInterlockCompareExchange LONG m_numRequestsPending; BYTE m_padding2[MAX_CACHE_LINE_SIZE]; }; }; //-------------------------------------------------------------------------- //UnManagedPerAppDomainTPCount maintains the thread pool state/counts for //unmanaged work requests. From thread pool point of view we treat unmanaged //requests as a special "appdomain". This helps in scheduling policies, and //follow same fairness policies as requests in other appdomains. class UnManagedPerAppDomainTPCount : public IPerAppDomainTPCount { public: UnManagedPerAppDomainTPCount() { LIMITED_METHOD_CONTRACT; ResetState(); } inline void ResetState() { LIMITED_METHOD_CONTRACT; m_NumRequests = 0; VolatileStore(&m_outstandingThreadRequestCount, (LONG)0); } inline BOOL IsRequestPending() { LIMITED_METHOD_CONTRACT; return VolatileLoad(&m_outstandingThreadRequestCount) != (LONG)0 ? TRUE : FALSE; } void SetAppDomainRequestsActive(); inline void ClearAppDomainRequestsActive() { LIMITED_METHOD_CONTRACT; VolatileStore(&m_outstandingThreadRequestCount, (LONG)0); } bool TakeActiveRequest(); void QueueUnmanagedWorkRequest(LPTHREAD_START_ROUTINE function, PVOID context); PVOID DeQueueUnManagedWorkRequest(bool* lastOne); void DispatchWorkItem(bool* foundWork, bool* wasNotRecalled); inline void SetTPIndexUnused() { WRAPPER_NO_CONTRACT; _ASSERT(FALSE); } inline BOOL IsTPIndexUnused() { WRAPPER_NO_CONTRACT; _ASSERT(FALSE); return FALSE; } inline void SetTPIndex(TPIndex index) { WRAPPER_NO_CONTRACT; _ASSERT(FALSE); } inline ULONG GetNumRequests() { LIMITED_METHOD_CONTRACT; return VolatileLoad(&m_NumRequests); } private: SpinLock m_lock; ULONG m_NumRequests; struct DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) { BYTE m_padding1[MAX_CACHE_LINE_SIZE - sizeof(LONG)]; // Only use with VolatileLoad+VolatileStore+FastInterlockCompareExchange LONG m_outstandingThreadRequestCount; BYTE m_padding2[MAX_CACHE_LINE_SIZE]; }; }; #ifdef _MSC_VER #pragma warning(default: 4324) // structure was padded due to __declspec(align()) #endif //-------------------------------------------------------------------------- //PerAppDomainTPCountList maintains the collection of per-appdomain thread //pool states. Per appdomain counts are added to the list during appdomain //creation inside the sdomain lock. 
The counts are reset during appdomain //unload, after all the threads in the appdomain have exited. // //This class owns the collection of per-appdomain counts, hands them out by TPIndex, and //also holds the special count used for unmanaged work requests. // //Note: The counts are not accurate, and neither do they need to be. The //actual work queue is in managed (implemented in threadpool.cs). This class //just provides heuristics to the thread pool scheduler, along with //synchronization to indicate start/end of requests to the scheduler. class PerAppDomainTPCountList{ public: static void InitAppDomainIndexList(); static void ResetAppDomainIndex(TPIndex index); static bool AreRequestsPendingInAnyAppDomains(); static LONG GetAppDomainIndexForThreadpoolDispatch(); static TPIndex AddNewTPIndex(); inline static IPerAppDomainTPCount* GetPerAppdomainCount(TPIndex index) { return dac_cast<PTR_IPerAppDomainTPCount>(s_appDomainIndexList.Get(index.m_dwIndex-1)); } inline static UnManagedPerAppDomainTPCount* GetUnmanagedTPCount() { return &s_unmanagedTPCount; } private: static DWORD FindFirstFreeTpEntry(); static BYTE s_padding[MAX_CACHE_LINE_SIZE - sizeof(LONG)]; DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) static LONG s_ADHint; DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) static UnManagedPerAppDomainTPCount s_unmanagedTPCount; //The list of all per-appdomain work-request counts. static ArrayListStatic s_appDomainIndexList; }; #endif //_THREADPOOL_REQUEST_H
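The ThreadPoolRequest.h content above is built around a small lock-free handshake: queuing work bumps a pending-request count, and the scheduler consumes it with an interlocked operation (TakeActiveRequest). Below is a minimal standalone sketch of that handshake, using std::atomic as a stand-in for the VolatileLoad/VolatileStore and interlocked compare-exchange used in the real header; the class and member names are illustrative only, not CLR APIs.

```cpp
#include <atomic>
#include <cstdio>

// Sketch of the per-domain "requests pending" handshake described by IPerAppDomainTPCount.
class PerDomainCount {
    std::atomic<long> m_numRequestsPending{0};
public:
    // Work-queuing side: note that work arrived (the real code would also ask for a worker).
    void SetRequestsActive()   { m_numRequestsPending.fetch_add(1); }
    // Domain reports it has drained its queue.
    void ClearRequestsActive() { m_numRequestsPending.store(0); }
    bool IsRequestPending() const { return m_numRequestsPending.load() > 0; }

    // Scheduler side: atomically consume one pending request and report whether there was one.
    bool TakeActiveRequest() {
        long count = m_numRequestsPending.load();
        while (count > 0) {
            if (m_numRequestsPending.compare_exchange_weak(count, count - 1))
                return true;        // we took the request
        }
        return false;               // nothing was pending
    }
};

int main() {
    PerDomainCount c;
    c.SetRequestsActive();
    bool pending = c.IsRequestPending();   // true: one request queued
    bool first   = c.TakeActiveRequest();  // true: consumed it
    bool second  = c.TakeActiveRequest();  // false: nothing left
    std::printf("pending=%d first=%d second=%d\n", pending, first, second);
    return 0;
}
```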
-1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types but in the code base, we were sometimes relying on `featureSIMD` flag which can be `false` in case `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for your the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types but in the code base, we were sometimes relying on `featureSIMD` flag which can be `false` in case `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for your the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
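The PR description above boils down to one refactoring rule: callers should not test the user-configurable SIMD flag directly, because on Arm64 SIMD support is required for ABI handling regardless of COMPlus_FeatureSIMD. The sketch below models that rule with invented names (it is not the real RyuJIT Compiler class or its supportSIMDTypes() implementation); it only illustrates why the accessor, rather than the raw flag, is the thing to query.

```cpp
#include <cstdio>

// Toy model of the "query an accessor, not the raw flag" pattern from the PR description.
struct CompilerModel {
    bool featureSIMD;    // models the COMPlus_FeatureSIMD switch (user-configurable)
    bool targetIsArm64;  // models a target where SIMD types are mandatory for the ABI

    // On the fixed-SIMD target the answer is always "yes", no matter how the switch is set.
    bool supportsSIMDTypes() const { return targetIsArm64 || featureSIMD; }
};

int main() {
    CompilerModel c{/*featureSIMD*/ false, /*targetIsArm64*/ true};
    std::printf("featureSIMD=%d supportsSIMDTypes=%d\n", c.featureSIMD, c.supportsSIMDTypes());
    return 0;
}
```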
./src/mono/mono/mini/llvmonly-runtime.h
/** * \file * Copyright 2002-2003 Ximian Inc * Copyright 2003-2011 Novell Inc * Copyright 2011 Xamarin Inc * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #ifndef __MONO_LLVMONLY_RUNTIME_H__ #define __MONO_LLVMONLY_RUNTIME_H__ #include "mini-runtime.h" #include "aot-runtime.h" gpointer mini_llvmonly_load_method (MonoMethod *method, gboolean caller_gsharedvt, gboolean need_unbox, gpointer *out_arg, MonoError *error); MonoFtnDesc* mini_llvmonly_load_method_ftndesc (MonoMethod *method, gboolean caller_gsharedvt, gboolean need_unbox, MonoError *error); gpointer mini_llvmonly_load_method_delegate (MonoMethod *method, gboolean caller_gsharedvt, gboolean need_unbox, gpointer *out_arg, MonoError *error); gpointer mini_llvmonly_get_delegate_arg (MonoMethod *method, gpointer method_ptr); gpointer mini_llvmonly_add_method_wrappers (MonoMethod *m, gpointer compiled_method, gboolean caller_gsharedvt, gboolean add_unbox_tramp, gpointer *out_arg); MonoFtnDesc *mini_llvmonly_create_ftndesc (MonoMethod *m, gpointer addr, gpointer arg); gpointer mini_llvmonly_get_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp); gpointer mini_llvmonly_get_vtable_trampoline (MonoVTable *vt, int slot_index, int index); G_EXTERN_C gpointer mini_llvmonly_init_vtable_slot (MonoVTable *vtable, int slot); G_EXTERN_C gpointer mini_llvmonly_resolve_vcall_gsharedvt (MonoObject *this_obj, int imt_slot, MonoMethod *imt_method, gpointer *out_arg); G_EXTERN_C gpointer mini_llvmonly_resolve_iface_call_gsharedvt (MonoObject *this_obj, int imt_slot, MonoMethod *imt_method, gpointer *out_arg); G_EXTERN_C MonoFtnDesc* mini_llvmonly_resolve_generic_virtual_call (MonoVTable *vt, int slot, MonoMethod *imt_method); G_EXTERN_C MonoFtnDesc* mini_llvmonly_resolve_generic_virtual_iface_call (MonoVTable *vt, int imt_slot, MonoMethod *imt_method); G_EXTERN_C MonoFtnDesc* mini_llvmonly_resolve_vcall_gsharedvt_fast (MonoObject *this_obj, int slot); G_EXTERN_C void mini_llvmonly_init_delegate (MonoDelegate *del, MonoDelegateTrampInfo *info); G_EXTERN_C void mini_llvmonly_init_delegate_virtual (MonoDelegate *del, MonoObject *target, MonoMethod *method); /* Used for regular llvm as well */ G_EXTERN_C void mini_llvm_init_method (MonoAotFileInfo *info, gpointer aot_module, gpointer method_info, MonoVTable *vtable); G_EXTERN_C void mini_llvmonly_throw_nullref_exception (void); G_EXTERN_C void mini_llvmonly_throw_aot_failed_exception (const char *name); G_EXTERN_C void mini_llvmonly_pop_lmf (MonoLMF *lmf); G_EXTERN_C void mini_llvmonly_interp_entry_gsharedvt (gpointer imethod, gpointer res, gpointer *args); /* These are implemented in mini-exceptions.c */ G_EXTERN_C void mini_llvmonly_throw_exception (MonoObject *ex); G_EXTERN_C void mini_llvmonly_rethrow_exception (MonoObject *ex); G_EXTERN_C void mini_llvmonly_throw_corlib_exception (guint32 ex_token_index); G_EXTERN_C void mini_llvmonly_resume_exception (void); G_EXTERN_C void mini_llvmonly_resume_exception_il_state (MonoLMF *lmf, gpointer info); G_EXTERN_C MonoObject *mini_llvmonly_load_exception (void); G_EXTERN_C void mini_llvmonly_clear_exception (void); G_EXTERN_C gint32 mini_llvmonly_match_exception (MonoJitInfo *jinfo, guint32 region_start, guint32 region_end, gpointer rgctx, MonoObject *this_obj); #endif
/** * \file * Copyright 2002-2003 Ximian Inc * Copyright 2003-2011 Novell Inc * Copyright 2011 Xamarin Inc * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #ifndef __MONO_LLVMONLY_RUNTIME_H__ #define __MONO_LLVMONLY_RUNTIME_H__ #include "mini-runtime.h" #include "aot-runtime.h" gpointer mini_llvmonly_load_method (MonoMethod *method, gboolean caller_gsharedvt, gboolean need_unbox, gpointer *out_arg, MonoError *error); MonoFtnDesc* mini_llvmonly_load_method_ftndesc (MonoMethod *method, gboolean caller_gsharedvt, gboolean need_unbox, MonoError *error); gpointer mini_llvmonly_load_method_delegate (MonoMethod *method, gboolean caller_gsharedvt, gboolean need_unbox, gpointer *out_arg, MonoError *error); gpointer mini_llvmonly_get_delegate_arg (MonoMethod *method, gpointer method_ptr); gpointer mini_llvmonly_add_method_wrappers (MonoMethod *m, gpointer compiled_method, gboolean caller_gsharedvt, gboolean add_unbox_tramp, gpointer *out_arg); MonoFtnDesc *mini_llvmonly_create_ftndesc (MonoMethod *m, gpointer addr, gpointer arg); gpointer mini_llvmonly_get_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp); gpointer mini_llvmonly_get_vtable_trampoline (MonoVTable *vt, int slot_index, int index); G_EXTERN_C gpointer mini_llvmonly_init_vtable_slot (MonoVTable *vtable, int slot); G_EXTERN_C gpointer mini_llvmonly_resolve_vcall_gsharedvt (MonoObject *this_obj, int imt_slot, MonoMethod *imt_method, gpointer *out_arg); G_EXTERN_C gpointer mini_llvmonly_resolve_iface_call_gsharedvt (MonoObject *this_obj, int imt_slot, MonoMethod *imt_method, gpointer *out_arg); G_EXTERN_C MonoFtnDesc* mini_llvmonly_resolve_generic_virtual_call (MonoVTable *vt, int slot, MonoMethod *imt_method); G_EXTERN_C MonoFtnDesc* mini_llvmonly_resolve_generic_virtual_iface_call (MonoVTable *vt, int imt_slot, MonoMethod *imt_method); G_EXTERN_C MonoFtnDesc* mini_llvmonly_resolve_vcall_gsharedvt_fast (MonoObject *this_obj, int slot); G_EXTERN_C void mini_llvmonly_init_delegate (MonoDelegate *del, MonoDelegateTrampInfo *info); G_EXTERN_C void mini_llvmonly_init_delegate_virtual (MonoDelegate *del, MonoObject *target, MonoMethod *method); /* Used for regular llvm as well */ G_EXTERN_C void mini_llvm_init_method (MonoAotFileInfo *info, gpointer aot_module, gpointer method_info, MonoVTable *vtable); G_EXTERN_C void mini_llvmonly_throw_nullref_exception (void); G_EXTERN_C void mini_llvmonly_throw_aot_failed_exception (const char *name); G_EXTERN_C void mini_llvmonly_pop_lmf (MonoLMF *lmf); G_EXTERN_C void mini_llvmonly_interp_entry_gsharedvt (gpointer imethod, gpointer res, gpointer *args); /* These are implemented in mini-exceptions.c */ G_EXTERN_C void mini_llvmonly_throw_exception (MonoObject *ex); G_EXTERN_C void mini_llvmonly_rethrow_exception (MonoObject *ex); G_EXTERN_C void mini_llvmonly_throw_corlib_exception (guint32 ex_token_index); G_EXTERN_C void mini_llvmonly_resume_exception (void); G_EXTERN_C void mini_llvmonly_resume_exception_il_state (MonoLMF *lmf, gpointer info); G_EXTERN_C MonoObject *mini_llvmonly_load_exception (void); G_EXTERN_C void mini_llvmonly_clear_exception (void); G_EXTERN_C gint32 mini_llvmonly_match_exception (MonoJitInfo *jinfo, guint32 region_start, guint32 region_end, gpointer rgctx, MonoObject *this_obj); #endif
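Several declarations in this header hand back a MonoFtnDesc, i.e. a callable described by a code address plus an extra argument that must accompany it. As a rough, generic illustration of that calling shape (plain C++ with invented names, not Mono's MonoFtnDesc layout or API):

```cpp
#include <cstdio>

// A "function descriptor": entry point plus the context argument the callee expects.
struct FtnDesc {
    int  (*addr)(void* ctx, int x);
    void* arg;
};

static int add_offset(void* ctx, int x) { return x + *static_cast<int*>(ctx); }

// Callers always invoke through the pair rather than through a bare function pointer.
static int invoke(const FtnDesc& d, int x) { return d.addr(d.arg, x); }

int main() {
    int offset = 42;
    FtnDesc d{add_offset, &offset};
    std::printf("%d\n", invoke(d, 8));  // prints 50
    return 0;
}
```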
-1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types but in the code base, we were sometimes relying on `featureSIMD` flag which can be `false` in case `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for your the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types but in the code base, we were sometimes relying on `featureSIMD` flag which can be `false` in case `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for your the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/pal/tests/palsuite/c_runtime/_vsnprintf_s/test19/test19.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test19.c ** ** Purpose: Test #19 for the _vsnprintf function. ** ** **===================================================================*/ #include <palsuite.h> #include "../_vsnprintf_s.h" /* * Notes: memcmp is used, as is strlen. */ #define DOTEST(a,b,c,d,e) DoTest(a,b,(void*)c,d,e) void DoArgumentPrecTest(char *formatstr, int precision, void *param, char *paramstr, char *checkstr1, char *checkstr2) { char buf[256]; Testvsnprintf(buf,256,formatstr, precision, param); if (memcmp(buf, checkstr1, strlen(checkstr1) + 1) != 0 && memcmp(buf, checkstr2, strlen(checkstr2) + 1) != 0) { Fail("ERROR: failed to insert %s into \"%s\" with precision %d\n" "Expected \"%s\" or \"%s\", got \"%s\".\n", paramstr, formatstr, precision, checkstr1, checkstr2, buf); } } void DoArgumentPrecDoubleTest(char *formatstr, int precision, double param, char *checkstr1, char *checkstr2) { char buf[256]; Testvsnprintf(buf,256,formatstr, precision, param); if (memcmp(buf, checkstr1, strlen(checkstr1) + 1) != 0 && memcmp(buf, checkstr2, strlen(checkstr2) + 1) != 0) { Fail("ERROR: failed to insert %f into \"%s\" with precision %d\n" "Expected \"%s\" or \"%s\", got \"%s\".\n", param, formatstr, precision, checkstr1, checkstr2, buf); } } PALTEST(c_runtime__vsnprintf_s_test19_paltest_vsnprintf_test19, "c_runtime/_vsnprintf_s/test19/paltest_vsnprintf_test19") { if (PAL_Initialize(argc, argv) != 0) { return(FAIL); } DoArgumentPrecTest("%.*s", 2, (void*)"bar", "bar", "ba", "ba"); DoArgumentPrecTest("%.*S", 2, (void*)convert("bar"), "bar", "ba", "ba"); DoArgumentPrecTest("%.*c", 0, (void*)'a', "a", "a", "a"); DoArgumentPrecTest("%.*c", 4, (void*)'a', "a", "a", "a"); DoArgumentPrecTest("%.*C", 0, (void*)'a', "a", "a", "a"); DoArgumentPrecTest("%.*C", 4, (void*)'a', "a", "a", "a"); DoArgumentPrecTest("%.*d", 1, (void*)42, "42", "42", "42"); DoArgumentPrecTest("%.*d", 3, (void*)42, "42", "042", "042"); DoArgumentPrecTest("%.*i", 1, (void*)42, "42", "42", "42"); DoArgumentPrecTest("%.*i", 3, (void*)42, "42", "042", "042"); DoArgumentPrecTest("%.*o", 1, (void*)42, "42", "52", "52"); DoArgumentPrecTest("%.*o", 3, (void*)42, "42", "052", "052"); DoArgumentPrecTest("%.*u", 1, (void*)42, "42", "42", "42"); DoArgumentPrecTest("%.*u", 3, (void*)42, "42", "042", "042"); DoArgumentPrecTest("%.*x", 1, (void*)0x42, "0x42", "42", "42"); DoArgumentPrecTest("%.*x", 3, (void*)0x42, "0x42", "042", "042"); DoArgumentPrecTest("%.*X", 1, (void*)0x42, "0x42", "42", "42"); DoArgumentPrecTest("%.*X", 3, (void*)0x42, "0x42", "042", "042"); DoArgumentPrecDoubleTest("%.*e", 1, 2.01, "2.0e+000", "2.0e+00"); DoArgumentPrecDoubleTest("%.*e", 3, 2.01, "2.010e+000", "2.010e+00"); DoArgumentPrecDoubleTest("%.*E", 1, 2.01, "2.0E+000", "2.0E+00"); DoArgumentPrecDoubleTest("%.*E", 3, 2.01, "2.010E+000", "2.010E+00"); DoArgumentPrecDoubleTest("%.*f", 1, 2.01, "2.0", "2.0"); DoArgumentPrecDoubleTest("%.*f", 3, 2.01, "2.010", "2.010"); DoArgumentPrecDoubleTest("%.*g", 1, 256.01, "3e+002", "3e+02"); DoArgumentPrecDoubleTest("%.*g", 3, 256.01, "256", "256"); DoArgumentPrecDoubleTest("%.*g", 4, 256.01, "256", "256"); DoArgumentPrecDoubleTest("%.*g", 6, 256.01, "256.01", "256.01"); DoArgumentPrecDoubleTest("%.*G", 1, 256.01, "3E+002", "3E+02"); DoArgumentPrecDoubleTest("%.*G", 3, 256.01, "256", "256"); DoArgumentPrecDoubleTest("%.*G", 4, 256.01, "256", 
"256"); DoArgumentPrecDoubleTest("%.*G", 6, 256.01, "256.01", "256.01"); PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test19.c ** ** Purpose: Test #19 for the _vsnprintf function. ** ** **===================================================================*/ #include <palsuite.h> #include "../_vsnprintf_s.h" /* * Notes: memcmp is used, as is strlen. */ #define DOTEST(a,b,c,d,e) DoTest(a,b,(void*)c,d,e) void DoArgumentPrecTest(char *formatstr, int precision, void *param, char *paramstr, char *checkstr1, char *checkstr2) { char buf[256]; Testvsnprintf(buf,256,formatstr, precision, param); if (memcmp(buf, checkstr1, strlen(checkstr1) + 1) != 0 && memcmp(buf, checkstr2, strlen(checkstr2) + 1) != 0) { Fail("ERROR: failed to insert %s into \"%s\" with precision %d\n" "Expected \"%s\" or \"%s\", got \"%s\".\n", paramstr, formatstr, precision, checkstr1, checkstr2, buf); } } void DoArgumentPrecDoubleTest(char *formatstr, int precision, double param, char *checkstr1, char *checkstr2) { char buf[256]; Testvsnprintf(buf,256,formatstr, precision, param); if (memcmp(buf, checkstr1, strlen(checkstr1) + 1) != 0 && memcmp(buf, checkstr2, strlen(checkstr2) + 1) != 0) { Fail("ERROR: failed to insert %f into \"%s\" with precision %d\n" "Expected \"%s\" or \"%s\", got \"%s\".\n", param, formatstr, precision, checkstr1, checkstr2, buf); } } PALTEST(c_runtime__vsnprintf_s_test19_paltest_vsnprintf_test19, "c_runtime/_vsnprintf_s/test19/paltest_vsnprintf_test19") { if (PAL_Initialize(argc, argv) != 0) { return(FAIL); } DoArgumentPrecTest("%.*s", 2, (void*)"bar", "bar", "ba", "ba"); DoArgumentPrecTest("%.*S", 2, (void*)convert("bar"), "bar", "ba", "ba"); DoArgumentPrecTest("%.*c", 0, (void*)'a', "a", "a", "a"); DoArgumentPrecTest("%.*c", 4, (void*)'a', "a", "a", "a"); DoArgumentPrecTest("%.*C", 0, (void*)'a', "a", "a", "a"); DoArgumentPrecTest("%.*C", 4, (void*)'a', "a", "a", "a"); DoArgumentPrecTest("%.*d", 1, (void*)42, "42", "42", "42"); DoArgumentPrecTest("%.*d", 3, (void*)42, "42", "042", "042"); DoArgumentPrecTest("%.*i", 1, (void*)42, "42", "42", "42"); DoArgumentPrecTest("%.*i", 3, (void*)42, "42", "042", "042"); DoArgumentPrecTest("%.*o", 1, (void*)42, "42", "52", "52"); DoArgumentPrecTest("%.*o", 3, (void*)42, "42", "052", "052"); DoArgumentPrecTest("%.*u", 1, (void*)42, "42", "42", "42"); DoArgumentPrecTest("%.*u", 3, (void*)42, "42", "042", "042"); DoArgumentPrecTest("%.*x", 1, (void*)0x42, "0x42", "42", "42"); DoArgumentPrecTest("%.*x", 3, (void*)0x42, "0x42", "042", "042"); DoArgumentPrecTest("%.*X", 1, (void*)0x42, "0x42", "42", "42"); DoArgumentPrecTest("%.*X", 3, (void*)0x42, "0x42", "042", "042"); DoArgumentPrecDoubleTest("%.*e", 1, 2.01, "2.0e+000", "2.0e+00"); DoArgumentPrecDoubleTest("%.*e", 3, 2.01, "2.010e+000", "2.010e+00"); DoArgumentPrecDoubleTest("%.*E", 1, 2.01, "2.0E+000", "2.0E+00"); DoArgumentPrecDoubleTest("%.*E", 3, 2.01, "2.010E+000", "2.010E+00"); DoArgumentPrecDoubleTest("%.*f", 1, 2.01, "2.0", "2.0"); DoArgumentPrecDoubleTest("%.*f", 3, 2.01, "2.010", "2.010"); DoArgumentPrecDoubleTest("%.*g", 1, 256.01, "3e+002", "3e+02"); DoArgumentPrecDoubleTest("%.*g", 3, 256.01, "256", "256"); DoArgumentPrecDoubleTest("%.*g", 4, 256.01, "256", "256"); DoArgumentPrecDoubleTest("%.*g", 6, 256.01, "256.01", "256.01"); DoArgumentPrecDoubleTest("%.*G", 1, 256.01, "3E+002", "3E+02"); DoArgumentPrecDoubleTest("%.*G", 3, 256.01, "256", "256"); DoArgumentPrecDoubleTest("%.*G", 4, 256.01, "256", 
"256"); DoArgumentPrecDoubleTest("%.*G", 6, 256.01, "256.01", "256.01"); PAL_Terminate(); return PASS; }
-1
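The test above exercises `%.*` conversions, where the precision is taken from an int argument that precedes the value, and it accepts two expected strings per case to allow for the two- versus three-digit exponent forms. The same precision-from-argument behaviour can be observed with the standard snprintf; a small standalone sketch (not using the PAL's Testvsnprintf harness):

```cpp
#include <cstdio>

int main() {
    char buf[64];

    std::snprintf(buf, sizeof(buf), "%.*f", 3, 2.01);   // precision 3 -> "2.010"
    std::printf("%s\n", buf);

    std::snprintf(buf, sizeof(buf), "%.*d", 3, 42);     // integer precision pads digits -> "042"
    std::printf("%s\n", buf);

    std::snprintf(buf, sizeof(buf), "%.*s", 2, "bar");  // string precision truncates -> "ba"
    std::printf("%s\n", buf);
    return 0;
}
```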
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types but in the code base, we were sometimes relying on `featureSIMD` flag which can be `false` in case `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for your the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types but in the code base, we were sometimes relying on `featureSIMD` flag which can be `false` in case `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for your the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/pal/src/safecrt/wcsncat_s.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*** *wcsncat_s.c - append n chars of string to new string * * *Purpose: * defines wcsncat_s() - appends n characters of string onto * end of other string * *******************************************************************************/ #define _SECURECRT_FILL_BUFFER 1 #define _SECURECRT_FILL_BUFFER_THRESHOLD ((size_t)8) #include <string.h> #include <errno.h> #include <limits.h> #include "internal_securecrt.h" #include "mbusafecrt_internal.h" #define _FUNC_PROLOGUE #define _FUNC_NAME wcsncat_s #define _CHAR char16_t #define _DEST _Dst #define _SIZE _SizeInWords #define _SRC _Src #define _COUNT _Count #include "tcsncat_s.inl"
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*** *wcsncat_s.c - append n chars of string to new string * * *Purpose: * defines wcsncat_s() - appends n characters of string onto * end of other string * *******************************************************************************/ #define _SECURECRT_FILL_BUFFER 1 #define _SECURECRT_FILL_BUFFER_THRESHOLD ((size_t)8) #include <string.h> #include <errno.h> #include <limits.h> #include "internal_securecrt.h" #include "mbusafecrt_internal.h" #define _FUNC_PROLOGUE #define _FUNC_NAME wcsncat_s #define _CHAR char16_t #define _DEST _Dst #define _SIZE _SizeInWords #define _SRC _Src #define _COUNT _Count #include "tcsncat_s.inl"
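wcsncat_s.cpp contains no logic of its own: it defines _CHAR, _FUNC_NAME and related macros and then includes the shared tcsncat_s.inl body, which expands into the wide-character variant. The sketch below compresses that multi-file technique into a single function-generating macro so it stays self-contained; the names are invented and the body is deliberately trivial.

```cpp
#include <cstdio>

// Stand-in for a shared ".inl" body: one definition, instantiated per element type.
#define MAKE_SUM_FUNC(NAME, TYPE)                    \
    static TYPE NAME(const TYPE* p, int n) {         \
        TYPE total = 0;                              \
        for (int i = 0; i < n; ++i) total += p[i];   \
        return total;                                \
    }

MAKE_SUM_FUNC(sum_int, int)        // the "narrow" instantiation
MAKE_SUM_FUNC(sum_double, double)  // the "wide" instantiation

int main() {
    int    a[] = {1, 2, 3};
    double b[] = {0.5, 0.25};
    std::printf("%d %.2f\n", sum_int(a, 3), sum_double(b, 2));
    return 0;
}
```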
-1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types but in the code base, we were sometimes relying on `featureSIMD` flag which can be `false` in case `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for your the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types but in the code base, we were sometimes relying on `featureSIMD` flag which can be `false` in case `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for your the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/vm/formattype.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //This file just includes formattype.cpp from the inc directory. #include "common.h" #include "../inc/formattype.cpp"
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //This file just includes formattype.cpp from the inc directory. #include "common.h" #include "../inc/formattype.cpp"
-1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types but in the code base, we were sometimes relying on `featureSIMD` flag which can be `false` in case `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for your the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types but in the code base, we were sometimes relying on `featureSIMD` flag which can be `false` in case `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for your the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/mono/mono/metadata/image-internals.h
/** * \file * Copyright 2015 Xamarin Inc * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #ifndef __MONO_METADATA_IMAGE_INTERNALS_H__ #define __MONO_METADATA_IMAGE_INTERNALS_H__ #include <mono/metadata/image.h> #include <mono/metadata/loader-internals.h> MonoImage* mono_image_loaded_internal (MonoAssemblyLoadContext *alc, const char *name); MonoImage* mono_image_load_file_for_image_checked (MonoImage *image, int fileidx, MonoError *error); MonoImage* mono_image_load_module_checked (MonoImage *image, int idx, MonoError *error); MonoImage * mono_image_open_a_lot (MonoAssemblyLoadContext *alc, const char *fname, MonoImageOpenStatus *status); #endif /* __MONO_METADATA_IMAGE_INTERNALS_H__ */
/** * \file * Copyright 2015 Xamarin Inc * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #ifndef __MONO_METADATA_IMAGE_INTERNALS_H__ #define __MONO_METADATA_IMAGE_INTERNALS_H__ #include <mono/metadata/image.h> #include <mono/metadata/loader-internals.h> MonoImage* mono_image_loaded_internal (MonoAssemblyLoadContext *alc, const char *name); MonoImage* mono_image_load_file_for_image_checked (MonoImage *image, int fileidx, MonoError *error); MonoImage* mono_image_load_module_checked (MonoImage *image, int idx, MonoError *error); MonoImage * mono_image_open_a_lot (MonoAssemblyLoadContext *alc, const char *fname, MonoImageOpenStatus *status); #endif /* __MONO_METADATA_IMAGE_INTERNALS_H__ */
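The *_checked loaders declared here follow a convention visible in their signatures: the result is returned directly and failure is reported through the MonoError out-parameter. A generic sketch of that calling shape in plain C++ (invented names, not Mono's MonoError API):

```cpp
#include <cstdio>
#include <string>

// Minimal error object passed by pointer, in the spirit of an "error out-parameter".
struct ErrorInfo {
    bool ok = true;
    std::string message;
    void set(const std::string& m) { ok = false; message = m; }
};

// "_checked" style function: returns the value, reports failure through *error.
static int parse_positive_checked(const std::string& text, ErrorInfo* error) {
    try {
        int value = std::stoi(text);
        if (value <= 0) { error->set("value must be positive"); return 0; }
        return value;
    } catch (...) {
        error->set("not a number: " + text);
        return 0;
    }
}

int main() {
    ErrorInfo err;
    int v = parse_positive_checked("42", &err);
    std::printf("ok=%d value=%d\n", err.ok, v);
    v = parse_positive_checked("oops", &err);
    std::printf("ok=%d message=%s\n", err.ok, err.message.c_str());
    return 0;
}
```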
-1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types but in the code base, we were sometimes relying on `featureSIMD` flag which can be `false` in case `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for your the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types but in the code base, we were sometimes relying on `featureSIMD` flag which can be `false` in case `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for your the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/pal/src/safecrt/safecrt_woutput_s.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*** *safecrt_woutput_s.c - implementation of the _woutput family for safercrt.lib * * *Purpose: * This file contains the implementation of the _woutput family for safercrt.lib. * *Revision History: * 07-08-04 SJ Stub module created. * 07-13-04 AC Added support for floating-point types. * 07-29-04 AC Added macros for a safecrt version of mctowc and wctomb, which target ntdll.dll or msvcrt.dll * based on the _NTSUBSET_ #define * ****/ #define _SAFECRT_IMPL #define __STDC_LIMIT_MACROS #include "pal/palinternal.h" #include <string.h> #include <errno.h> #include <limits.h> #include <stdlib.h> #include <stdarg.h> #include <inttypes.h> #include "internal_securecrt.h" #include "mbusafecrt_internal.h" #ifndef _UNICODE /* CRT flag */ #define _UNICODE 1 #endif #ifndef UNICODE /* NT flag */ #define UNICODE 1 #endif #define FORMAT_VALIDATIONS #if defined(_NTSUBSET_) #define _MBTOWC _safecrt_mbtowc #endif #define _WCTOMB_S _safecrt_wctomb_s #define _CFLTCVT _safecrt_cfltcvt #define _CLDCVT _safecrt_cldcvt #define _TCHAR CRT_TCHAR #define TCHAR CRTTCHAR typedef char16_t _TCHAR; typedef char16_t TCHAR; #define _T(x) L##x #include "output.inl"
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*** *safecrt_woutput_s.c - implementation of the _woutput family for safercrt.lib * * *Purpose: * This file contains the implementation of the _woutput family for safercrt.lib. * *Revision History: * 07-08-04 SJ Stub module created. * 07-13-04 AC Added support for floating-point types. * 07-29-04 AC Added macros for a safecrt version of mctowc and wctomb, which target ntdll.dll or msvcrt.dll * based on the _NTSUBSET_ #define * ****/ #define _SAFECRT_IMPL #define __STDC_LIMIT_MACROS #include "pal/palinternal.h" #include <string.h> #include <errno.h> #include <limits.h> #include <stdlib.h> #include <stdarg.h> #include <inttypes.h> #include "internal_securecrt.h" #include "mbusafecrt_internal.h" #ifndef _UNICODE /* CRT flag */ #define _UNICODE 1 #endif #ifndef UNICODE /* NT flag */ #define UNICODE 1 #endif #define FORMAT_VALIDATIONS #if defined(_NTSUBSET_) #define _MBTOWC _safecrt_mbtowc #endif #define _WCTOMB_S _safecrt_wctomb_s #define _CFLTCVT _safecrt_cfltcvt #define _CLDCVT _safecrt_cldcvt #define _TCHAR CRT_TCHAR #define TCHAR CRTTCHAR typedef char16_t _TCHAR; typedef char16_t TCHAR; #define _T(x) L##x #include "output.inl"
-1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types but in the code base, we were sometimes relying on `featureSIMD` flag which can be `false` in case `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for your the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types but in the code base, we were sometimes relying on `featureSIMD` flag which can be `false` in case `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for your the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/jit/hostallocator.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "jitpch.h" #include "hostallocator.h" void* HostAllocator::allocateHostMemory(size_t size) { assert(g_jitHost != nullptr); return g_jitHost->allocateMemory(size); } void HostAllocator::freeHostMemory(void* p) { assert(g_jitHost != nullptr); g_jitHost->freeMemory(p); }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "jitpch.h" #include "hostallocator.h" void* HostAllocator::allocateHostMemory(size_t size) { assert(g_jitHost != nullptr); return g_jitHost->allocateMemory(size); } void HostAllocator::freeHostMemory(void* p) { assert(g_jitHost != nullptr); g_jitHost->freeMemory(p); }
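HostAllocator above does no bookkeeping itself; it asserts that a host is present and forwards every allocate/free to it. A compact sketch of the same delegation shape with invented names (this is not the real ICorJitHost interface):

```cpp
#include <cassert>
#include <cstdio>
#include <cstdlib>

// The host abstraction the allocator delegates to.
struct IHost {
    virtual void* allocateMemory(size_t size) = 0;
    virtual void  freeMemory(void* block) = 0;
    virtual ~IHost() = default;
};

// One possible host: plain malloc/free.
struct MallocHost final : IHost {
    void* allocateMemory(size_t size) override { return std::malloc(size); }
    void  freeMemory(void* block) override { std::free(block); }
};

// The allocator itself owns no memory policy; it just forwards to the host.
class HostAllocatorSketch {
    IHost* m_host;
public:
    explicit HostAllocatorSketch(IHost* host) : m_host(host) { assert(host != nullptr); }
    void* allocate(size_t size) { return m_host->allocateMemory(size); }
    void  release(void* block)  { m_host->freeMemory(block); }
};

int main() {
    MallocHost host;
    HostAllocatorSketch alloc(&host);
    void* p = alloc.allocate(64);
    std::printf("allocated %p\n", p);
    alloc.release(p);
    return 0;
}
```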
-1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types but in the code base, we were sometimes relying on `featureSIMD` flag which can be `false` in case `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for your the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types but in the code base, we were sometimes relying on `featureSIMD` flag which can be `false` in case `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for your the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/tools/superpmi/superpmi-shim-simple/icorjitinfo.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef _ICorJitInfo #define _ICorJitInfo #include "runtimedetails.h" class interceptor_ICJI : public ICorJitInfo { #include "icorjitinfoimpl.h" public: // Added to help us track the original icji and be able to easily indirect // to it. And a simple way to keep one memory manager instance per instance. ICorJitInfo* original_ICorJitInfo; }; #endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef _ICorJitInfo #define _ICorJitInfo #include "runtimedetails.h" class interceptor_ICJI : public ICorJitInfo { #include "icorjitinfoimpl.h" public: // Added to help us track the original icji and be able to easily indirect // to it. And a simple way to keep one memory manager instance per instance. ICorJitInfo* original_ICorJitInfo; }; #endif
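The shim's interceptor_ICJI implements the same interface as the object it wraps and keeps a pointer to the original so every call can be forwarded (and, in other shims, logged or recorded). A generic decorator sketch of that shape with invented interface names:

```cpp
#include <cstdio>

struct ICounter {
    virtual int next() = 0;
    virtual ~ICounter() = default;
};

struct RealCounter final : ICounter {
    int value = 0;
    int next() override { return ++value; }
};

// Interceptor: same interface, holds the original, adds behaviour around the forwarded call.
struct LoggingCounter final : ICounter {
    ICounter* original;   // plays the role of original_ICorJitInfo in the shim
    explicit LoggingCounter(ICounter* o) : original(o) {}
    int next() override {
        int v = original->next();
        std::printf("next() -> %d\n", v);
        return v;
    }
};

int main() {
    RealCounter real;
    LoggingCounter shim(&real);
    shim.next();
    shim.next();
    return 0;
}
```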
-1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types but in the code base, we were sometimes relying on `featureSIMD` flag which can be `false` in case `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for your the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types but in the code base, we were sometimes relying on `featureSIMD` flag which can be `false` in case `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for your the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/native/libs/Common/pal_io_common.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once #include <stdlib.h> #include <assert.h> #include <poll.h> #include <pal_error_common.h> #include <pal_utilities.h> #include <minipal/utils.h> /** * Our intermediate pollfd struct to normalize the data types */ typedef struct { int32_t FileDescriptor; // The file descriptor to poll int16_t Events; // The events to poll for int16_t TriggeredEvents; // The events that triggered the poll } PollEvent; /** * Constants passed to and from poll describing what to poll for and what * kind of data was received from poll. */ typedef enum { PAL_POLLIN = 0x0001, /* non-urgent readable data available */ PAL_POLLPRI = 0x0002, /* urgent readable data available */ PAL_POLLOUT = 0x0004, /* data can be written without blocked */ PAL_POLLERR = 0x0008, /* an error occurred */ PAL_POLLHUP = 0x0010, /* the file descriptor hung up */ PAL_POLLNVAL = 0x0020, /* the requested events were invalid */ } PollEvents; inline static int32_t Common_Read(intptr_t fd, void* buffer, int32_t bufferSize) { assert(buffer != NULL || bufferSize == 0); assert(bufferSize >= 0); if (bufferSize < 0) { errno = EINVAL; return -1; } ssize_t count; while ((count = read(ToFileDescriptor(fd), buffer, (uint32_t)bufferSize)) < 0 && errno == EINTR); assert(count >= -1 && count <= bufferSize); return (int32_t)count; } inline static int32_t Common_Write(intptr_t fd, const void* buffer, int32_t bufferSize) { assert(buffer != NULL || bufferSize == 0); assert(bufferSize >= 0); if (bufferSize < 0) { errno = ERANGE; return -1; } ssize_t count; while ((count = write(ToFileDescriptor(fd), buffer, (uint32_t)bufferSize)) < 0 && errno == EINTR); assert(count >= -1 && count <= bufferSize); return (int32_t)count; } inline static int32_t Common_Poll(PollEvent* pollEvents, uint32_t eventCount, int32_t milliseconds, uint32_t* triggered) { if (pollEvents == NULL || triggered == NULL) { return Error_EFAULT; } if (milliseconds < -1) { return Error_EINVAL; } struct pollfd stackBuffer[(uint32_t)(2048/sizeof(struct pollfd))]; int useStackBuffer = eventCount <= ARRAY_SIZE(stackBuffer); struct pollfd* pollfds = NULL; if (useStackBuffer) { pollfds = &stackBuffer[0]; } else { pollfds = (struct pollfd*)calloc(eventCount, sizeof(*pollfds)); if (pollfds == NULL) { return Error_ENOMEM; } } for (uint32_t i = 0; i < eventCount; i++) { const PollEvent* event = &pollEvents[i]; pollfds[i].fd = event->FileDescriptor; // we need to do this for platforms like AIX where PAL_POLL* doesn't // match up to their reality; this is PollEvent -> system polling switch (event->Events) { case PAL_POLLIN: pollfds[i].events = POLLIN; break; case PAL_POLLPRI: pollfds[i].events = POLLPRI; break; case PAL_POLLOUT: pollfds[i].events = POLLOUT; break; case PAL_POLLERR: pollfds[i].events = POLLERR; break; case PAL_POLLHUP: pollfds[i].events = POLLHUP; break; case PAL_POLLNVAL: pollfds[i].events = POLLNVAL; break; default: pollfds[i].events = event->Events; break; } pollfds[i].revents = 0; } int rv; while ((rv = poll(pollfds, (nfds_t)eventCount, milliseconds)) < 0 && errno == EINTR); if (rv < 0) { if (!useStackBuffer) { free(pollfds); } *triggered = 0; return ConvertErrorPlatformToPal(errno); } for (uint32_t i = 0; i < eventCount; i++) { const struct pollfd* pfd = &pollfds[i]; assert(pfd->fd == pollEvents[i].FileDescriptor); assert(pfd->events == pollEvents[i].Events); // same as the other switch, just system -> PollEvent switch (pfd->revents) { case 
POLLIN: pollEvents[i].TriggeredEvents = PAL_POLLIN; break; case POLLPRI: pollEvents[i].TriggeredEvents = PAL_POLLPRI; break; case POLLOUT: pollEvents[i].TriggeredEvents = PAL_POLLOUT; break; case POLLERR: pollEvents[i].TriggeredEvents = PAL_POLLERR; break; case POLLHUP: pollEvents[i].TriggeredEvents = PAL_POLLHUP; break; case POLLNVAL: pollEvents[i].TriggeredEvents = PAL_POLLNVAL; break; default: pollEvents[i].TriggeredEvents = (int16_t)pfd->revents; break; } } *triggered = (uint32_t)rv; if (!useStackBuffer) { free(pollfds); } return Error_SUCCESS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once #include <stdlib.h> #include <assert.h> #include <poll.h> #include <pal_error_common.h> #include <pal_utilities.h> #include <minipal/utils.h> /** * Our intermediate pollfd struct to normalize the data types */ typedef struct { int32_t FileDescriptor; // The file descriptor to poll int16_t Events; // The events to poll for int16_t TriggeredEvents; // The events that triggered the poll } PollEvent; /** * Constants passed to and from poll describing what to poll for and what * kind of data was received from poll. */ typedef enum { PAL_POLLIN = 0x0001, /* non-urgent readable data available */ PAL_POLLPRI = 0x0002, /* urgent readable data available */ PAL_POLLOUT = 0x0004, /* data can be written without blocked */ PAL_POLLERR = 0x0008, /* an error occurred */ PAL_POLLHUP = 0x0010, /* the file descriptor hung up */ PAL_POLLNVAL = 0x0020, /* the requested events were invalid */ } PollEvents; inline static int32_t Common_Read(intptr_t fd, void* buffer, int32_t bufferSize) { assert(buffer != NULL || bufferSize == 0); assert(bufferSize >= 0); if (bufferSize < 0) { errno = EINVAL; return -1; } ssize_t count; while ((count = read(ToFileDescriptor(fd), buffer, (uint32_t)bufferSize)) < 0 && errno == EINTR); assert(count >= -1 && count <= bufferSize); return (int32_t)count; } inline static int32_t Common_Write(intptr_t fd, const void* buffer, int32_t bufferSize) { assert(buffer != NULL || bufferSize == 0); assert(bufferSize >= 0); if (bufferSize < 0) { errno = ERANGE; return -1; } ssize_t count; while ((count = write(ToFileDescriptor(fd), buffer, (uint32_t)bufferSize)) < 0 && errno == EINTR); assert(count >= -1 && count <= bufferSize); return (int32_t)count; } inline static int32_t Common_Poll(PollEvent* pollEvents, uint32_t eventCount, int32_t milliseconds, uint32_t* triggered) { if (pollEvents == NULL || triggered == NULL) { return Error_EFAULT; } if (milliseconds < -1) { return Error_EINVAL; } struct pollfd stackBuffer[(uint32_t)(2048/sizeof(struct pollfd))]; int useStackBuffer = eventCount <= ARRAY_SIZE(stackBuffer); struct pollfd* pollfds = NULL; if (useStackBuffer) { pollfds = &stackBuffer[0]; } else { pollfds = (struct pollfd*)calloc(eventCount, sizeof(*pollfds)); if (pollfds == NULL) { return Error_ENOMEM; } } for (uint32_t i = 0; i < eventCount; i++) { const PollEvent* event = &pollEvents[i]; pollfds[i].fd = event->FileDescriptor; // we need to do this for platforms like AIX where PAL_POLL* doesn't // match up to their reality; this is PollEvent -> system polling switch (event->Events) { case PAL_POLLIN: pollfds[i].events = POLLIN; break; case PAL_POLLPRI: pollfds[i].events = POLLPRI; break; case PAL_POLLOUT: pollfds[i].events = POLLOUT; break; case PAL_POLLERR: pollfds[i].events = POLLERR; break; case PAL_POLLHUP: pollfds[i].events = POLLHUP; break; case PAL_POLLNVAL: pollfds[i].events = POLLNVAL; break; default: pollfds[i].events = event->Events; break; } pollfds[i].revents = 0; } int rv; while ((rv = poll(pollfds, (nfds_t)eventCount, milliseconds)) < 0 && errno == EINTR); if (rv < 0) { if (!useStackBuffer) { free(pollfds); } *triggered = 0; return ConvertErrorPlatformToPal(errno); } for (uint32_t i = 0; i < eventCount; i++) { const struct pollfd* pfd = &pollfds[i]; assert(pfd->fd == pollEvents[i].FileDescriptor); assert(pfd->events == pollEvents[i].Events); // same as the other switch, just system -> PollEvent switch (pfd->revents) { case 
POLLIN: pollEvents[i].TriggeredEvents = PAL_POLLIN; break; case POLLPRI: pollEvents[i].TriggeredEvents = PAL_POLLPRI; break; case POLLOUT: pollEvents[i].TriggeredEvents = PAL_POLLOUT; break; case POLLERR: pollEvents[i].TriggeredEvents = PAL_POLLERR; break; case POLLHUP: pollEvents[i].TriggeredEvents = PAL_POLLHUP; break; case POLLNVAL: pollEvents[i].TriggeredEvents = PAL_POLLNVAL; break; default: pollEvents[i].TriggeredEvents = (int16_t)pfd->revents; break; } } *triggered = (uint32_t)rv; if (!useStackBuffer) { free(pollfds); } return Error_SUCCESS; }
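Common_Read and Common_Write above both use the standard POSIX idiom of retrying a call that failed with EINTR, and Common_Poll wraps the same loop around poll(2). A standalone sketch of that retry idiom (POSIX-only; it reads from stdin, and the helper name is invented):

```cpp
#include <cerrno>
#include <cstdio>
#include <unistd.h>

// Retry read() only when it was interrupted by a signal; surface every other error.
static ssize_t ReadRetryingOnEintr(int fd, void* buffer, size_t size) {
    ssize_t count;
    do {
        count = read(fd, buffer, size);
    } while (count < 0 && errno == EINTR);
    return count;
}

int main() {
    char buf[16];
    ssize_t n = ReadRetryingOnEintr(STDIN_FILENO, buf, sizeof(buf));
    std::printf("read %zd bytes\n", n);
    return 0;
}
```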
-1
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types but in the code base, we were sometimes relying on `featureSIMD` flag which can be `false` in case `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for your the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features . On Arm64, we always support SIMD types but in the code base, we were sometimes relying on `featureSIMD` flag which can be `false` in case `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`. Will fix https://github.com/dotnet/runtime/issues/64972 Thanks @tannergooding for your the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/vm/cachelinealloc.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //--------------------------------------------------------------------------- // CCacheLineAllocator // // // This file dImplements the CCacheLineAllocator class. // // @comm // // Notes: // The CacheLineAllocator maintains a pool of free CacheLines // // The CacheLine Allocator provides static member functions // GetCacheLine and FreeCacheLine, //--------------------------------------------------------------------------- #include "common.h" #include <stddef.h> #include "cachelinealloc.h" #include "threads.h" #include "excep.h" /////////////////////////////////////////////////////// // CCacheLineAllocator::CCacheLineAllocator() // ////////////////////////////////////////////////////// CCacheLineAllocator::CCacheLineAllocator() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; m_freeList32.Init(); m_freeList64.Init(); m_registryList.Init(); } /////////////////////////////////////////////////////// // void CCacheLineAllocator::~CCacheLineAllocator() // ////////////////////////////////////////////////////// CCacheLineAllocator::~CCacheLineAllocator() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; LPCacheLine tempPtr = NULL; while((tempPtr = m_registryList.RemoveHead()) != NULL) { for (int i =0; i < CacheLine::numEntries; i++) { if(tempPtr->m_pAddr[i] != NULL) { if (!g_fProcessDetach) VFree(tempPtr->m_pAddr[i]); } } delete tempPtr; } } /////////////////////////////////////////////////////// // static void *CCacheLineAllocator::VAlloc(ULONG cbSize) // ////////////////////////////////////////////////////// void *CCacheLineAllocator::VAlloc(ULONG cbSize) { CONTRACT(void*) { NOTHROW; GC_NOTRIGGER; MODE_ANY; INJECT_FAULT(CONTRACT_RETURN NULL); POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); } CONTRACT_END; // helper to call virtual free to release memory int i =0; void* pv = ClrVirtualAlloc (NULL, cbSize, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); if (pv != NULL) { LPCacheLine tempPtr = m_registryList.GetHead(); if (tempPtr == NULL) { goto LNew; } for (i =0; i < CacheLine::numEntries; i++) { if(tempPtr->m_pAddr[i] == NULL) { tempPtr->m_pAddr[i] = pv; RETURN pv; } } LNew: // initialize the bucket before returning tempPtr = new (nothrow) CacheLine(); if (tempPtr != NULL) { tempPtr->Init64(); tempPtr->m_pAddr[0] = pv; m_registryList.InsertHead(tempPtr); } else { // couldn't find space to register this page ClrVirtualFree(pv, 0, MEM_RELEASE); RETURN NULL; } } RETURN pv; } /////////////////////////////////////////////////////// // void CCacheLineAllocator::VFree(void* pv) // ////////////////////////////////////////////////////// void CCacheLineAllocator::VFree(void* pv) { BOOL bRes = FALSE; CONTRACT_VOID { NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(CheckPointer(pv)); POSTCONDITION(bRes); } CONTRACT_END; // helper to call virtual free to release memory bRes = ClrVirtualFree (pv, 0, MEM_RELEASE); RETURN_VOID; } /////////////////////////////////////////////////////// // void *CCacheLineAllocator::GetCacheLine() // ////////////////////////////////////////////////////// //WARNING: must have a lock when calling this function void *CCacheLineAllocator::GetCacheLine64() { CONTRACT(void*) { NOTHROW; GC_NOTRIGGER; MODE_ANY; INJECT_FAULT(CONTRACT_RETURN NULL); POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); } CONTRACT_END; LPCacheLine tempPtr = m_freeList64.RemoveHead(); if (tempPtr != NULL) { // initialize the bucket before returning 
tempPtr->Init64(); RETURN tempPtr; } #define AllocSize (4096*16) ////////////////////////////////' /// Virtual Allocation for some more cache lines BYTE* ptr = (BYTE*)VAlloc(AllocSize); if(!ptr) RETURN NULL; tempPtr = (LPCacheLine)ptr; // Link all the buckets tempPtr = tempPtr+1; LPCacheLine maxPtr = (LPCacheLine)(ptr + AllocSize); while(tempPtr < maxPtr) { m_freeList64.InsertHead(tempPtr); tempPtr++; } // return the first block tempPtr = (LPCacheLine)ptr; tempPtr->Init64(); RETURN tempPtr; } /////////////////////////////////////////////////////// // void *CCacheLineAllocator::GetCacheLine32() // ////////////////////////////////////////////////////// //WARNING: must have a lock when calling this function void *CCacheLineAllocator::GetCacheLine32() { CONTRACT(void*) { NOTHROW; GC_NOTRIGGER; MODE_ANY; INJECT_FAULT(CONTRACT_RETURN NULL); POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); } CONTRACT_END; LPCacheLine tempPtr = m_freeList32.RemoveHead(); if (tempPtr != NULL) { // initialize the bucket before returning tempPtr->Init32(); RETURN tempPtr; } tempPtr = (LPCacheLine)GetCacheLine64(); if (tempPtr != NULL) { m_freeList32.InsertHead(tempPtr); tempPtr = (LPCacheLine)((BYTE *)tempPtr+32); } RETURN tempPtr; } /////////////////////////////////////////////////////// // void CCacheLineAllocator::FreeCacheLine64(void * tempPtr) // ////////////////////////////////////////////////////// //WARNING: must have a lock when calling this function void CCacheLineAllocator::FreeCacheLine64(void * tempPtr) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(CheckPointer(tempPtr)); } CONTRACTL_END; LPCacheLine pCLine = (LPCacheLine )tempPtr; m_freeList64.InsertHead(pCLine); } /////////////////////////////////////////////////////// // void CCacheLineAllocator::FreeCacheLine32(void * tempPtr) // ////////////////////////////////////////////////////// //WARNING: must have a lock when calling this function void CCacheLineAllocator::FreeCacheLine32(void * tempPtr) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(CheckPointer(tempPtr)); } CONTRACTL_END; LPCacheLine pCLine = (LPCacheLine )tempPtr; m_freeList32.InsertHead(pCLine); }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //--------------------------------------------------------------------------- // CCacheLineAllocator // // // This file dImplements the CCacheLineAllocator class. // // @comm // // Notes: // The CacheLineAllocator maintains a pool of free CacheLines // // The CacheLine Allocator provides static member functions // GetCacheLine and FreeCacheLine, //--------------------------------------------------------------------------- #include "common.h" #include <stddef.h> #include "cachelinealloc.h" #include "threads.h" #include "excep.h" /////////////////////////////////////////////////////// // CCacheLineAllocator::CCacheLineAllocator() // ////////////////////////////////////////////////////// CCacheLineAllocator::CCacheLineAllocator() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; m_freeList32.Init(); m_freeList64.Init(); m_registryList.Init(); } /////////////////////////////////////////////////////// // void CCacheLineAllocator::~CCacheLineAllocator() // ////////////////////////////////////////////////////// CCacheLineAllocator::~CCacheLineAllocator() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; LPCacheLine tempPtr = NULL; while((tempPtr = m_registryList.RemoveHead()) != NULL) { for (int i =0; i < CacheLine::numEntries; i++) { if(tempPtr->m_pAddr[i] != NULL) { if (!g_fProcessDetach) VFree(tempPtr->m_pAddr[i]); } } delete tempPtr; } } /////////////////////////////////////////////////////// // static void *CCacheLineAllocator::VAlloc(ULONG cbSize) // ////////////////////////////////////////////////////// void *CCacheLineAllocator::VAlloc(ULONG cbSize) { CONTRACT(void*) { NOTHROW; GC_NOTRIGGER; MODE_ANY; INJECT_FAULT(CONTRACT_RETURN NULL); POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); } CONTRACT_END; // helper to call virtual free to release memory int i =0; void* pv = ClrVirtualAlloc (NULL, cbSize, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); if (pv != NULL) { LPCacheLine tempPtr = m_registryList.GetHead(); if (tempPtr == NULL) { goto LNew; } for (i =0; i < CacheLine::numEntries; i++) { if(tempPtr->m_pAddr[i] == NULL) { tempPtr->m_pAddr[i] = pv; RETURN pv; } } LNew: // initialize the bucket before returning tempPtr = new (nothrow) CacheLine(); if (tempPtr != NULL) { tempPtr->Init64(); tempPtr->m_pAddr[0] = pv; m_registryList.InsertHead(tempPtr); } else { // couldn't find space to register this page ClrVirtualFree(pv, 0, MEM_RELEASE); RETURN NULL; } } RETURN pv; } /////////////////////////////////////////////////////// // void CCacheLineAllocator::VFree(void* pv) // ////////////////////////////////////////////////////// void CCacheLineAllocator::VFree(void* pv) { BOOL bRes = FALSE; CONTRACT_VOID { NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(CheckPointer(pv)); POSTCONDITION(bRes); } CONTRACT_END; // helper to call virtual free to release memory bRes = ClrVirtualFree (pv, 0, MEM_RELEASE); RETURN_VOID; } /////////////////////////////////////////////////////// // void *CCacheLineAllocator::GetCacheLine() // ////////////////////////////////////////////////////// //WARNING: must have a lock when calling this function void *CCacheLineAllocator::GetCacheLine64() { CONTRACT(void*) { NOTHROW; GC_NOTRIGGER; MODE_ANY; INJECT_FAULT(CONTRACT_RETURN NULL); POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); } CONTRACT_END; LPCacheLine tempPtr = m_freeList64.RemoveHead(); if (tempPtr != NULL) { // initialize the bucket before returning 
tempPtr->Init64(); RETURN tempPtr; } #define AllocSize (4096*16) ////////////////////////////////' /// Virtual Allocation for some more cache lines BYTE* ptr = (BYTE*)VAlloc(AllocSize); if(!ptr) RETURN NULL; tempPtr = (LPCacheLine)ptr; // Link all the buckets tempPtr = tempPtr+1; LPCacheLine maxPtr = (LPCacheLine)(ptr + AllocSize); while(tempPtr < maxPtr) { m_freeList64.InsertHead(tempPtr); tempPtr++; } // return the first block tempPtr = (LPCacheLine)ptr; tempPtr->Init64(); RETURN tempPtr; } /////////////////////////////////////////////////////// // void *CCacheLineAllocator::GetCacheLine32() // ////////////////////////////////////////////////////// //WARNING: must have a lock when calling this function void *CCacheLineAllocator::GetCacheLine32() { CONTRACT(void*) { NOTHROW; GC_NOTRIGGER; MODE_ANY; INJECT_FAULT(CONTRACT_RETURN NULL); POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); } CONTRACT_END; LPCacheLine tempPtr = m_freeList32.RemoveHead(); if (tempPtr != NULL) { // initialize the bucket before returning tempPtr->Init32(); RETURN tempPtr; } tempPtr = (LPCacheLine)GetCacheLine64(); if (tempPtr != NULL) { m_freeList32.InsertHead(tempPtr); tempPtr = (LPCacheLine)((BYTE *)tempPtr+32); } RETURN tempPtr; } /////////////////////////////////////////////////////// // void CCacheLineAllocator::FreeCacheLine64(void * tempPtr) // ////////////////////////////////////////////////////// //WARNING: must have a lock when calling this function void CCacheLineAllocator::FreeCacheLine64(void * tempPtr) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(CheckPointer(tempPtr)); } CONTRACTL_END; LPCacheLine pCLine = (LPCacheLine )tempPtr; m_freeList64.InsertHead(pCLine); } /////////////////////////////////////////////////////// // void CCacheLineAllocator::FreeCacheLine32(void * tempPtr) // ////////////////////////////////////////////////////// //WARNING: must have a lock when calling this function void CCacheLineAllocator::FreeCacheLine32(void * tempPtr) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(CheckPointer(tempPtr)); } CONTRACTL_END; LPCacheLine pCLine = (LPCacheLine )tempPtr; m_freeList32.InsertHead(pCLine); }
-1
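The cachelinealloc.cpp file above maintains free lists of 32- and 64-byte cache lines and, per its own WARNING comments, requires callers to hold a lock around GetCacheLine*/FreeCacheLine*. As an illustration only (these wrappers and the std::mutex are placeholder assumptions, not the runtime's real callers or its real locking), the documented contract could be honored like this:

#include <mutex>
#include "cachelinealloc.h"   // declares CCacheLineAllocator, as in the file above

// Hypothetical wrappers (not part of the runtime) showing the documented
// contract: GetCacheLine64/FreeCacheLine64 must be called under a lock.
static std::mutex g_cacheLineLock;   // placeholder for whatever lock real callers use

void* GetLine64Locked(CCacheLineAllocator& allocator)
{
    std::lock_guard<std::mutex> hold(g_cacheLineLock);
    return allocator.GetCacheLine64();   // 64-byte line, or NULL if the VAlloc failed
}

void FreeLine64Locked(CCacheLineAllocator& allocator, void* line)
{
    std::lock_guard<std::mutex> hold(g_cacheLineLock);
    allocator.FreeCacheLine64(line);     // pushes the line back onto the 64-byte free list
}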
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`, a configuration that does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`, a configuration that does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/gc/unix/events.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <cstdint> #include <cstddef> #include <cassert> #include <memory> #include <mutex> #include <pthread.h> #include <errno.h> #include "config.gc.h" #include "common.h" #include "gcenv.structs.h" #include "gcenv.base.h" #include "gcenv.os.h" #include "globals.h" namespace { #if HAVE_PTHREAD_CONDATTR_SETCLOCK void TimeSpecAdd(timespec* time, uint32_t milliseconds) { uint64_t nsec = time->tv_nsec + (uint64_t)milliseconds * tccMilliSecondsToNanoSeconds; if (nsec >= tccSecondsToNanoSeconds) { time->tv_sec += nsec / tccSecondsToNanoSeconds; nsec %= tccSecondsToNanoSeconds; } time->tv_nsec = nsec; } #endif // HAVE_PTHREAD_CONDATTR_SETCLOCK #if HAVE_CLOCK_GETTIME_NSEC_NP // Convert nanoseconds to the timespec structure // Parameters: // nanoseconds - time in nanoseconds to convert // t - the target timespec structure void NanosecondsToTimeSpec(uint64_t nanoseconds, timespec* t) { t->tv_sec = nanoseconds / tccSecondsToNanoSeconds; t->tv_nsec = nanoseconds % tccSecondsToNanoSeconds; } #endif // HAVE_CLOCK_GETTIME_NSEC_NP } // anonymous namespace class GCEvent::Impl { pthread_cond_t m_condition; pthread_mutex_t m_mutex; bool m_manualReset; bool m_state; bool m_isValid; public: Impl(bool manualReset, bool initialState) : m_manualReset(manualReset), m_state(initialState), m_isValid(false) { } bool Initialize() { pthread_condattr_t attrs; int st = pthread_condattr_init(&attrs); if (st != 0) { assert(!"Failed to initialize UnixEvent condition attribute"); return false; } // TODO(segilles) implement this for CoreCLR //PthreadCondAttrHolder attrsHolder(&attrs); #if HAVE_PTHREAD_CONDATTR_SETCLOCK && !HAVE_CLOCK_GETTIME_NSEC_NP // Ensure that the pthread_cond_timedwait will use CLOCK_MONOTONIC st = pthread_condattr_setclock(&attrs, CLOCK_MONOTONIC); if (st != 0) { assert(!"Failed to set UnixEvent condition variable wait clock"); return false; } #endif // HAVE_PTHREAD_CONDATTR_SETCLOCK && !HAVE_CLOCK_GETTIME_NSEC_NP st = pthread_mutex_init(&m_mutex, NULL); if (st != 0) { assert(!"Failed to initialize UnixEvent mutex"); return false; } st = pthread_cond_init(&m_condition, &attrs); if (st != 0) { assert(!"Failed to initialize UnixEvent condition variable"); st = pthread_mutex_destroy(&m_mutex); assert(st == 0 && "Failed to destroy UnixEvent mutex"); return false; } m_isValid = true; return true; } void CloseEvent() { if (m_isValid) { int st = pthread_mutex_destroy(&m_mutex); assert(st == 0 && "Failed to destroy UnixEvent mutex"); st = pthread_cond_destroy(&m_condition); assert(st == 0 && "Failed to destroy UnixEvent condition variable"); } } uint32_t Wait(uint32_t milliseconds, bool alertable) { UNREFERENCED_PARAMETER(alertable); timespec endTime; #if HAVE_CLOCK_GETTIME_NSEC_NP uint64_t endMachTime; if (milliseconds != INFINITE) { uint64_t nanoseconds = (uint64_t)milliseconds * tccMilliSecondsToNanoSeconds; endMachTime = clock_gettime_nsec_np(CLOCK_UPTIME_RAW) + nanoseconds; } #elif HAVE_PTHREAD_CONDATTR_SETCLOCK if (milliseconds != INFINITE) { clock_gettime(CLOCK_MONOTONIC, &endTime); TimeSpecAdd(&endTime, milliseconds); } #else #error "Don't know how to perform timed wait on this platform" #endif int st = 0; pthread_mutex_lock(&m_mutex); while (!m_state) { if (milliseconds == INFINITE) { st = pthread_cond_wait(&m_condition, &m_mutex); } else { #if HAVE_CLOCK_GETTIME_NSEC_NP // Since OSX doesn't support CLOCK_MONOTONIC, we use relative variant of the // timed wait and 
we need to handle spurious wakeups properly. st = pthread_cond_timedwait_relative_np(&m_condition, &m_mutex, &endTime); if ((st == 0) && !m_state) { uint64_t machTime = clock_gettime_nsec_np(CLOCK_UPTIME_RAW); if (machTime < endMachTime) { // The wake up was spurious, recalculate the relative endTime uint64_t remainingNanoseconds = endMachTime - machTime; NanosecondsToTimeSpec(remainingNanoseconds, &endTime); } else { // Although the timed wait didn't report a timeout, time calculated from the // mach time shows we have already reached the end time. It can happen if // the wait was spuriously woken up right before the timeout. st = ETIMEDOUT; } } #else // HAVE_CLOCK_GETTIME_NSEC_NP st = pthread_cond_timedwait(&m_condition, &m_mutex, &endTime); #endif // HAVE_CLOCK_GETTIME_NSEC_NP // Verify that if the wait timed out, the event was not set assert((st != ETIMEDOUT) || !m_state); } if (st != 0) { // wait failed or timed out break; } } if ((st == 0) && !m_manualReset) { // Clear the state for auto-reset events so that only one waiter gets released m_state = false; } pthread_mutex_unlock(&m_mutex); uint32_t waitStatus; if (st == 0) { waitStatus = WAIT_OBJECT_0; } else if (st == ETIMEDOUT) { waitStatus = WAIT_TIMEOUT; } else { waitStatus = WAIT_FAILED; } return waitStatus; } void Set() { pthread_mutex_lock(&m_mutex); m_state = true; pthread_mutex_unlock(&m_mutex); // Unblock all threads waiting for the condition variable pthread_cond_broadcast(&m_condition); } void Reset() { pthread_mutex_lock(&m_mutex); m_state = false; pthread_mutex_unlock(&m_mutex); } }; GCEvent::GCEvent() : m_impl(nullptr) { } void GCEvent::CloseEvent() { assert(m_impl != nullptr); m_impl->CloseEvent(); } void GCEvent::Set() { assert(m_impl != nullptr); m_impl->Set(); } void GCEvent::Reset() { assert(m_impl != nullptr); m_impl->Reset(); } uint32_t GCEvent::Wait(uint32_t timeout, bool alertable) { assert(m_impl != nullptr); return m_impl->Wait(timeout, alertable); } bool GCEvent::CreateAutoEventNoThrow(bool initialState) { // This implementation of GCEvent makes no distinction between // host-aware and non-host-aware events (since there will be no host). return CreateOSAutoEventNoThrow(initialState); } bool GCEvent::CreateManualEventNoThrow(bool initialState) { // This implementation of GCEvent makes no distinction between // host-aware and non-host-aware events (since there will be no host). return CreateOSManualEventNoThrow(initialState); } bool GCEvent::CreateOSAutoEventNoThrow(bool initialState) { assert(m_impl == nullptr); std::unique_ptr<GCEvent::Impl> event(new (std::nothrow) GCEvent::Impl(false, initialState)); if (!event) { return false; } if (!event->Initialize()) { return false; } m_impl = event.release(); return true; } bool GCEvent::CreateOSManualEventNoThrow(bool initialState) { assert(m_impl == nullptr); std::unique_ptr<GCEvent::Impl> event(new (std::nothrow) GCEvent::Impl(true, initialState)); if (!event) { return false; } if (!event->Initialize()) { return false; } m_impl = event.release(); return true; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <cstdint> #include <cstddef> #include <cassert> #include <memory> #include <mutex> #include <pthread.h> #include <errno.h> #include "config.gc.h" #include "common.h" #include "gcenv.structs.h" #include "gcenv.base.h" #include "gcenv.os.h" #include "globals.h" namespace { #if HAVE_PTHREAD_CONDATTR_SETCLOCK void TimeSpecAdd(timespec* time, uint32_t milliseconds) { uint64_t nsec = time->tv_nsec + (uint64_t)milliseconds * tccMilliSecondsToNanoSeconds; if (nsec >= tccSecondsToNanoSeconds) { time->tv_sec += nsec / tccSecondsToNanoSeconds; nsec %= tccSecondsToNanoSeconds; } time->tv_nsec = nsec; } #endif // HAVE_PTHREAD_CONDATTR_SETCLOCK #if HAVE_CLOCK_GETTIME_NSEC_NP // Convert nanoseconds to the timespec structure // Parameters: // nanoseconds - time in nanoseconds to convert // t - the target timespec structure void NanosecondsToTimeSpec(uint64_t nanoseconds, timespec* t) { t->tv_sec = nanoseconds / tccSecondsToNanoSeconds; t->tv_nsec = nanoseconds % tccSecondsToNanoSeconds; } #endif // HAVE_CLOCK_GETTIME_NSEC_NP } // anonymous namespace class GCEvent::Impl { pthread_cond_t m_condition; pthread_mutex_t m_mutex; bool m_manualReset; bool m_state; bool m_isValid; public: Impl(bool manualReset, bool initialState) : m_manualReset(manualReset), m_state(initialState), m_isValid(false) { } bool Initialize() { pthread_condattr_t attrs; int st = pthread_condattr_init(&attrs); if (st != 0) { assert(!"Failed to initialize UnixEvent condition attribute"); return false; } // TODO(segilles) implement this for CoreCLR //PthreadCondAttrHolder attrsHolder(&attrs); #if HAVE_PTHREAD_CONDATTR_SETCLOCK && !HAVE_CLOCK_GETTIME_NSEC_NP // Ensure that the pthread_cond_timedwait will use CLOCK_MONOTONIC st = pthread_condattr_setclock(&attrs, CLOCK_MONOTONIC); if (st != 0) { assert(!"Failed to set UnixEvent condition variable wait clock"); return false; } #endif // HAVE_PTHREAD_CONDATTR_SETCLOCK && !HAVE_CLOCK_GETTIME_NSEC_NP st = pthread_mutex_init(&m_mutex, NULL); if (st != 0) { assert(!"Failed to initialize UnixEvent mutex"); return false; } st = pthread_cond_init(&m_condition, &attrs); if (st != 0) { assert(!"Failed to initialize UnixEvent condition variable"); st = pthread_mutex_destroy(&m_mutex); assert(st == 0 && "Failed to destroy UnixEvent mutex"); return false; } m_isValid = true; return true; } void CloseEvent() { if (m_isValid) { int st = pthread_mutex_destroy(&m_mutex); assert(st == 0 && "Failed to destroy UnixEvent mutex"); st = pthread_cond_destroy(&m_condition); assert(st == 0 && "Failed to destroy UnixEvent condition variable"); } } uint32_t Wait(uint32_t milliseconds, bool alertable) { UNREFERENCED_PARAMETER(alertable); timespec endTime; #if HAVE_CLOCK_GETTIME_NSEC_NP uint64_t endMachTime; if (milliseconds != INFINITE) { uint64_t nanoseconds = (uint64_t)milliseconds * tccMilliSecondsToNanoSeconds; endMachTime = clock_gettime_nsec_np(CLOCK_UPTIME_RAW) + nanoseconds; } #elif HAVE_PTHREAD_CONDATTR_SETCLOCK if (milliseconds != INFINITE) { clock_gettime(CLOCK_MONOTONIC, &endTime); TimeSpecAdd(&endTime, milliseconds); } #else #error "Don't know how to perform timed wait on this platform" #endif int st = 0; pthread_mutex_lock(&m_mutex); while (!m_state) { if (milliseconds == INFINITE) { st = pthread_cond_wait(&m_condition, &m_mutex); } else { #if HAVE_CLOCK_GETTIME_NSEC_NP // Since OSX doesn't support CLOCK_MONOTONIC, we use relative variant of the // timed wait and 
we need to handle spurious wakeups properly. st = pthread_cond_timedwait_relative_np(&m_condition, &m_mutex, &endTime); if ((st == 0) && !m_state) { uint64_t machTime = clock_gettime_nsec_np(CLOCK_UPTIME_RAW); if (machTime < endMachTime) { // The wake up was spurious, recalculate the relative endTime uint64_t remainingNanoseconds = endMachTime - machTime; NanosecondsToTimeSpec(remainingNanoseconds, &endTime); } else { // Although the timed wait didn't report a timeout, time calculated from the // mach time shows we have already reached the end time. It can happen if // the wait was spuriously woken up right before the timeout. st = ETIMEDOUT; } } #else // HAVE_CLOCK_GETTIME_NSEC_NP st = pthread_cond_timedwait(&m_condition, &m_mutex, &endTime); #endif // HAVE_CLOCK_GETTIME_NSEC_NP // Verify that if the wait timed out, the event was not set assert((st != ETIMEDOUT) || !m_state); } if (st != 0) { // wait failed or timed out break; } } if ((st == 0) && !m_manualReset) { // Clear the state for auto-reset events so that only one waiter gets released m_state = false; } pthread_mutex_unlock(&m_mutex); uint32_t waitStatus; if (st == 0) { waitStatus = WAIT_OBJECT_0; } else if (st == ETIMEDOUT) { waitStatus = WAIT_TIMEOUT; } else { waitStatus = WAIT_FAILED; } return waitStatus; } void Set() { pthread_mutex_lock(&m_mutex); m_state = true; pthread_mutex_unlock(&m_mutex); // Unblock all threads waiting for the condition variable pthread_cond_broadcast(&m_condition); } void Reset() { pthread_mutex_lock(&m_mutex); m_state = false; pthread_mutex_unlock(&m_mutex); } }; GCEvent::GCEvent() : m_impl(nullptr) { } void GCEvent::CloseEvent() { assert(m_impl != nullptr); m_impl->CloseEvent(); } void GCEvent::Set() { assert(m_impl != nullptr); m_impl->Set(); } void GCEvent::Reset() { assert(m_impl != nullptr); m_impl->Reset(); } uint32_t GCEvent::Wait(uint32_t timeout, bool alertable) { assert(m_impl != nullptr); return m_impl->Wait(timeout, alertable); } bool GCEvent::CreateAutoEventNoThrow(bool initialState) { // This implementation of GCEvent makes no distinction between // host-aware and non-host-aware events (since there will be no host). return CreateOSAutoEventNoThrow(initialState); } bool GCEvent::CreateManualEventNoThrow(bool initialState) { // This implementation of GCEvent makes no distinction between // host-aware and non-host-aware events (since there will be no host). return CreateOSManualEventNoThrow(initialState); } bool GCEvent::CreateOSAutoEventNoThrow(bool initialState) { assert(m_impl == nullptr); std::unique_ptr<GCEvent::Impl> event(new (std::nothrow) GCEvent::Impl(false, initialState)); if (!event) { return false; } if (!event->Initialize()) { return false; } m_impl = event.release(); return true; } bool GCEvent::CreateOSManualEventNoThrow(bool initialState) { assert(m_impl == nullptr); std::unique_ptr<GCEvent::Impl> event(new (std::nothrow) GCEvent::Impl(true, initialState)); if (!event) { return false; } if (!event->Initialize()) { return false; } m_impl = event.release(); return true; }
-1
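GCEvent::Impl::Wait in events.cpp above loops on the m_state predicate precisely because pthread_cond_wait/pthread_cond_timedwait are allowed to wake spuriously. The standalone sketch below is my own minimal manual-reset event, not the runtime's class; it just isolates the same predicate-loop pattern (error handling omitted for brevity):

#include <pthread.h>

// Minimal manual-reset event illustrating the predicate-loop pattern used by
// GCEvent::Impl::Wait: re-check the guarded state after every wakeup.
struct SimpleEvent
{
    pthread_mutex_t m_mutex;
    pthread_cond_t  m_cond;
    bool            m_signaled;

    SimpleEvent() : m_signaled(false)
    {
        pthread_mutex_init(&m_mutex, nullptr);
        pthread_cond_init(&m_cond, nullptr);
    }

    ~SimpleEvent()
    {
        pthread_cond_destroy(&m_cond);
        pthread_mutex_destroy(&m_mutex);
    }

    void Set()
    {
        pthread_mutex_lock(&m_mutex);
        m_signaled = true;
        pthread_mutex_unlock(&m_mutex);
        pthread_cond_broadcast(&m_cond);   // wake every waiter, as GCEvent::Set does
    }

    void Wait()
    {
        pthread_mutex_lock(&m_mutex);
        while (!m_signaled)                           // loop guards against spurious
            pthread_cond_wait(&m_cond, &m_mutex);     // wakeups permitted by POSIX
        pthread_mutex_unlock(&m_mutex);
    }
};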
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`, a configuration that does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`, a configuration that does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/pal/src/libunwind_mac/include/ucontext.h
/* Copyright (C) 2004 Hewlett-Packard Co. Contributed by David Mosberger-Tang <[email protected]>. This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #define UC_MCONTEXT_GREGS_R8 0x28 #define UC_MCONTEXT_GREGS_R9 0x30 #define UC_MCONTEXT_GREGS_R10 0x38 #define UC_MCONTEXT_GREGS_R11 0x40 #define UC_MCONTEXT_GREGS_R12 0x48 #define UC_MCONTEXT_GREGS_R13 0x50 #define UC_MCONTEXT_GREGS_R14 0x58 #define UC_MCONTEXT_GREGS_R15 0x60 #define UC_MCONTEXT_GREGS_RDI 0x68 #define UC_MCONTEXT_GREGS_RSI 0x70 #define UC_MCONTEXT_GREGS_RBP 0x78 #define UC_MCONTEXT_GREGS_RBX 0x80 #define UC_MCONTEXT_GREGS_RDX 0x88 #define UC_MCONTEXT_GREGS_RAX 0x90 #define UC_MCONTEXT_GREGS_RCX 0x98 #define UC_MCONTEXT_GREGS_RSP 0xa0 #define UC_MCONTEXT_GREGS_RIP 0xa8 #define UC_MCONTEXT_FPREGS_PTR 0x1a8 #define UC_MCONTEXT_FPREGS_MEM 0xe0 #define UC_SIGMASK 0x128 #define FPREGS_OFFSET_MXCSR 0x18 #include <sys/ucontext.h>
/* Copyright (C) 2004 Hewlett-Packard Co. Contributed by David Mosberger-Tang <[email protected]>. This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #define UC_MCONTEXT_GREGS_R8 0x28 #define UC_MCONTEXT_GREGS_R9 0x30 #define UC_MCONTEXT_GREGS_R10 0x38 #define UC_MCONTEXT_GREGS_R11 0x40 #define UC_MCONTEXT_GREGS_R12 0x48 #define UC_MCONTEXT_GREGS_R13 0x50 #define UC_MCONTEXT_GREGS_R14 0x58 #define UC_MCONTEXT_GREGS_R15 0x60 #define UC_MCONTEXT_GREGS_RDI 0x68 #define UC_MCONTEXT_GREGS_RSI 0x70 #define UC_MCONTEXT_GREGS_RBP 0x78 #define UC_MCONTEXT_GREGS_RBX 0x80 #define UC_MCONTEXT_GREGS_RDX 0x88 #define UC_MCONTEXT_GREGS_RAX 0x90 #define UC_MCONTEXT_GREGS_RCX 0x98 #define UC_MCONTEXT_GREGS_RSP 0xa0 #define UC_MCONTEXT_GREGS_RIP 0xa8 #define UC_MCONTEXT_FPREGS_PTR 0x1a8 #define UC_MCONTEXT_FPREGS_MEM 0xe0 #define UC_SIGMASK 0x128 #define FPREGS_OFFSET_MXCSR 0x18 #include <sys/ucontext.h>
-1
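The ucontext.h header above only defines byte offsets (UC_MCONTEXT_GREGS_*) into a saved context image. Purely as an illustration — this helper does not exist in the tree, and the layout it assumes is simply "a raw byte blob addressed by the offsets above" — such offsets are typically consumed by reading a fixed-size value at a fixed displacement from the context base:

#include <cstdint>
#include <cstring>

// Hypothetical helper: read a 64-bit general-purpose register out of a raw
// context image using one of the UC_MCONTEXT_GREGS_* offsets defined above.
static uint64_t ReadContextU64(const uint8_t* contextBase, size_t byteOffset)
{
    uint64_t value;
    std::memcpy(&value, contextBase + byteOffset, sizeof(value));   // unaligned-safe read
    return value;
}

// e.g. uint64_t rip = ReadContextU64(contextBase, UC_MCONTEXT_GREGS_RIP);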
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`, a configuration that does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`, a configuration that does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/pal/tests/palsuite/c_runtime/printf/test14/test14.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: test14.c ** ** Purpose: Test #14 for the printf function. Tests the lowercase ** exponential notation double specifier (%e) ** ** **==========================================================================*/ #include <palsuite.h> #include "../printf.h" PALTEST(c_runtime_printf_test14_paltest_printf_test14, "c_runtime/printf/test14/paltest_printf_test14") { double val = 256.0; double neg = -256.0; if (PAL_Initialize(argc, argv)) { return FAIL; } DoDoubleTest("foo %e", val, "foo 2.560000e+002", "foo 2.560000e+02"); DoDoubleTest("foo %le", val, "foo 2.560000e+002", "foo 2.560000e+02"); DoDoubleTest("foo %he", val, "foo 2.560000e+002", "foo 2.560000e+02"); DoDoubleTest("foo %Le", val, "foo 2.560000e+002", "foo 2.560000e+02"); DoDoubleTest("foo %I64e", val, "foo 2.560000e+002", "foo 2.560000e+02"); DoDoubleTest("foo %14e", val, "foo 2.560000e+002", "foo 2.560000e+02"); DoDoubleTest("foo %-14e", val, "foo 2.560000e+002 ", "foo 2.560000e+02 "); DoDoubleTest("foo %.1e", val, "foo 2.6e+002", "foo 2.6e+02"); DoDoubleTest("foo %.8e", val, "foo 2.56000000e+002", "foo 2.56000000e+02"); DoDoubleTest("foo %014e", val, "foo 02.560000e+002", "foo 002.560000e+02"); DoDoubleTest("foo %#e", val, "foo 2.560000e+002", "foo 2.560000e+02"); DoDoubleTest("foo %+e", val, "foo +2.560000e+002", "foo +2.560000e+02"); DoDoubleTest("foo % e", val, "foo 2.560000e+002", "foo 2.560000e+02"); DoDoubleTest("foo %+e", neg, "foo -2.560000e+002", "foo -2.560000e+02"); DoDoubleTest("foo % e", neg, "foo -2.560000e+002", "foo -2.560000e+02"); PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: test14.c ** ** Purpose: Test #14 for the printf function. Tests the lowercase ** exponential notation double specifier (%e) ** ** **==========================================================================*/ #include <palsuite.h> #include "../printf.h" PALTEST(c_runtime_printf_test14_paltest_printf_test14, "c_runtime/printf/test14/paltest_printf_test14") { double val = 256.0; double neg = -256.0; if (PAL_Initialize(argc, argv)) { return FAIL; } DoDoubleTest("foo %e", val, "foo 2.560000e+002", "foo 2.560000e+02"); DoDoubleTest("foo %le", val, "foo 2.560000e+002", "foo 2.560000e+02"); DoDoubleTest("foo %he", val, "foo 2.560000e+002", "foo 2.560000e+02"); DoDoubleTest("foo %Le", val, "foo 2.560000e+002", "foo 2.560000e+02"); DoDoubleTest("foo %I64e", val, "foo 2.560000e+002", "foo 2.560000e+02"); DoDoubleTest("foo %14e", val, "foo 2.560000e+002", "foo 2.560000e+02"); DoDoubleTest("foo %-14e", val, "foo 2.560000e+002 ", "foo 2.560000e+02 "); DoDoubleTest("foo %.1e", val, "foo 2.6e+002", "foo 2.6e+02"); DoDoubleTest("foo %.8e", val, "foo 2.56000000e+002", "foo 2.56000000e+02"); DoDoubleTest("foo %014e", val, "foo 02.560000e+002", "foo 002.560000e+02"); DoDoubleTest("foo %#e", val, "foo 2.560000e+002", "foo 2.560000e+02"); DoDoubleTest("foo %+e", val, "foo +2.560000e+002", "foo +2.560000e+02"); DoDoubleTest("foo % e", val, "foo 2.560000e+002", "foo 2.560000e+02"); DoDoubleTest("foo %+e", neg, "foo -2.560000e+002", "foo -2.560000e+02"); DoDoubleTest("foo % e", neg, "foo -2.560000e+002", "foo -2.560000e+02"); PAL_Terminate(); return PASS; }
-1
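The PAL test above accepts both a three-digit exponent (older Windows CRTs print "e+002") and the two-digit C99 form ("e+02") for the %e conversion. A minimal standalone reproduction of the conversions it exercises might look like this; the outputs in the comments assume a C99-conforming printf:

#include <cstdio>

int main()
{
    std::printf("foo %e\n", 256.0);     // "foo 2.560000e+02" ("...e+002" on older CRTs)
    std::printf("foo %.1e\n", 256.0);   // "foo 2.6e+02"  (precision 1 rounds 2.56 -> 2.6)
    std::printf("foo %014e\n", 256.0);  // "foo 002.560000e+02" (zero-padded to width 14)
    std::printf("foo %+e\n", -256.0);   // "foo -2.560000e+02"
    return 0;
}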
dotnet/runtime
66,411
Arm64: Always use SIMD features
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`, a configuration that does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
kunalspathak
2022-03-09T21:00:00Z
2022-03-12T04:35:06Z
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`, a configuration that does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead. Will fix https://github.com/dotnet/runtime/issues/64972. Thanks @tannergooding for the references: - https://github.com/dotnet/runtime/issues/66206 - https://github.com/dotnet/runtime/issues/11701 - https://github.com/dotnet/runtime/issues/9473.
./src/coreclr/pal/tests/palsuite/c_runtime/vsprintf/test1/test1.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test1.c ** ** Purpose: Test #1 for the vsprintf function. ** ** **===================================================================*/ #include <palsuite.h> #include "../vsprintf.h" /* * Notes: memcmp is used, as is strlen. */ PALTEST(c_runtime_vsprintf_test1_paltest_vsprintf_test1, "c_runtime/vsprintf/test1/paltest_vsprintf_test1") { char checkstr[] = "hello world"; char buf[256] = { 0 }; int ret; if (PAL_Initialize(argc, argv) != 0) { return(FAIL); } testvsp(buf, ARRAY_SIZE(buf), "hello world"); if (memcmp(checkstr, buf, strlen(checkstr)+1) != 0) { Fail("ERROR: expected \"%s\" (up to %d chars), got \"%s\"\n", checkstr, 256, buf); } testvsp(buf, ARRAY_SIZE(buf), "xxxxxxxxxxxxxxxxx"); ret = testvsp(buf, ARRAY_SIZE(buf), "hello world"); if (ret != strlen(checkstr)) { Fail("ERROR: expected negative return value, got %d", ret); } if (memcmp(checkstr, buf, ret) != 0) { Fail("ERROR: expected %s, got %s\n", checkstr, buf); } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test1.c ** ** Purpose: Test #1 for the vsprintf function. ** ** **===================================================================*/ #include <palsuite.h> #include "../vsprintf.h" /* * Notes: memcmp is used, as is strlen. */ PALTEST(c_runtime_vsprintf_test1_paltest_vsprintf_test1, "c_runtime/vsprintf/test1/paltest_vsprintf_test1") { char checkstr[] = "hello world"; char buf[256] = { 0 }; int ret; if (PAL_Initialize(argc, argv) != 0) { return(FAIL); } testvsp(buf, ARRAY_SIZE(buf), "hello world"); if (memcmp(checkstr, buf, strlen(checkstr)+1) != 0) { Fail("ERROR: expected \"%s\" (up to %d chars), got \"%s\"\n", checkstr, 256, buf); } testvsp(buf, ARRAY_SIZE(buf), "xxxxxxxxxxxxxxxxx"); ret = testvsp(buf, ARRAY_SIZE(buf), "hello world"); if (ret != strlen(checkstr)) { Fail("ERROR: expected negative return value, got %d", ret); } if (memcmp(checkstr, buf, ret) != 0) { Fail("ERROR: expected %s, got %s\n", checkstr, buf); } PAL_Terminate(); return PASS; }
-1
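The vsprintf test above drives the formatting through a testvsp helper that hides a va_list behind a variadic signature. A hedged sketch of how such a wrapper is commonly written follows; the name FormatInto is mine, and the real PAL helper may differ in detail (for example, it may call vsprintf rather than vsnprintf):

#include <cstdarg>
#include <cstddef>
#include <cstdio>

// Illustrative stand-in for a testvsp-style helper: forwards the variadic
// arguments through a va_list and returns the number of characters written.
static int FormatInto(char* buffer, size_t count, const char* format, ...)
{
    va_list args;
    va_start(args, format);
    int written = std::vsnprintf(buffer, count, format, args);
    va_end(args);
    return written;
}

// Usage: char buf[256]; int n = FormatInto(buf, sizeof(buf), "hello %s", "world");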